Dataset schema (column: dtype, range; ⌀ marks a nullable column):

hexsha: string, length 40
size: int64, 109 – 1.04M
ext: string, 10 classes
lang: string, 1 class
max_stars_repo_path: string, length 3 – 227
max_stars_repo_name: string, length 4 – 125
max_stars_repo_head_hexsha: string, length 40 – 78
max_stars_repo_licenses: list, length 1 – 10
max_stars_count: int64, 1 – 82.5k ⌀
max_stars_repo_stars_event_min_datetime: stringdate, 2015-01-01 00:04:51 to 2022-03-31 21:53:07 ⌀
max_stars_repo_stars_event_max_datetime: stringdate, 2015-01-01 06:08:26 to 2022-03-31 23:57:44 ⌀
max_issues_repo_path: string, length 3 – 225
max_issues_repo_name: string, length 4 – 125
max_issues_repo_head_hexsha: string, length 40 – 78
max_issues_repo_licenses: list, length 1 – 10
max_issues_count: int64, 1 – 116k ⌀
max_issues_repo_issues_event_min_datetime: stringdate, 2015-01-01 00:00:47 to 2022-03-31 22:44:55 ⌀
max_issues_repo_issues_event_max_datetime: stringdate, 2015-01-01 16:29:55 to 2022-03-31 23:59:47 ⌀
max_forks_repo_path: string, length 3 – 225
max_forks_repo_name: string, length 4 – 125
max_forks_repo_head_hexsha: string, length 40 – 78
max_forks_repo_licenses: list, length 1 – 10
max_forks_count: int64, 1 – 54.3k ⌀
max_forks_repo_forks_event_min_datetime: string, length 24 ⌀
max_forks_repo_forks_event_max_datetime: string, length 24 ⌀
content: string, length 109 – 1.04M
avg_line_length: float64, 5.17 – 98.7
max_line_length: int64, 9 – 999
alphanum_fraction: float64, 0.28 – 0.91
qsc_code_num_words_quality_signal: int64, 30 – 182k
qsc_code_num_chars_quality_signal: float64, 109 – 1.04M
qsc_code_mean_word_length_quality_signal: float64, 2 – 9.97
qsc_code_frac_words_unique_quality_signal: float64, 0.01 – 1
qsc_code_frac_chars_top_2grams_quality_signal: float64, 0 – 0.2
qsc_code_frac_chars_top_3grams_quality_signal: float64, 0 – 0.18
qsc_code_frac_chars_top_4grams_quality_signal: float64, 0 – 0.16
qsc_code_frac_chars_dupe_5grams_quality_signal: float64, 0 – 0.8
qsc_code_frac_chars_dupe_6grams_quality_signal: float64, 0 – 0.7
qsc_code_frac_chars_dupe_7grams_quality_signal: float64, 0 – 0.7
qsc_code_frac_chars_dupe_8grams_quality_signal: float64, 0 – 0.7
qsc_code_frac_chars_dupe_9grams_quality_signal: float64, 0 – 0.69
qsc_code_frac_chars_dupe_10grams_quality_signal: float64, 0 – 0.6
qsc_code_frac_chars_replacement_symbols_quality_signal: float64, 0 – 0.01
qsc_code_frac_chars_digital_quality_signal: float64, 0 – 0.2
qsc_code_frac_chars_whitespace_quality_signal: float64, 0.01 – 0.5
qsc_code_size_file_byte_quality_signal: float64, 109 – 1.04M
qsc_code_num_lines_quality_signal: float64, 10 – 25.7k
qsc_code_num_chars_line_max_quality_signal: float64, 10 – 1k
qsc_code_num_chars_line_mean_quality_signal: float64, 5.19 – 98.7
qsc_code_frac_chars_alphabet_quality_signal: float64, 0.5 – 1
qsc_code_frac_chars_comments_quality_signal: float64, 0 – 0.8
qsc_code_cate_xml_start_quality_signal: float64, 0 – 0
qsc_code_frac_lines_dupe_lines_quality_signal: float64, 0 – 0.7 ⌀
qsc_code_cate_autogen_quality_signal: float64, 0 – 0
qsc_code_frac_lines_long_string_quality_signal: float64, 0 – 0.2 ⌀
qsc_code_frac_chars_string_length_quality_signal: float64, 0 – 0.6
qsc_code_frac_chars_long_word_length_quality_signal: float64, 0 – 0.4
qsc_code_frac_lines_string_concat_quality_signal: float64, 0 – 0.42 ⌀
qsc_code_cate_encoded_data_quality_signal: float64, 0 – 0
qsc_code_frac_chars_hex_words_quality_signal: float64, 0 – 0.33
qsc_code_frac_lines_prompt_comments_quality_signal: float64, 0 – 0.01
qsc_code_frac_lines_assert_quality_signal: float64, 0 – 0.4 ⌀
qsc_codepython_cate_ast_quality_signal: float64, 0 – 0
qsc_codepython_frac_lines_func_ratio_quality_signal: float64
qsc_codepython_cate_var_zero_quality_signal: bool
qsc_codepython_frac_lines_pass_quality_signal: float64, 0 – 0.05
qsc_codepython_frac_lines_import_quality_signal: float64, 0 – 0.3
qsc_codepython_frac_lines_simplefunc_quality_signal: float64
qsc_codepython_score_lines_no_logic_quality_signal: float64
qsc_codepython_frac_lines_print_quality_signal: float64, 0 – 0.4 ⌀
qsc_code_num_words through qsc_codepython_frac_lines_print (the same 41 columns without the _quality_signal suffix): int64, 0 – 0, except qsc_code_frac_words_unique (null), qsc_code_frac_lines_string_concat (null), and qsc_codepython_cate_ast (int64, 1 – 1)
effective: string, 1 class
hits: int64, 1 – 1
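To make the schema concrete, here is a hedged sketch of loading one shard of such a dataset and filtering on these columns with pandas; the parquet filename is illustrative, since the dump does not name one:

    import pandas as pd

    df = pd.read_parquet("shard.parquet")  # hypothetical shard of this dataset
    keep = df[
        (df["lang"] == "Python")
        & (df["max_line_length"] < 200)
        & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
    ]
    print(keep[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())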
Row 1
hexsha: d9c3b05c7fcf1f87eb65a4b552deef9342032f24 | size: 6,520 | ext: py | lang: Python
max_stars: src/Components/missions/GEMS/mcd43c.py @ GEOS-ESM/AeroApps, head 874dad6f34420c014d98eccbe81a061bdc0110cf, licenses ["NASA-1.3", "ECL-2.0", "Apache-2.0"], stars 2, 2020-12-02T14:23:30.000Z to 2021-12-31T15:39:30.000Z
max_issues: same path, repo, head, and licenses; issues 9, 2020-04-15T16:22:14.000Z to 2022-03-24T13:59:25.000Z
max_forks: src/Components/missions/SENTINEL-4/mcd43c.py @ GEOS-ESM/AeroApps, same head and licenses; forks null
content:
"""
Reads climate modeling grid 0.05 degree MCD43 BRDF files.
"""
import os
import sys
from numpy import loadtxt, array, tile, where, concatenate, flipud
from numpy import ones
from datetime import date, datetime, timedelta
from glob import glob
from pyhdf.SD import SD, HDF4Error
MISSING = 32.767
SDS = dict (
LAND = ('BRDF_Albedo_Parameter1_Band1','BRDF_Albedo_Parameter1_Band2',
'BRDF_Albedo_Parameter1_Band3','BRDF_Albedo_Parameter1_Band4',
'BRDF_Albedo_Parameter1_Band5','BRDF_Albedo_Parameter1_Band6',
'BRDF_Albedo_Parameter1_Band7',
'BRDF_Albedo_Parameter2_Band1','BRDF_Albedo_Parameter2_Band2',
'BRDF_Albedo_Parameter2_Band3','BRDF_Albedo_Parameter2_Band4',
'BRDF_Albedo_Parameter2_Band5','BRDF_Albedo_Parameter2_Band6',
'BRDF_Albedo_Parameter2_Band7',
'BRDF_Albedo_Parameter3_Band1','BRDF_Albedo_Parameter3_Band2',
'BRDF_Albedo_Parameter3_Band3','BRDF_Albedo_Parameter3_Band4',
'BRDF_Albedo_Parameter3_Band5','BRDF_Albedo_Parameter3_Band6',
'BRDF_Albedo_Parameter3_Band7'),
QUAL = ('BRDF_Albedo_Quality',
'Snow_BRDF_Albedo',
'BRDF_Albedo_Ancillary', )
)
ALIAS = dict ( BRDF_Albedo_Parameter1_Band1 = 'KISO_b1_645',
BRDF_Albedo_Parameter1_Band2 = 'KISO_b2_856',
BRDF_Albedo_Parameter1_Band3 = 'KISO_b3_465',
BRDF_Albedo_Parameter1_Band4 = 'KISO_b4_553',
BRDF_Albedo_Parameter1_Band5 = 'KISO_b5_1241',
BRDF_Albedo_Parameter1_Band6 = 'KISO_b6_1629',
BRDF_Albedo_Parameter1_Band7 = 'KISO_b7_2114',
BRDF_Albedo_Parameter2_Band1 = 'KVOL_b1_645',
BRDF_Albedo_Parameter2_Band2 = 'KVOL_b2_856',
BRDF_Albedo_Parameter2_Band3 = 'KVOL_b3_465',
BRDF_Albedo_Parameter2_Band4 = 'KVOL_b4_553',
BRDF_Albedo_Parameter2_Band5 = 'KVOL_b5_1241',
BRDF_Albedo_Parameter2_Band6 = 'KVOL_b6_1629',
BRDF_Albedo_Parameter2_Band7 = 'KVOL_b7_2114',
BRDF_Albedo_Parameter3_Band1 = 'KGEO_b1_645',
BRDF_Albedo_Parameter3_Band2 = 'KGEO_b2_856',
BRDF_Albedo_Parameter3_Band3 = 'KGEO_b3_465',
BRDF_Albedo_Parameter3_Band4 = 'KGEO_b4_553',
BRDF_Albedo_Parameter3_Band5 = 'KGEO_b5_1241',
BRDF_Albedo_Parameter3_Band6 = 'KGEO_b6_1629',
BRDF_Albedo_Parameter3_Band7 = 'KGEO_b7_2114',
)
#...........................................................................
class McD43C(object):
"""
This class implements the MODIS LAND BRDF 16-day Level 3 products, MCD43C1 (0.05 degree horz res),
"""
def __init__ (self,Path,lon,lat,Verb=1):
"""
Reads files for one day of Level 3 MCD43C1
present on a given *Path* and returns an object with
all 3 kernels coeff. On input,
Required parameters:
Path -- for now a single file. Eventually implement a single directory, or a list
of files and directories.
"""
if type(lon) is list:
lon = array(lon)
lat = array(lat)
# List of HDF files for a given date
#-----------------------------------
self.verb = Verb
self.SDS = SDS['LAND']
#self.Tfiles = glob(Path + '*.hdf')
if type(Path) is str:
self.Files = [Path]
else:
self.Files = Path
# From a list of lat and lon, return the
# dx, dy on the grid
# -------------------------------------
self.nobs = len(lon)
self._findNearest(Path,lon,lat)
# Read BRDF kernel in a MODIS tile
# ---------------------------------
self.read_BRDF()
# Result
#---
def _findNearest(self,path,lon,lat):
"""Given a list of lat, lon, return numbers to find the
position of the nearest neighbor on the grid (dx,dy)
"""
dLon = 0.05
dLat = 0.05
Lon0 = -180 - dLon
Lat0 = -90 + dLat
self.dx = (0.5+(lon-Lon0)/dLon).astype(int)
self.dy = (0.5+(lat-Lat0)/dLat).astype(int)
if self.verb:
print 'dx','dy', self.dx,self.dy
#---
def read_BRDF(self):
"""Reads MCD43C1 file with Level 3 BRDF kernels for each MODIS band."""
# Create empty lists for SDS to be read from file
# -----------------------------------------------
for name in self.SDS:
self.__dict__[name] = []
BRDF = MISSING * ones((len(self.SDS),self.nobs))
for fn in self.Files:
try:
if self.verb:
print "[] Working on "+fn
hfile = SD(fn)
except HDF4Error:
if self.verb > 2:
print "- %s: not recognized as an HDF file"%filename
return
# Read select variables (reshape to allow concatenation later)
# ------------------------------------------------------------
for sds in self.SDS:
if self.verb:
print 'sds',self.SDS.index(sds)
v = hfile.select(sds).get()
a = hfile.select(sds).attributes()
if a['scale_factor']!=1.0 or a['add_offset']!=0.0:
v = a['scale_factor'] * v + a['add_offset']
if self.verb:
print array(self.dx), BRDF.shape, BRDF[self.SDS.index(sds),:], v.shape
v = flipud(v)
BRDF[self.SDS.index(sds),:] = v[array(self.dy), array(self.dx)]
for sds in self.SDS:
self.__dict__[sds] = BRDF[self.SDS.index(sds),:]
if sds in ALIAS.keys():
self.__dict__[ALIAS[sds]] = self.__dict__[sds]
#---
#............................................................................
if __name__ == "__main__":
path = '/nobackup/3/pcastell/MODIS/MCD43C1/MCD43C1.A2005361.005.2008094071946.hdf'
lon = [-2.,-120.,15.2,17.2,170.1]
lat = [88.,40.,-20.,-20.,-55.5]
lon = np.arange(-180,180,1)
lat = np.arange(-90,90,1)
lon,lat = np.meshgrid(lon,lat)
ex = McD43C(path,lon.flatten(),lat.flatte())
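As a standalone check of the nearest-cell index arithmetic in _findNearest above (the values are illustrative, not from the original file):

    from numpy import array
    dLon, dLat = 0.05, 0.05
    Lon0, Lat0 = -180 - dLon, -90 + dLat
    lon, lat = array([-2.0]), array([88.0])
    dx = (0.5 + (lon - Lon0) / dLon).astype(int)  # -> [3561], column on the 0.05 deg grid
    dy = (0.5 + (lat - Lat0) / dLat).astype(int)  # -> [3559], row on the 0.05 deg grid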
| 36.222222
| 103
| 0.533282
| 747
| 6,520
| 4.373494
| 0.273092
| 0.137741
| 0.085706
| 0.018365
| 0.039792
| 0.012244
| 0
| 0
| 0
| 0
| 0
| 0.067998
| 0.316564
| 6,520
| 179
| 104
| 36.424581
| 0.665171
| 0.100767
| 0
| 0.056604
| 0
| 0
| 0.209649
| 0.133752
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.066038
| null | null | 0.04717
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Row 2
hexsha: 8a13a931088f76e07468fa49084284d44b5cf0eb | size: 936 | ext: py | lang: Python
max_stars / max_issues / max_forks: autolatex-master/exemplos_codigo/certificados/certificados.py @ luizgui05/autolatex., head 366eb3d88b7e60c119737f958e35cce99e8775e9, licenses ["MIT"]; counts and event dates all null
content:
import os
import sys
import sqlite3

con = None
filename = 'certificado'

# Open the database to read the names.
try:
    con = sqlite3.connect('math.db')
    cur = con.cursor()
    cur.execute('select * from math')
    data = cur.fetchall()
except sqlite3.Error, e:
    print "Error %s:" % e.args[0]
    sys.exit(1)
finally:
    if con:
        con.close()

# Generate one certificate per name.
for row in data:
    f = open(filename + '.tex', 'r+')
    old = f.readlines()
    # if the first line already holds a \def, overwrite it instead of stacking
    if old[0][1:4] == 'def':
        offset = 1
    else:
        offset = 0
    f.seek(0)
    f.write('\\def\\name {' + row[0] + '}\n')
    f.writelines(old[offset:])
    f.close()
    # Compile the LaTeX file
    try:
        os.system('pdflatex ' + filename + '.tex')
        os.system('mv ' + filename + '.pdf ' + filename + '_' + row[0].replace(' ', '_') + '.pdf')
        #os.system('xdg-open '+filename+'.pdf &')
    except OSError:
        print('LaTeX not installed.')
| 20.8
| 85
| 0.569444
| 128
| 936
| 4.148438
| 0.5625
| 0.045198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018545
| 0.251068
| 936
| 44
| 86
| 21.272727
| 0.738944
| 0.145299
| 0
| 0.0625
| 0
| 0
| 0.148428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.09375
| null | null | 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Row 3
hexsha: 8a328b7be397a48ed8f6202385b17e0dbf81357c | size: 12,156 | ext: py | lang: Python
max_stars: networks/larflow/models/larflow_uresnet.py @ LArbys/ublarcvserver, head 02381c937f49a2eab2f754017ab431c3f6fa70d7, licenses ["Apache-2.0"], stars 2, 2020-07-09T19:34:03.000Z to 2021-06-21T23:09:23.000Z
max_issues / max_forks: same path, repo, head, and licenses; counts and event dates null
content:
import torch.nn as nn
import torch as torch
import math
import torch.utils.model_zoo as model_zoo

###########################################################
#
# U-ResNet
# U-net with ResNet modules
#
# Semantic segmentation network used by MicroBooNE
# to label track/shower pixels
#
# resnet implementation from pytorch.torchvision module
# U-net from (cite)
#
# meant to be copy of caffe version
#
###########################################################

def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)

class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.stride = stride
        self.bypass = None
        if inplanes != planes or stride > 1:
            self.bypass = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.bypass is not None:
            outbp = self.bypass(x)
            out += outbp
        else:
            out += x
        out = self.relu(out)
        return out

class Bottleneck(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        super(Bottleneck, self).__init__()
        # residual path
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.stride = stride
        # if stride > 1, then we need to subsample the input
        if stride > 1:
            self.shortcut = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        else:
            self.shortcut = None

    def forward(self, x):
        if self.shortcut is None:
            bypass = x
        else:
            bypass = self.shortcut(x)

        residual = self.conv1(x)
        residual = self.bn1(residual)
        residual = self.relu(residual)

        residual = self.conv2(residual)
        residual = self.bn2(residual)
        residual = self.relu(residual)

        residual = self.conv3(residual)
        residual = self.bn3(residual)

        out = bypass + residual
        out = self.relu(out)
        return out

class PreactivationBlock(nn.Module):
    def __init__(self, inplanes, planes, stride=1):
        # fixed: the original called super(Preactivation, ...), a name that does not exist
        super(PreactivationBlock, self).__init__()
        # residual path
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        # if stride > 1, then we need to subsample the input
        if stride > 1:
            self.shortcut = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
        else:
            self.shortcut = None

    def forward(self, x):
        if self.shortcut is None:
            bypass = x
        else:
            bypass = self.shortcut(x)
        # NOTE: the original forward ends here, computing only the bypass and
        # returning nothing; this block is unused by LArFlowUResNet below.

class DoubleResNet(nn.Module):
    def __init__(self, Block, inplanes, planes, stride=1):
        super(DoubleResNet, self).__init__()
        self.res1 = Block(inplanes, planes, stride)
        self.res2 = Block(planes, planes, 1)

    def forward(self, x):
        out = self.res1(x)
        out = self.res2(out)
        return out

class ConvTransposeLayer(nn.Module):
    def __init__(self, deconv_inplanes, skip_inplanes, deconv_outplanes, res_outplanes):
        super(ConvTransposeLayer, self).__init__()
        self.deconv = nn.ConvTranspose2d(deconv_inplanes, deconv_outplanes, kernel_size=4, stride=2, padding=1, bias=False)
        self.res = DoubleResNet(BasicBlock, deconv_outplanes + skip_inplanes, res_outplanes, stride=1)

    def forward(self, x, skip_x):
        out = self.deconv(x, output_size=skip_x.size())
        # concat skip connections
        out = torch.cat([out, skip_x], 1)
        out = self.res(out)
        return out

class LArFlowUResNet(nn.Module):

    def __init__(self, num_classes=3, input_channels=3, inplanes=16, showsizes=False, use_visi=True):
        self.inplanes = inplanes
        super(LArFlowUResNet, self).__init__()
        self._showsizes = showsizes  # print size at each layer
        self.use_visi = use_visi

        # Encoder
        # stem: one big stem
        self.conv1 = nn.Conv2d(input_channels, self.inplanes, kernel_size=7, stride=1, padding=3, bias=True)  # initial conv layer
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool2d(3, stride=2, padding=1)

        self.enc_layer1 = self._make_encoding_layer(self.inplanes * 1, self.inplanes * 2, stride=1)    # 16->32
        self.enc_layer2 = self._make_encoding_layer(self.inplanes * 2, self.inplanes * 4, stride=2)    # 32->64
        self.enc_layer3 = self._make_encoding_layer(self.inplanes * 4, self.inplanes * 8, stride=2)    # 64->128
        self.enc_layer4 = self._make_encoding_layer(self.inplanes * 8, self.inplanes * 16, stride=2)   # 128->256
        self.enc_layer5 = self._make_encoding_layer(self.inplanes * 16, self.inplanes * 32, stride=2)  # 256->512

        # decoding flow
        #self.num_final_flow_features = self.inplanes
        self.num_final_flow_features = self.inplanes

        self.flow_dec_layer5 = self._make_decoding_layer(self.inplanes * 32 * 2, self.inplanes * 16, self.inplanes * 16, self.inplanes * 16)  # 512->256
        self.flow_dec_layer4 = self._make_decoding_layer(self.inplanes * 16, self.inplanes * 8, self.inplanes * 8, self.inplanes * 8)         # 256->128
        self.flow_dec_layer3 = self._make_decoding_layer(self.inplanes * 8, self.inplanes * 4, self.inplanes * 4, self.inplanes * 4)          # 128->64
        self.flow_dec_layer2 = self._make_decoding_layer(self.inplanes * 4, self.inplanes * 2, self.inplanes * 2, self.inplanes * 2)          # 64->32
        #self.flow_dec_layer1 = self._make_decoding_layer( self.inplanes*2, self.inplanes, self.inplanes ) # 32->16
        self.flow_dec_layer1 = self._make_decoding_layer(self.inplanes * 2, self.inplanes, self.inplanes, self.num_final_flow_features)       # 32->200

        # decoding matchability
        if self.use_visi:
            self.visi_dec_layer5 = self._make_decoding_layer(self.inplanes * 32 * 2, self.inplanes * 16, self.inplanes * 16, self.inplanes * 16)  # 512->256
            self.visi_dec_layer4 = self._make_decoding_layer(self.inplanes * 16, self.inplanes * 8, self.inplanes * 8, self.inplanes * 8)         # 256->128
            self.visi_dec_layer3 = self._make_decoding_layer(self.inplanes * 8, self.inplanes * 4, self.inplanes * 4, self.inplanes * 4)          # 128->64
            self.visi_dec_layer2 = self._make_decoding_layer(self.inplanes * 4, self.inplanes * 2, self.inplanes * 2, self.inplanes * 2)          # 64->32
            self.visi_dec_layer1 = self._make_decoding_layer(self.inplanes * 2, self.inplanes, self.inplanes, self.inplanes)                      # 32->16

        # 1x1 conv for flow
        self.flow_conv = nn.Conv2d(self.num_final_flow_features, 1, kernel_size=1, stride=1, padding=0, bias=True)

        # 1x1 conv for matchability
        if self.use_visi:
            self.visi_conv = nn.Conv2d(self.inplanes, 2, kernel_size=1, stride=1, padding=0, bias=True)  # 2 classes, 0=not vis, 1=vis
            self.visi_softmax = nn.LogSoftmax(dim=1)

        # initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_encoding_layer(self, inplanes, planes, stride=2):
        return DoubleResNet(BasicBlock, inplanes, planes, stride=stride)

    def _make_decoding_layer(self, inplanes, skipplanes, deconvplanes, resnetplanes):
        return ConvTransposeLayer(inplanes, skipplanes, deconvplanes, resnetplanes)

    def encode(self, x):
        # stem
        x = self.conv1(x)
        x = self.bn1(x)
        x0 = self.relu1(x)
        x = self.pool1(x0)

        x1 = self.enc_layer1(x)
        x2 = self.enc_layer2(x1)
        x3 = self.enc_layer3(x2)
        x4 = self.enc_layer4(x3)
        x5 = self.enc_layer5(x4)
        if self._showsizes:
            print "after encoding: "
            print " x1: ", x1.size()
            print " x2: ", x2.size()
            print " x3: ", x3.size()
            print " x4: ", x4.size()
            print " x5: ", x5.size()
        return x5, x0, x1, x2, x3, x4

    def flow(self, merged_encode, x0, x1, x2, x3, x4):
        """ decoding to flow prediction """
        x = self.flow_dec_layer5(merged_encode, x4)
        if self._showsizes:
            print "after decoding:"
            print " dec5: ", x.size(), " iscuda=", x.is_cuda
        x = self.flow_dec_layer4(x, x3)
        if self._showsizes:
            print " dec4: ", x.size(), " iscuda=", x.is_cuda
        x = self.flow_dec_layer3(x, x2)
        if self._showsizes:
            print " dec3: ", x.size(), " iscuda=", x.is_cuda
        x = self.flow_dec_layer2(x, x1)
        if self._showsizes:
            print " dec2: ", x.size(), " iscuda=", x.is_cuda
        x = self.flow_dec_layer1(x, x0)
        if self._showsizes:
            print " dec1: ", x.size(), " iscuda=", x.is_cuda
        return x

    def visibility(self, merged_encode, x0, x1, x2, x3, x4):
        """ decoding to visibility/matchability prediction """
        x = self.visi_dec_layer5(merged_encode, x4)
        if self._showsizes:
            print "after decoding:"
            print " dec5: ", x.size(), " iscuda=", x.is_cuda
        x = self.visi_dec_layer4(x, x3)
        if self._showsizes:
            print " dec4: ", x.size(), " iscuda=", x.is_cuda
        x = self.visi_dec_layer3(x, x2)
        if self._showsizes:
            print " dec3: ", x.size(), " iscuda=", x.is_cuda
        x = self.visi_dec_layer2(x, x1)
        if self._showsizes:
            print " dec2: ", x.size(), " iscuda=", x.is_cuda
        x = self.visi_dec_layer1(x, x0)
        if self._showsizes:
            print " dec1: ", x.size(), " iscuda=", x.is_cuda
        return x

    def forward(self, src, target):
        if self._showsizes:
            # fixed: the original printed x.size(), but no x is defined here
            print "input: ", src.size(), " is_cuda=", src.is_cuda

        src_encode, s0, s1, s2, s3, s4 = self.encode(src)
        target_encode, t0, t1, t2, t3, t4 = self.encode(target)

        merged_encode = torch.cat([target_encode, src_encode], 1)

        flowout = self.flow(merged_encode, s0, s1, s2, s3, s4)
        if self.use_visi:
            visiout = self.visibility(merged_encode, t0, t1, t2, t3, t4)

        flow_predict = self.flow_conv(flowout)
        if self.use_visi:
            visi_predict = self.visi_conv(visiout)
            visi_predict = self.visi_softmax(visi_predict)
        else:
            visi_predict = None
        if self._showsizes:
            # fixed: the original printed x.size(), but no x is defined here
            print " flow_predict: ", flow_predict.size()

        return flow_predict, visi_predict
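A minimal shape smoke test for LArFlowUResNet above, assuming a PyTorch version where plain tensors suffice (older releases would wrap inputs in torch.autograd.Variable); the 256x256 size is arbitrary but should be divisible by 32 so the five stride-2 stages and their deconvolutions line up:

    import torch
    net = LArFlowUResNet(num_classes=3, input_channels=3, inplanes=16,
                         showsizes=False, use_visi=True)
    src = torch.randn(1, 3, 256, 256)     # source-plane image
    target = torch.randn(1, 3, 256, 256)  # target-plane image
    flow_predict, visi_predict = net(src, target)
    print(flow_predict.size())  # (1, 1, 256, 256): one flow value per pixel
    print(visi_predict.size())  # (1, 2, 256, 256): log-probabilities for not-visible/visible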
| 36.286567
| 152
| 0.599786
| 1,583
| 12,156
| 4.43904
| 0.131396
| 0.107585
| 0.043546
| 0.037
| 0.562971
| 0.525829
| 0.444002
| 0.410132
| 0.362317
| 0.346236
| 0
| 0.044164
| 0.27542
| 12,156
| 334
| 153
| 36.39521
| 0.753633
| 0.069842
| 0
| 0.362791
| 0
| 0
| 0.023833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.04186
| 0.018605
| null | null | 0.093023
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Row 4
hexsha: 8a4c6c7b420769dc35e8f30f400909774d7d25e6 | size: 22,154 | ext: py | lang: Python
max_stars: gfirefly/dbentrust/dbutils.py @ handsome3163/H2Dgame-Firefly, head 2d213928977dc490909f456327e5cae80998e60d, licenses ["MIT"], stars 675, 2015-01-01T05:18:30.000Z to 2022-03-18T08:27:06.000Z
max_issues: gfirefly/dbentrust/dbutils.py @ liuis/Firefly, head fd2795b8c26de6ab63bbec23d11f18c3dfb39a50, licenses ["MIT"], issues 3, 2015-01-29T02:36:14.000Z to 2022-01-21T09:19:21.000Z
max_forks: gfirefly/dbentrust/dbutils.py @ liuis/Firefly, same head and licenses; forks 248, 2015-01-04T08:24:31.000Z to 2022-02-18T07:14:02.000Z
content:
#coding:utf8
'''
Created on 2013-8-21

@author: lan (www.9miao.com)
'''
import itertools
import datetime

def safeunicode(obj, encoding='utf-8'):
    r"""
    Converts any given object to unicode string.

        >>> safeunicode('hello')
        u'hello'
        >>> safeunicode(2)
        u'2'
        >>> safeunicode('\xe1\x88\xb4')
        u'\u1234'
    """
    t = type(obj)
    if t is unicode:
        return obj
    elif t is str:
        return obj.decode(encoding)
    elif t in [int, float, bool]:
        return unicode(obj)
    elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
        return unicode(obj)
    else:
        return str(obj).decode(encoding)

def safestr(obj, encoding='utf-8'):
    r"""
    Converts any given object to utf-8 encoded string.

        >>> safestr('hello')
        'hello'
        >>> safestr(u'\u1234')
        '\xe1\x88\xb4'
        >>> safestr(2)
        '2'
    """
    if isinstance(obj, unicode):
        return obj.encode(encoding)
    elif isinstance(obj, str):
        return obj
    elif hasattr(obj, 'next'):  # iterator
        return itertools.imap(safestr, obj)
    else:
        return str(obj)

def sqlify(obj):
    """
    converts `obj` to its proper SQL version

        >>> sqlify(None)
        'NULL'
        >>> sqlify(True)
        "'t'"
        >>> sqlify(3)
        '3'
    """
    # because `1 == True and hash(1) == hash(True)`
    # we have to do this the hard way...
    if obj is None:
        return 'NULL'
    elif obj is True:
        return "'t'"
    elif obj is False:
        return "'f'"
    elif datetime and isinstance(obj, datetime.datetime):
        return repr(obj.isoformat())
    else:
        if isinstance(obj, unicode): obj = obj.encode('utf8')
        return repr(obj)

def sqllist(lst):
    """
    Converts the arguments for use in something like a WHERE clause.

        >>> sqllist(['a', 'b'])
        'a, b'
        >>> sqllist('a')
        'a'
        >>> sqllist(u'abc')
        u'abc'
    """
    if isinstance(lst, basestring):
        return lst
    else:
        return ', '.join(lst)

def _sqllist(values):
    """
        >>> _sqllist([1, 2, 3])
        <sql: '(1, 2, 3)'>
    """
    items = []
    items.append('(')
    for i, v in enumerate(values):
        if i != 0:
            items.append(', ')
        items.append(sqlparam(v))
    items.append(')')
    return SQLQuery(items)

def sqlquote(a):
    """
    Ensures `a` is quoted properly for use in a SQL query.

        >>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
        <sql: "WHERE x = 't' AND y = 3">
        >>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
        <sql: "WHERE x = 't' AND y IN (2, 3)">
    """
    if isinstance(a, list):
        return _sqllist(a)
    else:
        return sqlparam(a).sqlquery()

def _interpolate(sformat):
    """
    Takes a format string and returns a list of 2-tuples of the form
    (boolean, string) where boolean says whether string should be evaled
    or not.

    from <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
    """
    from tokenize import tokenprog
    tokenprog = tokenprog

    def matchorfail(text, pos):
        match = tokenprog.match(text, pos)
        if match is None:
            raise _ItplError(text, pos)
        return match, match.end()

    namechars = "abcdefghijklmnopqrstuvwxyz" \
        "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
    chunks = []
    pos = 0

    while 1:
        dollar = sformat.find("$", pos)
        if dollar < 0:
            break
        nextchar = sformat[dollar + 1]

        if nextchar == "{":
            chunks.append((0, sformat[pos:dollar]))
            pos, level = dollar + 2, 1
            while level:
                match, pos = matchorfail(sformat, pos)
                tstart, tend = match.regs[3]
                token = sformat[tstart:tend]
                if token == "{":
                    level = level + 1
                elif token == "}":
                    level = level - 1
            chunks.append((1, sformat[dollar + 2:pos - 1]))
        elif nextchar in namechars:
            chunks.append((0, sformat[pos:dollar]))
            match, pos = matchorfail(sformat, dollar + 1)
            while pos < len(sformat):
                if sformat[pos] == "." and \
                   pos + 1 < len(sformat) and sformat[pos + 1] in namechars:
                    match, pos = matchorfail(sformat, pos + 1)
                elif sformat[pos] in "([":
                    pos, level = pos + 1, 1
                    while level:
                        match, pos = matchorfail(sformat, pos)
                        tstart, tend = match.regs[3]
                        token = sformat[tstart:tend]
                        if token[0] in "([":
                            level = level + 1
                        elif token[0] in ")]":
                            level = level - 1
                else:
                    break
            chunks.append((1, sformat[dollar + 1:pos]))
        else:
            chunks.append((0, sformat[pos:dollar + 1]))
            pos = dollar + 1 + (nextchar == "$")

    if pos < len(sformat):
        chunks.append((0, sformat[pos:]))
    return chunks

def sqlwhere(dictionary, grouping=' AND '):
    """
    Converts a `dictionary` to an SQL WHERE clause `SQLQuery`.

        >>> sqlwhere({'cust_id': 2, 'order_id':3})
        <sql: 'order_id = 3 AND cust_id = 2'>
        >>> sqlwhere({'cust_id': 2, 'order_id':3}, grouping=', ')
        <sql: 'order_id = 3, cust_id = 2'>
        >>> sqlwhere({'a': 'a', 'b': 'b'}).query()
        'a = %s AND b = %s'
    """
    return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in dictionary.items()], grouping)

def reparam(string_, dictionary):
    """
    Takes a string and a dictionary and interpolates the string
    using values from the dictionary. Returns an `SQLQuery` for the result.

        >>> reparam("s = $s", dict(s=True))
        <sql: "s = 't'">
        >>> reparam("s IN $s", dict(s=[1, 2]))
        <sql: 's IN (1, 2)'>
    """
    dictionary = dictionary.copy()  # eval mucks with it
    result = []
    for live, chunk in _interpolate(string_):
        if live:
            v = eval(chunk, dictionary)
            result.append(sqlquote(v))
        else:
            result.append(chunk)
    return SQLQuery.join(result, '')

class UnknownParamstyle(Exception):
    """
    raised for unsupported db paramstyles

    (currently supported: qmark, numeric, format, pyformat)
    """
    pass

class _ItplError(ValueError):
    def __init__(self, text, pos):
        ValueError.__init__(self)
        self.text = text
        self.pos = pos

    def __str__(self):
        return "unfinished expression in %s at char %d" % (
            repr(self.text), self.pos)

class SQLParam(object):
    """
    Parameter in SQLQuery.

        >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
        >>> q
        <sql: "SELECT * FROM test WHERE name='joe'">
        >>> q.query()
        'SELECT * FROM test WHERE name=%s'
        >>> q.values()
        ['joe']
    """
    __slots__ = ["value"]

    def __init__(self, value):
        self.value = value

    def get_marker(self, paramstyle='pyformat'):
        if paramstyle == 'qmark':
            return '?'
        elif paramstyle == 'numeric':
            return ':1'
        elif paramstyle is None or paramstyle in ['format', 'pyformat']:
            return '%s'
        raise UnknownParamstyle, paramstyle

    def sqlquery(self):
        return SQLQuery([self])

    def __add__(self, other):
        return self.sqlquery() + other

    def __radd__(self, other):
        return other + self.sqlquery()

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return '<param: %s>' % repr(self.value)

sqlparam = SQLParam

class SQLQuery(object):
    """
    You can pass this sort of thing as a clause in any db function.
    Otherwise, you can pass a dictionary to the keyword argument `vars`
    and the function will call reparam for you.

    Internally, consists of `items`, which is a list of strings and
    SQLParams, which get concatenated to produce the actual query.
    """
    __slots__ = ["items"]

    # tested in sqlquote's docstring
    def __init__(self, items=None):
        r"""Creates a new SQLQuery.

            >>> SQLQuery("x")
            <sql: 'x'>
            >>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
            >>> q
            <sql: 'SELECT * FROM test WHERE x=1'>
            >>> q.query(), q.values()
            ('SELECT * FROM test WHERE x=%s', [1])
            >>> SQLQuery(SQLParam(1))
            <sql: '1'>
        """
        if items is None:
            self.items = []
        elif isinstance(items, list):
            self.items = items
        elif isinstance(items, SQLParam):
            self.items = [items]
        elif isinstance(items, SQLQuery):
            self.items = list(items.items)
        else:
            self.items = [items]

        # Take care of SQLLiterals
        for i, item in enumerate(self.items):
            if isinstance(item, SQLParam) and isinstance(item.value, SQLLiteral):
                self.items[i] = item.value.v

    def append(self, value):
        self.items.append(value)

    def __add__(self, other):
        if isinstance(other, basestring):
            items = [other]
        elif isinstance(other, SQLQuery):
            items = other.items
        else:
            return NotImplemented
        return SQLQuery(self.items + items)

    def __radd__(self, other):
        if isinstance(other, basestring):
            items = [other]
        else:
            return NotImplemented
        return SQLQuery(items + self.items)

    def __iadd__(self, other):
        if isinstance(other, (basestring, SQLParam)):
            self.items.append(other)
        elif isinstance(other, SQLQuery):
            self.items.extend(other.items)
        else:
            return NotImplemented
        return self

    def __len__(self):
        return len(self.query())

    def query(self, paramstyle=None):
        """
        Returns the query part of the sql query.

            >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
            >>> q.query()
            'SELECT * FROM test WHERE name=%s'
            >>> q.query(paramstyle='qmark')
            'SELECT * FROM test WHERE name=?'
        """
        s = []
        for x in self.items:
            if isinstance(x, SQLParam):
                x = x.get_marker(paramstyle)
                s.append(safestr(x))
            else:
                x = safestr(x)
                # automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query looks already escaped
                if paramstyle in ['format', 'pyformat']:
                    if '%' in x and '%%' not in x:
                        x = x.replace('%', '%%')
                s.append(x)
        return "".join(s)

    def values(self):
        """
        Returns the values of the parameters used in the sql query.

            >>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
            >>> q.values()
            ['joe']
        """
        return [i.value for i in self.items if isinstance(i, SQLParam)]

    def join(items, sep=' ', prefix=None, suffix=None, target=None):
        """
        Joins multiple queries.

            >>> SQLQuery.join(['a', 'b'], ', ')
            <sql: 'a, b'>

        Optionally, prefix and suffix arguments can be provided.

            >>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
            <sql: '(a, b)'>

        If target argument is provided, the items are appended to target instead of creating a new SQLQuery.
        """
        if target is None:
            target = SQLQuery()

        target_items = target.items

        if prefix:
            target_items.append(prefix)

        for i, item in enumerate(items):
            if i != 0:
                target_items.append(sep)
            if isinstance(item, SQLQuery):
                target_items.extend(item.items)
            else:
                target_items.append(item)

        if suffix:
            target_items.append(suffix)
        return target

    join = staticmethod(join)

    def _str(self):
        try:
            return self.query() % tuple([sqlify(x) for x in self.values()])
        except (ValueError, TypeError):
            return self.query()

    def __str__(self):
        return safestr(self._str())

    def __unicode__(self):
        return safeunicode(self._str())

    def __repr__(self):
        return '<sql: %s>' % repr(str(self))

class SQLLiteral:
    """
    Protects a string from `sqlquote`.

        >>> sqlquote('NOW()')
        <sql: "'NOW()'">
        >>> sqlquote(SQLLiteral('NOW()'))
        <sql: 'NOW()'>
    """
    def __init__(self, v):
        self.v = v

    def __repr__(self):
        return self.v

class SQLProducer:
    """Database"""
    def __init__(self):
        """Creates a database.
        """
        pass

    def query(self, sql_query, processed=False, svars=None):
        """
        Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
        If `processed=True`, `vars` is a `reparam`-style list to use
        instead of interpolating.

            >>> db = DB(None, {})
            >>> db.query("SELECT * FROM foo", _test=True)
            <sql: 'SELECT * FROM foo'>
            >>> db.query("SELECT * FROM foo WHERE x = $x", vars=dict(x='f'), _test=True)
            <sql: "SELECT * FROM foo WHERE x = 'f'">
            >>> db.query("SELECT * FROM foo WHERE x = " + sqlquote('f'), _test=True)
            <sql: "SELECT * FROM foo WHERE x = 'f'">
        """
        if svars is None:
            svars = {}

        if not processed and not isinstance(sql_query, SQLQuery):
            sql_query = reparam(sql_query, svars)

        return sql_query

    def sql_clauses(self, what, tables, where, group, order, limit, offset):
        return (
            ('SELECT', what),
            ('FROM', sqllist(tables)),
            ('WHERE', where),
            ('GROUP BY', group),
            ('ORDER BY', order),
            ('LIMIT', limit),
            ('OFFSET', offset))

    def gen_clause(self, sql, val, svars):
        if isinstance(val, (int, long)):
            if sql == 'WHERE':
                nout = 'id = ' + sqlquote(val)
            else:
                nout = SQLQuery(val)
        elif isinstance(val, (list, tuple)) and len(val) == 2:
            nout = SQLQuery(val[0], val[1])  # backwards-compatibility
        elif isinstance(val, SQLQuery):
            nout = val
        else:
            nout = reparam(val, svars)

        def xjoin(a, b):
            if a and b: return a + ' ' + b
            else: return a or b

        return xjoin(sql, nout)

    def _where(self, where, svars):
        if isinstance(where, (int, long)):
            where = "id = " + sqlparam(where)
        elif isinstance(where, (list, tuple)) and len(where) == 2:
            where = SQLQuery(where[0], where[1])
        elif isinstance(where, SQLQuery):
            pass
        else:
            where = reparam(where, svars)
        return where

    def select(self, tables, svars=None, what='*', where=None, order=None, group=None,
               limit=None, offset=None, _test=False):
        """
        Selects `what` from `tables` with clauses `where`, `order`,
        `group`, `limit`, and `offset`. Uses vars to interpolate.
        Otherwise, each clause can be a SQLQuery.

            >>> db = DB(None, {})
            >>> db.select('foo', _test=True)
            <sql: 'SELECT * FROM foo'>
            >>> db.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
            <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
        """
        if svars is None: svars = {}
        sql_clauses = self.sql_clauses(what, tables, where, group, order, limit, offset)
        clauses = [self.gen_clause(sql, val, svars) for sql, val in sql_clauses if val is not None]
        qout = SQLQuery.join(clauses)
        if _test: return qout
        return self.query(qout, processed=True)

    def insert(self, tablename, seqname=None, _test=False, **values):
        """
        Inserts `values` into `tablename`. Returns current sequence ID.
        Set `seqname` to the ID if it's not the default, or to `False`
        if there isn't one.

            >>> db = DB(None, {})
            >>> q = db.insert('foo', name='bob', age=2, created=SQLLiteral('NOW()'), _test=True)
            >>> q
            <sql: "INSERT INTO foo (age, name, created) VALUES (2, 'bob', NOW())">
            >>> q.query()
            'INSERT INTO foo (age, name, created) VALUES (%s, %s, NOW())'
            >>> q.values()
            [2, 'bob']
        """
        def q(x): return "(" + x + ")"

        if values:
            _keys = SQLQuery.join(values.keys(), ', ')
            _values = SQLQuery.join([sqlparam(v) for v in values.values()], ', ')
            sql_query = "INSERT INTO %s " % tablename + q(_keys) + ' VALUES ' + q(_values)
        else:
            sql_query = SQLQuery(self._get_insert_default_values_query(tablename))

        return sql_query

    def _get_insert_default_values_query(self, table):
        return "INSERT INTO %s DEFAULT VALUES" % table

    def multiple_insert(self, tablename, values, seqname=None, _test=False):
        """
        Inserts multiple rows into `tablename`. The `values` must be a list of dictionaries,
        one for each row to be inserted, each with the same set of keys.
        Returns the list of ids of the inserted rows.
        Set `seqname` to the ID if it's not the default, or to `False`
        if there isn't one.

            >>> db = DB(None, {})
            >>> db.supports_multiple_insert = True
            >>> values = [{"name": "foo", "email": "foo@example.com"}, {"name": "bar", "email": "bar@example.com"}]
            >>> db.multiple_insert('person', values=values, _test=True)
            <sql: "INSERT INTO person (name, email) VALUES ('foo', 'foo@example.com'), ('bar', 'bar@example.com')">
        """
        if not values:
            return []

        if not self.supports_multiple_insert:
            out = [self.insert(tablename, seqname=seqname, _test=_test, **v) for v in values]
            if seqname is False:
                return None
            else:
                return out

        keys = values[0].keys()
        #@@ make sure all keys are valid

        # make sure all rows have same keys.
        for v in values:
            if v.keys() != keys:
                raise ValueError, 'Bad data'

        sql_query = SQLQuery('INSERT INTO %s (%s) VALUES ' % (tablename, ', '.join(keys)))

        for i, row in enumerate(values):
            if i != 0:
                sql_query.append(", ")
            SQLQuery.join([SQLParam(row[k]) for k in keys], sep=", ", target=sql_query, prefix="(", suffix=")")

        if _test: return sql_query

        db_cursor = self._db_cursor()
        if seqname is not False:
            sql_query = self._process_insert_query(sql_query, tablename, seqname)

        if isinstance(sql_query, tuple):
            # for some databases, a separate query has to be made to find
            # the id of the inserted row.
            q1, q2 = sql_query
            self._db_execute(db_cursor, q1)
            self._db_execute(db_cursor, q2)
        else:
            self._db_execute(db_cursor, sql_query)

        try:
            out = db_cursor.fetchone()[0]
            out = range(out - len(values) + 1, out + 1)
        except Exception:
            out = None

        if not self.ctx.transactions:
            self.ctx.commit()
        return out

    def update(self, tables, where, svars=None, _test=False, **values):
        """
        Update `tables` with clause `where` (interpolated using `vars`)
        and setting `values`.

            >>> db = DB(None, {})
            >>> name = 'Joseph'
            >>> q = db.update('foo', where='name = $name', name='bob', age=2,
            ...     created=SQLLiteral('NOW()'), vars=locals(), _test=True)
            >>> q
            <sql: "UPDATE foo SET age = 2, name = 'bob', created = NOW() WHERE name = 'Joseph'">
            >>> q.query()
            'UPDATE foo SET age = %s, name = %s, created = NOW() WHERE name = %s'
            >>> q.values()
            [2, 'bob', 'Joseph']
        """
        if svars is None: svars = {}
        where = self._where(where, svars)

        query = (
            "UPDATE " + sqllist(tables) +
            " SET " + sqlwhere(values, ', ') +
            " WHERE " + where)

        if _test: return query

        db_cursor = self._db_cursor()
        self._db_execute(db_cursor, query)
        if not self.ctx.transactions:
            self.ctx.commit()
        return db_cursor.rowcount

    def delete(self, table, where, using=None, svars=None, _test=False):
        """
        Deletes from `table` with clauses `where` and `using`.

            >>> db = DB(None, {})
            >>> name = 'Joe'
            >>> db.delete('foo', where='name = $name', vars=locals(), _test=True)
            <sql: "DELETE FROM foo WHERE name = 'Joe'">
        """
        if svars is None:
            svars = {}
        where = self._where(where, svars)

        q = 'DELETE FROM ' + table
        if using:
            q += ' USING ' + sqllist(using)
        if where:
            q += ' WHERE ' + where
        return q

sqlproducer = SQLProducer()
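A short demonstration of composing queries with the classes above, using the _test path that avoids a live database (Python 2, matching the file; expected reprs follow the doctests):

    producer = SQLProducer()
    q = producer.select(['foo', 'bar'], where="foo.bar_id = bar.id", limit=5, _test=True)
    print repr(q)      # <sql: 'SELECT * FROM foo, bar WHERE foo.bar_id = bar.id LIMIT 5'>
    q2 = producer.query("SELECT * FROM foo WHERE x = $x", svars=dict(x='f'))
    print q2.query()   # SELECT * FROM foo WHERE x = %s
    print q2.values()  # ['f']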
| 31.693848
| 115
| 0.510743
| 2,510
| 22,154
| 4.4251
| 0.147809
| 0.015846
| 0.012605
| 0.017106
| 0.231476
| 0.174124
| 0.124066
| 0.094715
| 0.08121
| 0.073467
| 0
| 0.009403
| 0.35673
| 22,154
| 699
| 116
| 31.693848
| 0.769981
| 0.021802
| 0
| 0.235135
| 0
| 0
| 0.033266
| 0.004527
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.008108
| 0.008108
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Row 5
hexsha: 8a59e89d09e32fec1b404a96ad1edf1ccd223adb | size: 8,871 | ext: py | lang: Python
max_stars: tests/test_preempt_return.py @ vpv11110000/pyss, head bc2226e2e66e0b551a09ae6ab6835b0bb6c7f32b, licenses ["MIT"], stars null
max_issues: same path, repo, head, and licenses; issues 2, 2017-09-05T11:12:05.000Z to 2017-09-07T19:23:15.000Z
max_forks: same path, repo, head, and licenses; forks null
content:
# #!/usr/bin/python
# -*- coding: utf-8 -*-
# test_preempt_return.py
# pylint: disable=line-too-long,missing-docstring,bad-whitespace, unused-argument, too-many-locals

import sys
import os
import random
import unittest

DIRNAME_MODULE = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(sys.argv[0])))) + os.sep
sys.path.append(DIRNAME_MODULE)
sys.path.append(DIRNAME_MODULE + "pyss" + os.sep)

from pyss import pyssobject
from pyss.pyss_model import PyssModel
from pyss.segment import Segment
from pyss.generate import Generate
from pyss.terminate import Terminate
from pyss import logger
from pyss.table import Table
from pyss.handle import Handle
from pyss.enter import Enter
from pyss.leave import Leave
from pyss.storage import Storage
from pyss.advance import Advance
from pyss.preempt import Preempt
from pyss.g_return import GReturn
from pyss.facility import Facility
from pyss.seize import Seize
from pyss.release import Release
from pyss.transfer import Transfer
from pyss.test import Test
from pyss.pyss_const import *

class TestPreemptReturn(unittest.TestCase):
    def setUp(self):
        pass

    def tearDown(self):
        pass

    # @unittest.skip("testing skipping test_preempt_return_001")
    def test_preempt_return_001(self):
        """Preempt - Return test.

        A single transact is created at time 1.
        It preempts facility F_1 for 5 time units.
        It leaves the model at time 6.
        """
        logger.info("--- test_preempt_return_001 ----------------------------------")

        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        #
        list_all_transact = []
        #
        MAX_TIME = 20
        #
        F_1 = "F_1"
        # facility (single-channel device)
        Facility(m, facilityName=F_1)

        #
        def funcTransactTo_list_all_transact(owner, transact):
            # stash the transacts in a list
            list_all_transact.append(transact)

        ### SEGMENT ----------------------------
        # a single transact is generated at time 1
        Generate(sgm, med_value=None,
                 modificatorFunc=None,
                 first_tx=1,
                 max_amount=1)
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Preempt(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Advance(sgm, meanTime=5, modificatorFunc=None)
        GReturn(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: not self.assertNotIn(F_1, t[FACILITY]))
        #
        Terminate(sgm, deltaTerminate=0)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
        for t in list_all_transact:
            self.assertEqual(t[TIME_CREATED], 1)
            self.assertEqual(t[TERMINATED_TIME], 6)
            print str(["%s:%s" % (k, t[k])
                       for k in t.keys() if k
                       in [TIME_CREATED, TERMINATED_TIME]])

    # @unittest.skip("testing skipping test_preempt_return_002")
    def test_preempt_return_002(self):
        """Preempt - Return test.

        Transact A is created at time 1.
        It is served by facility F_1 for 3 time units.
        Transact B is created at time 2.
        It preempts the facility for 5 time units.
        Transact B leaves the model at time 7.
        Transact A leaves the model at time 9.
        Service of transact A is interrupted from time 2 to time 7.
        """
        logger.info("--- test_preempt_return_002 ----------------------------------")
        ### MODEL ----------------------------------
        m = PyssModel()
        sgm = Segment(m)
        #
        m[OPTIONS].setAllFalse()
        MAX_TIME = 20
        # CONSTS
        TRANSACT_A = "A"
        TRANSACT_B = "B"
        #
        list_all_transact = []
        tA = []
        tB = []
        #
        F_1 = "F_1"
        # facility (single-channel device)
        facility_1 = Facility(m, facilityName=F_1)

        #
        def funcTransactTo_list_all_transact(owner, transact):
            # stash the transacts in a list
            list_all_transact.append(transact)

        def setTransactLabel(owner, transact):
            if transact[NUM] == 1:
                transact[LABEL] = TRANSACT_A
                tA.append(transact)
            elif transact[NUM] == 2:
                transact[LABEL] = TRANSACT_B
                tB.append(transact)

        # condition-check function
        def checkTest(o):
            t = m.getCurrentTransact()
            if t[LABEL] == TRANSACT_B:
                return False
            return True

        def printAllTransact(owner, transact):
            print "Time=%s" % str(m.getCurTime())
            print "\n".join([str(t) for t in list_all_transact])
            print "tA=%s" % str(tA[0])
            print "tB=%s" % str(tB[0])

        ### SEGMENT ----------------------------
        # one transact is generated at time 1, a second at time 2
        Generate(sgm,
                 med_value=1,
                 modificatorFunc=None,
                 first_tx=1,
                 max_amount=2)
        # auxiliary operations
        Handle(sgm, handlerFunc=funcTransactTo_list_all_transact)
        Handle(sgm, handlerFunc=setTransactLabel)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        # the first transact passes; the second is routed to the "to_preempt" label
        Test(sgm, funcCondition=checkTest, move2block="to_preempt")
        # first transact only
        Seize(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Advance(sgm, meanTime=3, modificatorFunc=None)
        Release(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Transfer(sgm, funcTransfer=lambda o, t: o.findBlockByLabel("to_term"))
        #---
        # second transact only
        Preempt(sgm, facilityName=F_1, label="to_preempt")
        # test
        # .addBlock(handle.Handle(handlerFunc=lambda o,t:self.assertEqual(tA[0][REMAIND_TIME], None)))
        Handle(sgm, handlerFunc=printAllTransact)
        Handle(sgm, handlerFunc=lambda o, t: self.assertIn(F_1, t[FACILITY]))
        #
        Handle(sgm, handlerFunc=printAllTransact)
        Advance(sgm, meanTime=5, modificatorFunc=None)
        GReturn(sgm, facilityName=F_1)
        # test
        Handle(sgm, handlerFunc=lambda o, t: self.assertEqual(tA[0][REMAIND_TIME], 2))
        Handle(sgm, handlerFunc=lambda o, t: self.assertEqual(tA[0][SCHEDULED_TIME], 9))
        Handle(sgm, handlerFunc=lambda o, t: self.assertNotIn(F_1, t[FACILITY]))
        #
        Handle(sgm, handlerFunc=printAllTransact)
        # all transacts
        Terminate(sgm, label="to_term", deltaTerminate=0)
        # RUN ----------------------
        m.start(terminationCount=MAX_TIME, maxTime=MAX_TIME)
        # TESTS ----------------------
        for t in list_all_transact:
            # Transact A is created at time 1 and served by F_1 for 3 time units.
            # Transact B is created at time 2 and preempts the facility for 5 time units.
            # B leaves the model at time 7; A leaves at time 9.
            # Service of A is interrupted from time 2 to time 7.
            print str(["%s:%s" % (k, t[k])
                       for k in t.keys() if k
                       in [TIME_CREATED, TERMINATED_TIME, LIFE_TIME_LIST]])
            if t[LABEL] == TRANSACT_A:
                self.assertEqual(t[TIME_CREATED], 1)
                self.assertEqual(t[REMAIND_TIME], 2)
                self.assertEqual(t[TERMINATED_TIME], 9)
                self.assertListEqual(t[LIFE_TIME_LIST], [
                    {'start': 1, 'state': 'actived'},
                    {'start': 2, 'state': 'preempted'},
                    {'start': 7, 'state': 'actived'},
                    {'start': 9, 'state': 'deleted'}])
            elif t[LABEL] == TRANSACT_B:
                self.assertEqual(t[TIME_CREATED], 2)
                self.assertEqual(t[TERMINATED_TIME], 7)
                self.assertListEqual(t[LIFE_TIME_LIST], [
                    {'start': 2, 'state': 'actived'},
                    {'start': 7, 'state': 'deleted'}])

if __name__ == '__main__':
    unittest.main(module="test_preempt_return")
| 35.342629
| 106
| 0.578289
| 1,021
| 8,871
| 4.89618
| 0.199804
| 0.009202
| 0.064013
| 0.041808
| 0.602721
| 0.556511
| 0.524505
| 0.458092
| 0.434687
| 0.417083
| 0
| 0.016611
| 0.294217
| 8,871
| 250
| 107
| 35.484
| 0.781824
| 0.153872
| 0
| 0.356643
| 0
| 0
| 0.048515
| 0.01676
| 0
| 0
| 0
| 0
| 0.132867
| 0
| null | null | 0.013986
| 0.167832
| null | null | 0.06993
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
Row 6
hexsha: 8a75b4a74e6ecd635d9404db9ea5df06d5114069 | size: 10,282 | ext: py | lang: Python
max_stars: bufr_extract_unique_stations.py @ glamod/glamod-misc, head 4c8743dd3aa436377017c49bec990b11fe1c6f7d, licenses ["BSD-3-Clause"], stars null
max_issues: same path, repo, head, and licenses; issues 16, 2018-10-23T08:06:18.000Z to 2018-10-30T10:20:01.000Z
max_forks: same path, repo, head, and licenses; forks null
content:
#!/usr/bin/python2.7
"""
Extract unique set of station locations (and names) along with number of obs
RJHD - Exeter - October 2017
"""
# ECMWF import defaults
import traceback
import sys
from eccodes import *
# RJHD imports
import cartopy
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import gc
VERBOSE = 1 # verbose error reporting.
ATTRS = [
'code',
'units',
'scale',
'reference',
'width'
]
INTMDI = 2147483647
#***************************************************
def process_file(infilename, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year):
infile = open(infilename)
year = int(infilename.split(".")[0].split("_")[-1])
cmatch = 0
counter = 0
# loop all messages (with stop statement)
while 1:
"""OPEN MESSAGE"""
# get handle for message
bufr = codes_bufr_new_from_file(infile)
if bufr is None:
break
if counter%100000 == 0:
print "message: {:d}".format(counter)
# we need to instruct ecCodes to expand all the descriptors
# i.e. unpack the data values
codes_set(bufr, 'unpack', 1)
"""ITERATOR TO EXTRACT KEYS"""
these_keys = []
# get BUFR key iterator
iterid = codes_bufr_keys_iterator_new(bufr)
# loop over the keys
while codes_bufr_keys_iterator_next(iterid):
# print key name
keyname = codes_bufr_keys_iterator_get_name(iterid)
# print(" %s" % keyname)
these_keys += [keyname]
# delete the key iterator
codes_bufr_keys_iterator_delete(iterid)
# Use these to select obs from land/marine surface
name_keys = ["#1#shipOrMobileLandStationIdentifier", "#1#stationNumber"]
processed = False
for nk in name_keys:
if nk in these_keys:
try:
name = codes_get(bufr, nk)
lat = codes_get(bufr, "#1#latitude")
lon = codes_get(bufr, "#1#longitude")
sloc = tloc = nloc = [-1]
if name in station_names:
sloc, = np.where(station_names == name)
if lat in latitudes:
tloc, = np.where(latitudes == lat)
if lon in longitudes:
nloc, = np.where(longitudes == lon)
if tloc[0] == -1 and nloc[0] == -1:
# if not in list, then add
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif (tloc[0] != -1 or nloc[0] != -1) and tloc[0] != nloc[0]:
# add if one element of position is unique
station_names = np.append(station_names, name)
latitudes = np.append(latitudes, lat)
longitudes = np.append(longitudes, lon)
observations = np.append(observations, 1)
start_year = np.append(start_year, year)
end_year = np.append(end_year, year)
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
fixed_station = np.append(fixed_station, True)
else:
fixed_station = np.append(fixed_station, False)
elif tloc[0] != -1 and tloc[0] == nloc[0]:
# if position matches exactly, up observation counter
observations[tloc[0]] += 1
end_year[tloc[0]] = year
# allow splitting of land and marine/mobile
if nk == "#1#stationNumber":
if fixed_station[tloc[0]] != True:
# if listed as land and now marine, take marine
fixed_station[tloc[0]] = False
else:
if fixed_station[tloc[0]] != False:
# easier to leave as mobile/marine than to move
# hopefully will stand out later
pass
else:
cmatch += 1
processed = True
except CodesInternalError:
raw_input("key error?")
# check for new keys which give station ID information
if not processed:
other_keys = ["#1#carrierBalloonOrAircraftIdentifier", "#1#aircraftFlightNumber"]
new_key = True
for ok in other_keys:
if ok in these_keys: new_key = False
if new_key:
raw_input(these_keys)
# if counter > 10000: break
counter += 1
codes_release(bufr)
# print "Number of unique locations in this year: {}".format(len(latitudes))
return station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year # process_file
#***************************************************
def scatter_map(outname, data, lons, lats, cmap, bounds, cb_label, title = "", figtext = "", doText = False):
'''
Standard scatter map
:param str outname: output filename root
:param array data: data to plot
:param array lons: longitudes
:param array lats: latitudes
:param obj cmap: colourmap to use
:param array bounds: bounds for discrete colormap
:param str cb_label: colorbar label
'''
norm=mpl.cm.colors.BoundaryNorm(bounds,cmap.N)
fig = plt.figure(figsize =(10,6.5))
plt.clf()
ax = plt.axes([0.05, 0.10, 0.90, 0.90], projection=cartopy.crs.Robinson())
ax.gridlines() #draw_labels=True)
ax.add_feature(cartopy.feature.LAND, zorder = 0, facecolor = "0.9", edgecolor = "k")
ax.coastlines()
ext = ax.get_extent() # save the original extent
scatter = plt.scatter(lons, lats, c = data, cmap = cmap, norm = norm, s=10, \
transform = cartopy.crs.Geodetic(), edgecolor = "r", linewidth = 0.1)
cb=plt.colorbar(scatter, orientation = 'horizontal', pad = 0.05, fraction = 0.05, \
aspect = 30, ticks = bounds[1:-1], label = cb_label, drawedges=True)
# thicken border of colorbar and the dividers
# http://stackoverflow.com/questions/14477696/customizing-colorbar-border-color-on-matplotlib
# cb.set_ticklabels(["{:g}".format(b) for b in bounds[1:-1]])
# cb.outline.set_color('k')
# cb.outline.set_linewidth(2)
cb.dividers.set_color('k')
cb.dividers.set_linewidth(2)
ax.set_extent(ext, ax.projection) # fix the extent change from colormesh
plt.title(title)
if doText: plt.text(0.01, 0.98, "#stations: {}".format(data.shape[0]), transform = ax.transAxes, fontsize = 10)
plt.savefig(outname)
plt.close()
return # scatter_map
#***************************************************
def main(ms = "era40_", year = 1980):
LOCS = "/group_workspaces/jasmin2/c3s311a_lot2/data/incoming/mars/v20170628/data/"
print year
station_names = np.array([])
fixed_station = np.array([])
latitudes = np.array([])
longitudes = np.array([])
observations = np.array([])
start_year = np.array([])
end_year = np.array([])
if ms == "erai_" and year < 1979:
return
else:
INFILE = "{}mars_{}{}.bufr".format(LOCS, ms, year)
try:
station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year = \
process_file(INFILE, station_names, fixed_station, latitudes, longitudes, observations, start_year, end_year)
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
    land = np.where(fixed_station == True)
    marine = np.where(fixed_station == False)
bounds = np.linspace(0,max(observations),10).astype(int)
cmap = plt.cm.YlOrRd_r
if ms == "erai_":
title = "MARS - SYNOP - {}".format(year)
else:
title = "MARS - ERA40 - {}".format(year)
scatter_map("mars_{}{}_land_observations.png".format(ms, year), observations[land], longitudes[land], latitudes[land], cmap, bounds, "Number of Observations", title, doText = True)
scatter_map("mars_{}{}_marine_observations.png".format(ms, year), observations[marine], longitudes[marine], latitudes[marine], cmap, bounds, "Number of Observations", title)
station_names = 0
fixed_station = 0
latitudes = 0
longitudes = 0
observations = 0
start_year = 0
end_year = 0
land = 0
marine = 0
gc.collect()
return # main
#***************************************************
if __name__ == "__main__":
import argparse
# set up keyword arguments
parser = argparse.ArgumentParser()
parser.add_argument('--ms', dest='ms', action='store', default = "era40_",
help='Run on ERA40 ["era40_"] (default) or ERA-I ["erai_"] data')
    parser.add_argument('--year', dest='year', action='store', type=int, default = 1980,
                        help='Which year to process - default 1980')
args = parser.parse_args()
main(ms = args.ms, year = args.year)
sys.exit()
#***************************************************
# END
#***************************************************
| 33.167742
| 184
| 0.540751
| 1,142
| 10,282
| 4.74606
| 0.283713
| 0.042066
| 0.012177
| 0.017712
| 0.240959
| 0.228967
| 0.192066
| 0.1869
| 0.1869
| 0.1869
| 0
| 0.025263
| 0.326298
| 10,282
| 309
| 185
| 33.275081
| 0.757182
| 0.15814
| 0
| 0.181287
| 0
| 0
| 0.080679
| 0.028656
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.005848
| 0.052632
| null | null | 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a7a7334b3428135d28ee8a3da56e39eed250254
| 1,564
|
py
|
Python
|
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
day16/solve16.py
|
jmacarthur/aoc2017
|
2a3096aabf464ef52c05f9437498035cfb5ca1a6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import copy
stage_length = 16
stage = map(chr, range(ord('a'),ord('a')+stage_length))
def spin(amount):
"""To save time, this function isn't used except at the end.
Normally, a counter marks the start of the stage and this changes
instead. """
global stage
stage = stage[amount:] + stage[:amount]
def swap(pos1, pos2):
global stage
(stage[pos1], stage[pos2]) = (stage[pos2], stage[pos1])
with open(sys.argv[1], 'rt') as f:
program = ",".join(f.readlines()).split(",")
n = 0
pos = 0
arguments_list = [x[1:].strip().split("/") for x in program]
action_list = [x[0] for x in program]
history = []
# Change this to 1 for the solution to part 1.
iterations = 1000000000
while n<iterations:
for s in range(0,len(program)):
arguments = arguments_list[s]
if action_list[s] == 's':
pos += stage_length-int(arguments[0])
elif action_list[s] == 'x':
swap((int(arguments[0])+pos)%stage_length, (int(arguments[1])+pos)%stage_length)
elif action_list[s] == 'p':
pos1 = stage.index(arguments[0])
pos2 = stage.index(arguments[1])
swap(pos1, pos2)
if stage in history:
print("Duplicate found: %r at index %d matches at stage %d"%(stage, history.index(stage), n))
loop_length = n - history.index(stage)
complete_cycles = (iterations - n) / loop_length
n += complete_cycles * loop_length
history.append(copy.copy(stage))
n += 1
spin(pos % stage_length)
print "".join(stage)
| 30.076923
| 101
| 0.621483
| 229
| 1,564
| 4.170306
| 0.375546
| 0.06911
| 0.058639
| 0.027225
| 0.05445
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029826
| 0.228261
| 1,564
| 51
| 102
| 30.666667
| 0.761392
| 0.039003
| 0
| 0.052632
| 0
| 0
| 0.045052
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.052632
| null | null | 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a82d93e4ba8abbe55f44853090dbccbc8c6e819
| 48,277
|
py
|
Python
|
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | null | null | null |
edexOsgi/com.raytheon.edex.plugin.gfe/utility/common_static/base/gfe/textproducts/templates/product/GenericHazards.py
|
srcarter3/awips2
|
37f31f5e88516b9fd576eaa49d43bfb762e1d174
|
[
"Apache-2.0"
] | 1
|
2021-10-30T00:03:05.000Z
|
2021-10-30T00:03:05.000Z
|
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 05/07/2015 4027 randerso Migrated A1 OB9.16 code to A2
# 06/17/2015 4027 dgilling Perform case-insensitive
# comparisons in foundCTAs.
# 07/13/2015 4648 randerso Fix bullets in follow up products
# 02/24/2016 5411 randerso Make bullet headers upper case
# 07/15/2016 5749 randerso Replaced ellipses with commas in hazardBodyText
#
##
# This is a base file that is not intended to be overridden.
##
#-------------------------------------------------------------------------
# Description: This product is a template for creating Hazard Products.
#-------------------------------------------------------------------------
# Copying:
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#-------------------------------------------------------------------------
# Standard and Local file names and Locations:
# GenericHazards
#-------------------------------------------------------------------------
# Customization Points:
#
# DEFINITION SECTION
#
# Required Configuration Items:
#
# displayName If not None, defines how product appears in GFE GUI
#
# You must set the following:
#
# productName defines name of product e.g. "Zone Forecast Product"
# fullStationID Full station identifier, 4 letter, such as "KSLC".
# wmoID WMO ID code for product header, such as "FOUS45"
# pil Product pil, such as "SFTBOS"
# areaName (opt.) Area name for product header, such as "Western New York"
# wfoCityState City,state that the WFO is located in, such as "Buffalo NY"
#
# Optional Configuration Items
#
# mapNameForCombinations Name of the map background that is used for
# creating/editing the combinations file. This must
# be defined or the GFE zone combiner will not work.
# database Source database for product. Can be "Official",
# "Fcst" or "ISC"
# outputFile Defines the output location of the finished product.
# Product is saved if autoWrite is 1.
# debug If on, debug_print statements will appear.
# textdbPil Defines the awips product identifier
# (e.g., DENCCFDEN) that is used to store the product
# in the AWIPS text database. The product is not
# automatically stored unless autoStore is 1. This
# value is also used for the default GUI entry for
# storage.
# awipsWANPil Defines the awips product identifier
# (e.g., KBOUCCFDEN) that is used to transmit the
# product to the AWIPS WAN. The product is not
# automatically transmitted unless autoSend is 1.
# This value is also used for the default GUI
# entry for storage.
# autoSend If set to 1, then the product will be automatically
# sent on the AWIPS WAN to the "autoSendAddress" with
# the "awipsWANPil after product creation.
# autoStore If set to 1, then the product will be automatically
# stored into the text database using the "textdbPil"
# after product creation.
# autoWrite If set to 1, then the product will be automatically
# written to the "output" named disk file after
# product creation.
#
# lineLength max length of each line
#
# defaultEditAreas defines edit areas, default is Combinations
#
# purgeTime Maximum number of hours past issuance time for the
# expire time.
# includeCities If 1, cities will be included in the area header
# accurateCities If 1, cities are determined from grids
# citiesPhrase "Including the cities of" phrase used when including
# cities
# includeZoneNames If 1, zone names will be included in the area header
# easPhrase Optional EAS phrase to be include in product header
#
# hazardSamplingThreshold Defines the percentage coverage or number of
# grid points in a zone that must contain the hazard
# in order for it to be considered. Tuple (percent, points)
# includeOverviewHeadline If 1, the overview header is templated
# includeOverview If 1, the overview section is templated
# bulletProd If 1, the product will use a bullet format
#-------------------------------------------------------------------------
# Weather Elements Needed:
# Hazards
#-------------------------------------------------------------------------
# Edit Areas Needed: None
#-------------------------------------------------------------------------
# Associated Utilities Files e.g. Combinations file:
# Combinations file
#-------------------------------------------------------------------------
# Component Products:
# Hazards
#-------------------------------------------------------------------------
# Development tasks that are identified and in progress:
#
# To look up tasks and their status, see the Text Product User Guide
# Section on "Tkgnats: Task Reporting System".
#-------------------------------------------------------------------------
# Additional Information:
#-------------------------------------------------------------------------
# Example Output:
#-------------------------------------------------------------------------
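#
# Illustrative only (an assumed site-level override, not shipped with this
# baseline file): the Customization Points above are normally exercised by
# subclassing this product and replacing Definition entries, e.g.:
#
#   import copy
#   import GenericHazards
#   class TextProduct(GenericHazards.TextProduct):
#       Definition = copy.deepcopy(GenericHazards.TextProduct.Definition)
#       Definition["displayName"] = "Hazard_XYZ"   # hypothetical entry name
#       Definition["fullStationID"] = "KSLC"
#       Definition["wmoID"] = "FOUS45"
#       Definition["pil"] = "SFTBOS"
#       Definition["bulletProd"] = 1               # switch to bullet format
#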
import LogStream
import TextRules
import SampleAnalysis
import time, string, types, copy, re
import CallToActions
import AbsTime
class TextProduct(TextRules.TextRules, SampleAnalysis.SampleAnalysis,
CallToActions.CallToActions):
Definition = {
"type": "smart",
"displayName": None,
# Source database for product. Can be "Official", "Fcst" or "ISC"
"database": "Official",
# Defines output location of finished product.
"outputFile": "{prddir}/TEXT/genHaz.txt",
"debug": 0,
# Name of map background for creating Combinations
# Can be:
# Zones_BOU
# FireWxZones_BOU
# Counties
# Marine_Zones_BOU
"mapNameForCombinations": "Zones_<site>",
## Edit Areas: Create Combinations file with edit area combinations.
## Can be:
## EditAreas_PublicZones_BOU
## EditAreas_FireWx_BOU
## EditAreas_FIPS_BOU
## EditAreas_MarineZones_BOU
"defaultEditAreas" : "EditAreas_PublicZones_<site>_<MultiPil>",
# product identifiers
"productName": "Generic Hazard Product", # product name
"fullStationID": "<fullStationID>", # full station identifier (4letter)
"wmoID": "<wmoID>", # WMO ID
"pil": "<pil>", # Product pil
"areaName": "", # Name of state, such as "Georgia" -- optional
"wfoCityState": "<wfoCityState>", # Location of WFO - city,state
"textdbPil": "<textdbPil>", # Product ID for storing to AWIPS text database.
"awipsWANPil": "<awipsWANPil>", # Product ID for transmitting to AWIPS WAN.
"periodCombining" : 0, # If 1, combine periods, if possible
# automatic functions
"autoSend": 0, #set to 1 to automatically transmit product
"autoSendAddress": "000", #transmission address
"autoStore": 0, #set to 1 to automatically store product in textDB
"autoWrite": 0, #set to 1 to automatically write product to file
# Area Dictionary -- Descriptive information about zones
"areaDictionary": "AreaDictionary",
# Language
"language": "english",
"lineLength": 66, #Maximum line length
"purgeTime": 8, # Maximum hours for expireTime
"includeCities": 1 , # Cities included in area header
"accurateCities": 0, # Include all cities in area header
"cityLocation": "CityLocation", # City lat/lon dictionary to use
"cityDescriptor":"Including the cities of",
"includeZoneNames":1, # Zone names will be included in the area header
"easPhrase" :"", # Optional EAS phrase to be include in product header
"includeOverviewHeadline": 1, #include overview header
"includeOverview": 1, #include overview section
"bulletProd": 0, # do not default to bullets
"hazardSamplingThreshold": (10, None), #(%cov, #points)
"callToAction": 1,
}
def __init__(self):
TextRules.TextRules.__init__(self)
SampleAnalysis.SampleAnalysis.__init__(self)
self.__overviewText = ""
self.__procCTA = None
def generateForecast(self, argDict):
# Generate Text Phrases for a list of edit areas
# Get variables
error = self._getVariables(argDict)
if error is not None:
return error
# Get the segments
hazardsC = argDict['hazards']
segmentList = self.organizeHazards(hazardsC.rawAnalyzedTable())
if len(segmentList) == 0:
return "No hazards to report"
# Determine time ranges
error = self._determineTimeRanges(argDict)
if error is not None:
return error
# Initialize the output string
fcst = ""
fcst = self._preProcessProduct(fcst, argDict)
# Generate the product for each segment in the segmentList
fraction = 0
fractionOne = 1.0/float(len(segmentList))
percent = 50.0
self.setProgressPercentage(50)
for segmentAreas in segmentList:
self.progressMessage(fraction, percent, "Making Product for Segment")
fcst = self._preProcessArea(fcst, segmentAreas, self._expireTime, argDict)
fcst = self._makeProduct(fcst, segmentAreas, argDict)
fcst = self._postProcessArea(fcst, segmentAreas, argDict)
fraction = fractionOne
fcst = self._postProcessProduct(fcst, argDict)
return fcst
def _getVariables(self, argDict):
# Make argDict accessible
self.__argDict = argDict
# Get Definition variables
self._definition = argDict["forecastDef"]
for key in self._definition.keys():
exec "self._" + key + "= self._definition[key]"
# Get VariableList
varDict = argDict["varDict"]
for key in varDict.keys():
if type(key) is types.TupleType:
label, variable = key
exec "self._" + variable + "= varDict[key]"
self._language = argDict["language"]
# Set up information for Hazards product
self._hazards = argDict['hazards']
self._combinations = argDict["combinations"]
return None
def _determineTimeRanges(self, argDict):
# Set up the time range for 0-240 hours
self._timeRange = self.createTimeRange(0, 240)
self._ddhhmmTime = self.getCurrentTime(
argDict, "%d%H%M", shiftToLocal=0, stripLeading=0)
self._issueTime = AbsTime.AbsTime(argDict['creationTime'])
self._currentTime = argDict['creationTime']
self._expireTime = self._issueTime + self._purgeTime*3600
self._timeLabel = self.getCurrentTime(
argDict, "%l%M %p %Z %a %b %e %Y", stripLeading=1)
return None
def _preProcessProduct(self, fcst, argDict):
# Product header
if self._areaName != "":
self._areaName = " for " + self._areaName
issuedByString = self.getIssuedByString()
productName = self.checkTestMode(argDict,
self._productName + self._areaName)
if len(self._easPhrase) != 0:
eas = self._easPhrase + '\n'
else:
eas = ''
s = self._wmoID + " " + self._fullStationID + " " + \
self._ddhhmmTime + "\n" + self._pil + "\n\n"
fcst = fcst + s.upper()
s = eas + productName + "\n" +\
"National Weather Service " + self._wfoCityState + \
"\n" + issuedByString + self._timeLabel + "\n\n"
fcst = fcst + s
fcst = fcst + "Default overview section\n"
return fcst
def _preProcessArea(self, fcst, segmentAreas, expireTime, argDict):
# This is the header for an edit area combination
areaHeader = self.makeAreaHeader(
argDict, "", self._issueTime, expireTime,
self._areaDictionary, None, cityDescriptor=self._cityDescriptor,
areaList=segmentAreas, includeCities=self._includeCities,
includeZoneNames = self._includeZoneNames,
accurateCities = self._accurateCities)
fcst = fcst + areaHeader
return fcst
def _makeProduct(self, fcst, segmentAreas, argDict):
argDict["language"] = self._language
# Generate Narrative Forecast for Edit Area
# get the hazards text
# We only need to get headlines for the first edit area
# in the segment since all areas in the segment have
# the same headlines
editArea = segmentAreas[0]
areaLabel = editArea
headlines = self.generateProduct("Hazards", argDict, area = editArea,
areaLabel=areaLabel,
timeRange = self._timeRange)
fcst = fcst + headlines
return fcst
def _postProcessArea(self, fcst, segmentAreas, argDict):
return fcst + "\n\n$$\n\n"
def _postProcessProduct(self, fcst, argDict):
#
# If an overview exists for this product, insert it
#
overview = self.finalOverviewText()
overviewSearch = re.compile(r'Default overview section', re.DOTALL)
fcst = overviewSearch.sub(overview, fcst)
#
# Added to place line feeds in the CAP tags to keep separate from CTAs
fcst = string.replace(fcst, \
r"PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", \
r"\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n")
fcst = string.replace(fcst, "\n ","\n")
fcst = string.replace(fcst, "&&", "\n&&\n")
# Prevent empty Call to Action Tags
fcst = re.sub(r'\nPRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\s*&&\n', \
"", fcst)
fcst = self._indentBulletText(fcst)
#
# Clean up multiple line feeds
#
fixMultiLF = re.compile(r'(\n\n)\n*', re.DOTALL)
fcst = fixMultiLF.sub(r'\1', fcst)
# finish progress meter
self.setProgressPercentage(100)
self.progressMessage(0, 100, self._displayName + " Complete")
return fcst
def allowedHazards(self):
return []
# Added for DR 21194
def _bulletDict(self):
return []
# Added for DR 21309
def _bulletOrder(self):
return []
## Replaced by 21309 code
## def _getBullets(self, newBulletList, argDict):
##
## ### get the bullet dictionary and split the bullets
## bDict = self._bulletDict()
## bLine = bDict.get(eachHazard['phen'])
## print 20* "*" + (eachHazard['phen'])
## bList = newBulletList.split(",")
##
## ### initialize the bullet output
## bullets = ""
##
## ### loop through the bullets and format the output
## for b in bList:
## bullets = bullets + "* " + b + "...|* Enter bullet text *|\n\n"
## # bullets = bullets + "\n"
## return bullets
def _indentBulletText(self, prevText):
print prevText
### if previous text is empty, return nothing
if prevText is None:
return prevText
###
### split the text
###
bullets = []
bullets = string.split(prevText, '\n\n')
if len(bullets) <= 1:
return prevText
###
### process the text
###
outText = ""
for b in bullets:
### if first character is a * we found a bullet
if re.match("\*", b):
### remove line feeds
                removeLF = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
bullet = removeLF.sub(r'\1 \2',b)
### indent code
bullet = self.indentText(bullet, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "..."])
###
### the "-" in the breakStrings line above is causing issues with
### offices that use "-20 degrees" in the text.
###
outText = outText + bullet + "\n\n"
else: ### not a bullet, CTA text
outText = outText + b + "\n\n"
### that's it
print outText
return outText
# The _hazardTimePhrases method is passed a hazard key, and returns
# time phrase wording consistent with that generated by the headline
# algorithms in DiscretePhrases.
#
def hazardTimePhrases(self, hazard, argDict, prefixSpace=True):
timeWords = self.getTimingPhrase(hazard, argDict['creationTime'])
if prefixSpace and len(timeWords):
timeWords = " " + timeWords #add a leading space
return timeWords
#
# The method hazardBodyText creates an attribution phrase
#
def hazardBodyText(self, hazardList, argDict):
bulletProd = self._bulletProd
hazardBodyPhrase = ''
#
# First, sort the hazards for this segment by importance
#
sortedHazardList = []
for each in ['W', 'Y', 'A', 'O', 'S']:
for eachHazard in hazardList:
if eachHazard['sig'] == each:
if eachHazard not in sortedHazardList:
sortedHazardList.append(eachHazard)
#
# Next, break them into individual lists based on action
#
newList = []
canList = []
expList = []
extList = []
conList = []
upgList = []
statementList = []
for eachHazard in sortedHazardList:
if eachHazard['sig'] in ['S']and eachHazard['phen'] in ['CF', 'LS']:
statementList.append(eachHazard)
elif eachHazard['act'] in ['NEW', 'EXA', 'EXB']:
newList.append(eachHazard)
elif eachHazard['act'] in ['CAN']:
canList.append(eachHazard)
elif eachHazard['act'] in ['EXP']:
expList.append(eachHazard)
elif eachHazard['act'] in ['EXT']:
extList.append(eachHazard)
elif eachHazard['act'] in ['UPG']:
upgList.append(eachHazard)
else:
conList.append(eachHazard)
#
# Now, go through each list and build the phrases
#
nwsIntroUsed = 0
#
# This is for the new hazards
#
phraseCount = 0
lastHdln = None
for eachHazard in newList:
hdln = eachHazard['hdln']
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazNameACap = self.sentence(hazNameA, addPeriod=False)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if hazName in ["Winter Weather Advisory", "Winter Storm Warning", "Beach Hazards Statement"]:
forPhrase = " for |* Enter hazard type *|"
else:
forPhrase =""
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " + self._wfoCity
nwsIntroUsed = 1
if phraseCount == 0:
phraseCount = 1
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + " has issued " + \
hazNameA + ". "
else:
hazardBodyPhrase += " has issued " + hazNameA + forPhrase + \
", which is in effect" + endTimePhrase + ". "
elif phraseCount == 1:
phraseCount = 2
if hdln != lastHdln:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued. This " + hazName + forPhrase + \
" is in effect" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + \
" has also been issued."
else:
hazardBodyPhrase = hazardBodyPhrase + hazNameACap + forPhrase + \
" has also been issued" + endTimePhrase + ". "
else:
if eachHazard['phen'] in ['HU', 'TR', 'TY']:
hazardBodyPhrase += "In addition, " + \
hazNameA + " has been issued."
else:
hazardBodyPhrase += "In addition, " + \
hazNameA + forPhrase + " has been issued. This " + hazName + \
" is in effect" + endTimePhrase + ". "
lastHdln = hdln
#
# This is for the can hazards
#
for eachHazard in canList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if nwsIntroUsed == 0:
hazardBodyPhrase = "The National Weather Service in " +\
self._wfoCity
nwsIntroUsed = 1
hazardBodyPhrase = hazardBodyPhrase + \
" has cancelled the " + hazName + ". "
else:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" has been cancelled. "
#
# This is for the exp hazards
#
phraseCount = 0
for eachHazard in expList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
if eachHazard['endTime'] <= argDict['creationTime']:
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
else:
expTimeCurrent = argDict['creationTime']
timeWords = self.getTimingPhrase(eachHazard, expTimeCurrent)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" will expire " + timeWords + ". "
#
# This is for ext hazards
#
for eachHazard in extList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is now in effect" + endTimePhrase + ". "
#
# This is for upgrade hazards
#
for eachHazard in upgList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
hazName = self.hazardName(eachHazard['hdln'], argDict, False)
hazardBodyPhrase = hazardBodyPhrase + "The " + hazName + \
" is no longer in effect. "
#
# This is for con hazards
#
for eachHazard in conList:
if len(eachHazard['hdln']) == 0:
continue #no defined headline, skip phrase
if self._bulletProd:
continue # No attribution for this case if it is a bullet product
endTimePhrase = self.hazardTimePhrases(eachHazard, argDict)
hazNameA = self.hazardName(eachHazard['hdln'], argDict, True)
hazardBodyPhrase = hazardBodyPhrase + hazNameA + \
" remains in effect" + endTimePhrase + ". "
#
# This is for statement hazards
#
for eachHazard in statementList:
hazardBodyPhrase = "...|* Add statement headline *|...\n\n"
#
# This adds segment text
#
segmentText = ''
#
# Check that this segment codes to determine capture or not,
# and frame captured text or not
#
incTextFlag, incFramingCodes, skipCTAs, forceCTAList = \
self.useCaptureText(sortedHazardList)
#
#
# Check that the previous text exists
#
foundCTAs = []
for eachHazard in sortedHazardList:
if eachHazard.has_key('prevText'):
prevText = eachHazard['prevText']
if eachHazard['pil'] == 'MWS':
startPara = 0
else:
startPara = 1
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
tester = segmentText[0]
if tester == '*':
startPara = 1
else:
startPara = 2
segmentText, foundCTAs = self.cleanCapturedText(prevText,
startPara, addFramingCodes = False,
skipCTAs = skipCTAs)
#
# Check that the segment text isn't very short or blank
#
if len(segmentText) < 6:
incTextFlag = 0
# DR 21309 code addition from Middendorf (BYZ)
#
# Now if there is a new hazard and previous segment Text, then
# we may have to add bullets.
#
if incTextFlag and bulletProd:
for eachHazard in sortedHazardList:
if not eachHazard.has_key('prevText'):
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
print "newBullets = ", newBullets
print "segment text is: ", segmentText
for bullet in newBullets:
if re.search("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is None:
print bullet + " not in segmentText"
start = self._bulletOrder().index(bullet) + 1
end = len(self._bulletOrder())
bulletFlag = 1
for i in range(start,end):
if (re.search("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None) and bulletFlag:
print "* " + self._bulletOrder()[i] + "... found!"
segmentTextSplit = re.split("\* " + self._bulletOrder()[i] + "\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\n* " + self._bulletOrder()[i] + "...")
bulletFlag = 0
if bulletFlag:
print "appending to bottom list of bullets!"
segmentTextSplit = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentText, flags=re.IGNORECASE)
segmentText = "\n" + string.join(segmentTextSplit,"* " + bullet.upper() + \
"...|* Enter bullet text *|\n\nPRECAUTIONARY/PREPAREDNESS ACTIONS...")
bulletFlag = 0
#
# Now if there is a can/exp hazard and previous segment Text, then
# we may have to remove bullets.
#
if incTextFlag and bulletProd:
# First make list of bullets that we need to keep.
keepBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] not in ["CAN","EXP"]:
saveBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for saveBullet in saveBullets:
if saveBullet not in keepBulletList:
keepBulletList.append(saveBullet)
# Now determine which bullets we have to remove.
removeBulletList = []
for eachHazard in sortedHazardList:
if eachHazard['act'] in ["CAN","EXP"]:
canBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for canBullet in canBullets:
if canBullet not in keepBulletList and canBullet not in removeBulletList:
removeBulletList.append(canBullet)
print "hazardBodyText info: keepBulletList: ",keepBulletList
print "hazardBodyText info: removeBulletList: ",removeBulletList
# Finally remove the bullets no longer needed.
for bullet in removeBulletList:
if re.search("\* "+ bullet + "\.\.\.", segmentText, flags=re.IGNORECASE) is not None:
segmentTextSplit = re.split("\* " + bullet + "\.\.\.", segmentText, flags=re.IGNORECASE)
print "segmentTextSplit is ", segmentTextSplit
segmentTextSplit2 = string.split(segmentTextSplit[1],"*",1)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "*" + segmentTextSplit2[1]
else:
segmentTextSplit2 = re.split("PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.", segmentTextSplit[1], 1, flags=re.IGNORECASE)
if len(segmentTextSplit2) == 2:
segmentTextSplit[1] = "PRECAUTIONARY/PREPAREDNESS ACTIONS..." + segmentTextSplit2[1]
segmentText = string.join(segmentTextSplit,"")
if removeBulletList != []:
segmentText = "|*\n" + segmentText + "*|"
else:
segmentText = segmentText
#
# If segment passes the above checks, add the text
#
print "hazardBodyText info: incTextFlag: ",incTextFlag
if incTextFlag:
print "hazardBodyText info: segmentText: ",segmentText
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + \
segmentText + '\n\n'
elif bulletProd:
bulletFlag = 0
if eachHazard['act'] == 'CAN':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
elif eachHazard['act'] == 'EXP':
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Wrap-up text goes here *|.\n"
else:
bulletFlag = 1
## print "bulletFlag is: ",bulletFlag
if bulletFlag:
newBulletList = []
bullets = ""
for eachHazard in sortedHazardList:
                ### get the default bullets for all hazards from the bullet dictionary
newBullets = string.split(self._bulletDict().get(eachHazard['phen']),",")
for newBullet in newBullets:
if newBullet not in newBulletList:
newBulletList.append(newBullet)
print "my bullets are: ", newBulletList
### Determine the correct order for all bullets
bulletOrder = self._bulletOrder()
staticBulletOrder = self._bulletOrder()
for bullet in staticBulletOrder:
print "correct bullet order should be: ", bulletOrder
if bullet not in newBulletList:
bulletOrder.remove(bullet)
print "reordered bullets are: ", bulletOrder
for b in bulletOrder:
bullets = bullets + "* " + b.upper() + "...|* Enter bullet text *|\n\n"
hazardBodyPhrase = hazardBodyPhrase + "\n\n" + bullets
# If segment doesn't pass the checks, put in framing codes
else:
hazardBodyPhrase = hazardBodyPhrase + \
"\n\n|* Statement text goes here *|.\n\n"
# End code for DR 21310
#
# This adds the call to action statements. This is only performed
# if the segment is 'NEW' or if the previous text has been discarded
# due to a CAN/EXP/UPG segment
#
# remove items from forceCTAList if they exist in foundCTAs. Note
# that the formats of these lists are different, thus this code
# is more complicated
for ent in foundCTAs:
#only process CTAs that are vtec phen/sig based
if ent.find('.') == 2:
phensig = (ent[0:2], ent[3]) #phen.sig
if phensig in forceCTAList:
del forceCTAList[forceCTAList.index(phensig)]
hazardBodyPhrase = hazardBodyPhrase + '\n\n'
ctas = []
for (phen,sig) in forceCTAList:
hazardPhenSig = phen + "." + sig
cta = self.defaultCTA(hazardPhenSig)
if cta not in ctas:
ctas.append(cta)
if len(ctas) > 0:
hazardBodyPhrase = hazardBodyPhrase + \
'PRECAUTIONARY/PREPAREDNESS ACTIONS...\n\n'
for c in ctas:
hazardBodyPhrase = hazardBodyPhrase + c + '\n\n'
hazardBodyPhrase = hazardBodyPhrase + '&&\n\n'
        # Make sure there is only one CAP tag pair
hazardBodyPhrase = re.sub(r'&&\s*PRECAUTIONARY/PREPAREDNESS ACTIONS\.\.\.\n', \
"", hazardBodyPhrase)
return hazardBodyPhrase
def finalOverviewText(self):
#if didn't calculate any, use the default
if len(self.__overviewText) == 0:
if self._includeOverviewHeadline:
overviewHeadline = "...|*Overview headline (must edit)*|...\n\n"
else:
overviewHeadline = ""
if self._includeOverview:
overviewBody = ".|*Overview (must edit)*|.\n\n"
else:
overviewBody = ""
#assemble the lines
overview = overviewHeadline + overviewBody
return overview
else:
return self.__overviewText
def overviewText(self, hazardList, pil):
#
# This method finds an overview in the previous product
#
overview = ""
for each in hazardList:
if (each.has_key('prevOverviewText') and
each.has_key('pil') and
each.has_key('endTime') and
each.has_key('act')):
if (each['pil'] == pil and
each['endTime'] > self._currentTime and
each['act'] not in ['CAN', 'EXP']):
overview = each['prevOverviewText']
self.__overviewText, dummy = self.cleanCapturedText(
overview, 0)
break
def useCaptureText(self, hazardList):
#Based on the hazardlist, returns a tuple indicating:
# (inc capture text, inc framing codes, skip CTAs, forceCTAList)
#
# For the values to be considered, the 'hdln' value must be
# present in the list, or it needs to be a Statement (sig="S")
cans = ['CAN','UPG','EXP']
acts = ['NEW','EXT','EXA','EXB','CON']
foundACTS = 0
foundCANS = 0
foundSig = []
for eh in hazardList:
if eh['act'] in acts and (len(eh['hdln']) or eh['sig'] == 'S'):
foundACTS = 1
if eh['act'] in cans and (len(eh['hdln']) or eh['sig'] == 'S'):
foundCANS = 1
if eh['sig'] not in foundSig:
foundSig.append(eh['sig'])
includeFrameCodes = 0
includeText = 1
skipCTAs = 0
forceCTAList = []
# all actions are in CAN, UPG, EXP only (don't include text)
if foundCANS and not foundACTS:
if 'S' in foundSig and len(foundSig) == 1: #only S
includeFrameCodes = 1 #capture text, but frame it
else:
includeText = 0 #end of non statement
# something in CANS and something in acts (frame it, include text)
elif foundCANS and foundACTS:
includeFrameCodes = 1
skipCTAs = 1
for eh in hazardList:
if eh['act'] in acts and \
(eh['phen'], eh['sig']) not in forceCTAList and \
len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
#everything in active entries, captured text is used, but still
# need to handle the "NEW" entries.
else:
for eh in hazardList:
if eh['act'] in ['NEW'] and len(eh['hdln']):
forceCTAList.append((eh['phen'], eh['sig']))
return (includeText, includeFrameCodes, skipCTAs, forceCTAList)
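
    # Hedged walk-through of useCaptureText on assumed segments:
    #   - all actions CAN/UPG/EXP, non-statement:  returns (0, 0, 0, [])
    #     (previous text is dropped entirely)
    #   - mixed CAN + CON with headlines:          returns (1, 1, 1, [...])
    #     (text kept but framed, CTAs skipped, CTAs forced for the CON phensigs)
    #   - all active actions:                      returns (1, 0, 0, [NEW phensigs])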
def cleanCapturedText(self, text, paragraphs, addFramingCodes = False,
skipCTAs = False):
#
# This method takes a block of text, wraps it preserving blank lines,
# then returns the part after 'paragraphs'. So, if paragraphs is 0, it
# returns the whole thing, if it's 2, it returns paragraphs 2 -> end, etc.
# Headlines are always removed.
# Framing codes are added if specified.
#
paras = self.convertSingleParas(text) #single paragraphs
# keep track of any call to actions found
foundCTAs = []
# Process the paragraphs, keep only the interested ones
paraCount = 0
processedText = ''
for eachPara in paras:
if paraCount >= paragraphs:
found = self.ctasFound(eachPara) #get list of ctas found
if skipCTAs and len(found):
pass
else:
processedText = processedText + eachPara + '\n\n'
#keep track of remaining CTAs in processed text
for f in found:
if f not in foundCTAs:
foundCTAs.append(f)
if eachPara.find('...') == 0:
pass #ignore headlines
paraCount = paraCount + 1
# Add framing codes
if addFramingCodes:
processedText = processedText.rstrip()
processedText = "|*\n" + processedText + "*|\n"
# Wrap
processedText = self.endline(processedText,
linelength=self._lineLength, breakStr=[" ", "-", "..."])
return processedText, foundCTAs
def decodeBulletedText(self, prevText):
# returns the bullet paragraph text or None, returns the
# regular text after the bullets. The afterText is text up to
# the next bullet or up to "The National Weather Service". Note
# that this only correctly handles the 1st set of entries in
# a segment, thus double events will only decode the first set
# of bullets and text. The multipleRecords is set to 1 in the
# event that there are multiple sets of bullets. In this case
# only the 1st set was captured/decoded.
# (hazard, time, basis, impact, afterText, multipleRecords)
if prevText is None:
return (None, None, None, None, None, None)
# find the bullets
bullets = []
buf = prevText.split('\n\n* ')
if len(buf) <= 1:
return (None, None, None, None, None, None)
multRecords = 0 #indicator of multiple sets of bullets
for x in xrange(len(buf)):
if x == 0:
continue #headlines and text before the bullets
bullets.append(buf[x])
# find only the bulleted text, defined by the double line feed term.
# of the text
regText = "" #regular text after bullets
for x in xrange(1, len(bullets)):
index = bullets[x].find('\n\n')
if index != -1:
regText = bullets[x][index+2:]
bullets[x] = bullets[x][0:index] #eliminate after bullet text
if len(bullets) > x+2: #more bullets are present
multRecords = 1
bullets = bullets[0:x+1] #only interested in these bullets
break
# regular text is the remainder of the text. However we only
# want text from the last in the series of bullets to the
# beginning of any next NWS phrase.
lines = regText.split('\n')
for x in xrange(len(lines)):
if lines[x].find('The National Weather Service') == 0:
lines = lines[0:x] #eliminate following lines
break
regText = ("\n").join(lines)
# now clean up the text
for x in xrange(len(bullets)):
bullets[x] = string.replace(bullets[x],'\n',' ')
        removeLF = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
regText = removeLF.sub(r'\1 \2',regText)
# extract out each section for returning the values
if len(bullets) >= 1:
hazard = bullets[0]
else:
hazard = None
if len(bullets) >= 2:
time = bullets[1]
else:
time = None
if len(bullets) >= 3:
basis = bullets[2]
else:
basis = None
if len(bullets) >= 4:
impact = bullets[3]
else:
impact = None
if len(regText) == 0:
regText = None #no regular text after bullets
return (hazard, time, basis, impact, regText, multRecords)
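
    # Hedged illustration of decodeBulletedText on a made-up segment:
    #   prevText = "HEADLINE\n\n* Flood Warning...\n\n* Until noon...\n\n"
    #              "* Heavy rain...\n\n* Flooding...\n\nRegular text.\n"
    #   returns ("Flood Warning...", "Until noon...", "Heavy rain...",
    #            "Flooding...", "Regular text.\n", 0)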
def substituteBulletedText(self, capText, defaultText, frameit="Never"):
#returns a properly formatted bulleted text based on
#the capText variable. If capText is None or 0 length, then
#the default text is used. frameit can be "Never", in which
#nothing is wrapped in framing codes, "Always" in which the
#text (default or cap) is wrapped in framing codes, or
#DefaultOnly" in which just the default text is wrapped.
if capText is not None and len(capText):
textToUse = capText[0].upper()+capText[1:]
if frameit == "Always":
textToUse = "|* " + textToUse + " *|"
else:
textToUse = defaultText
if frameit == "Always" or frameit == "DefaultOnly":
textToUse = "|* " + textToUse + " *|"
# add bullet codes
textToUse = "* " + textToUse
# format it
return self.indentText(textToUse, indentFirstString = '',
indentNextString = ' ', maxWidth=self._lineLength,
breakStrings=[" ", "-", "..."])
def convertSingleParas(self, text):
#returns a list of paragraphs based on the input text.
        lf = re.compile(r'(\s*[^\n])\n([^\n])', re.DOTALL)
ptext = lf.sub(r'\1 \2', text)
ptext = ptext.replace('\n\n', '\n')
paragraphs = ptext.split('\n')
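        # illustrative: "a\nb\n\nc\n" -> ["a b", "c", ""]; single line feeds are
        # joined with a space, and a trailing newline yields a final empty entry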
return paragraphs
def ctasFound(self, text):
#returns types of ctas found. The identifier is the pil (e.g., ZFP),
#phen/sig (e.g., DU.Y), or GENERIC. Uses the CallToAction definitions.
#convert text to single paragraphs
paragraphs = self.convertSingleParas(text)
for x in xrange(len(paragraphs)):
paragraphs[x] = string.replace(paragraphs[x],' ','')
#make list of call to actions (type, cta text)
if self.__procCTA is None:
self.__procCTA = []
ctao = CallToActions.CallToActions()
d = ctao.ctaDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
d = ctao.ctaPilDict()
for k in d.keys():
func = d[k]
items = func()
for it in items:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append((k,string.replace(cta,' ','')))
ctas = ctao.genericCTAs()
for it in ctas:
if type(it) == types.TupleType:
it = it[1] #get second string which is the CTA
ctaParas = self.convertSingleParas(it)
for cta in ctaParas:
self.__procCTA.append(("GENERIC",
string.replace(cta,' ','')))
#compare
found = []
for para in paragraphs:
for (ctaType, cta) in self.__procCTA:
## Added following line to account for framing code issues in CTA
cta = re.sub("\|\*.*\*\|","",cta)
# We want this comparison to be case-insensitive just in case
# the site is not transmitting in mixed case yet.
if para.upper() == cta.upper() and ctaType not in found:
found.append(ctaType)
return found
| 41.29769
| 151
| 0.53106
| 4,665
| 48,277
| 5.465166
| 0.18821
| 0.003138
| 0.008237
| 0.007688
| 0.223416
| 0.196038
| 0.167327
| 0.150971
| 0.134262
| 0.120455
| 0
| 0.009976
| 0.358432
| 48,277
| 1,168
| 152
| 41.333048
| 0.81316
| 0.306378
| 0
| 0.274096
| 0
| 0
| 0.101009
| 0.012153
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.003012
| 0.009036
| null | null | 0.02259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a8bd51e1880ca1483e91fca0ab41237e4c4f869
| 4,896
|
py
|
Python
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 11
|
2015-01-06T15:43:56.000Z
|
2019-07-27T00:35:20.000Z
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 2
|
2017-05-17T10:11:46.000Z
|
2018-11-21T21:43:43.000Z
|
Lib/hTools2/dialogs/glyphs/slide.py
|
gferreira/hTools2
|
a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c
|
[
"BSD-3-Clause"
] | 4
|
2015-01-10T13:58:50.000Z
|
2019-12-18T15:40:14.000Z
|
# [h] slide selected glyphs
from mojo.roboFont import CurrentFont, CurrentGlyph, version
from vanilla import *
from hTools2 import hDialog
from hTools2.modules.fontutils import get_full_name, get_glyphs
from hTools2.modules.messages import no_font_open, no_glyph_selected
class slideGlyphsDialog(hDialog):
'''A dialog to slide the selected glyphs vertically and/or horizontally.
.. image:: imgs/glyphs/slide.png
'''
_moveX = 0
_moveY = 0
_xMax = 1000
_xMin = -1000
_yMax = 500
_yMin = -500
font = None
font_name = '(no font selected)'
def __init__(self):
# window
self.title = "slide"
self.button_width = 70
self.column_1 = 20
self.column_2 = 240
self.width = self.column_1 + self.column_2 + self.button_width + self.padding_x*3
self.height = self.text_height*3 + self.padding_y*4
self.w = HUDFloatingWindow((self.width, self.height), self.title)
x = self.padding_x
y = self.padding_y
# current font name
self.w.box = Box(
(x, y, self.column_1 + self.column_2, self.text_height))
self.w.box.text = TextBox(
(5, 0, self.column_1 + self.column_2, self.text_height),
self.font_name,
sizeStyle=self.size_style)
x += (self.column_2 + self.column_1 + self.padding_x)
self.w.button_update_font = SquareButton(
(x, y, self.button_width, self.text_height),
"update",
callback=self.update_font_callback,
sizeStyle=self.size_style)
# x slider
x = self.padding_x
y += self.text_height + self.padding_y
self.w.x_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"x",
sizeStyle=self.size_style)
x += self.column_1
self.w.x_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._xMax,
minValue=self._xMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_x = SquareButton(
(x, y, self.button_width, self.text_height),
"reset x",
callback=self.restore_x_callback,
sizeStyle=self.size_style)
# y slider
x = self.padding_x
y += (self.text_height + self.padding_y)
self.w.y_label = TextBox(
(x, y + 5, self.column_1, self.text_height),
"y",
sizeStyle=self.size_style)
x += self.column_1
self.w.y_slider = Slider(
(x, y, self.column_2, self.text_height),
value=0,
maxValue=self._yMax,
minValue=self._yMin,
callback=self.slide_callback,
sizeStyle=self.size_style)
x += (self.column_2 + self.padding_x)
self.w.button_restore_y = SquareButton(
(x, y, self.button_width, self.text_height),
"reset y",
callback=self.restore_y_callback,
sizeStyle=self.size_style)
# open
self.w.open()
self.update_font()
# callbacks
def restore_x(self):
self._moveX = 0
self.w.x_slider.set(self._moveX)
def restore_y(self):
self._moveY = 0
self.w.y_slider.set(self._moveY)
def restore_x_callback(self, sender):
self.restore_x()
def restore_y_callback(self, sender):
self.restore_y()
def update_font(self):
self.font = CurrentFont()
if self.font is not None:
self.w.box.text.set(get_full_name(self.font))
self.set_defaults()
self.restore_x()
self.restore_y()
else:
print no_font_open
def set_defaults(self):
self._xMax = self.font.info.unitsPerEm
self._yMax = self.font.info.unitsPerEm / 2
self._xMin = -self._xMax
self._yMin = -self._yMax
def update_font_callback(self, sender):
self.update_font()
def slide_callback(self, sender):
xValue = self.w.x_slider.get()
yValue = self.w.y_slider.get()
x = self._moveX - xValue
y = self._moveY - yValue
self._moveX = xValue
self._moveY = yValue
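        # the sliders report absolute positions, so x/y above are the deltas
        # since the last callback; moving by (-x, -y) applies just that change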
glyph_names = get_glyphs(self.font)
if len(glyph_names) > 0:
for glyph_name in glyph_names:
# RF 2.0
if version[0] == '2':
self.font[glyph_name].moveBy((-x, -y))
# RF 1.8.X
else:
self.font[glyph_name].move((-x, -y))
else:
print no_glyph_selected
| 31.184713
| 89
| 0.55576
| 611
| 4,896
| 4.217676
| 0.175123
| 0.069849
| 0.065192
| 0.046566
| 0.397749
| 0.35196
| 0.32402
| 0.313931
| 0.313931
| 0.282499
| 0
| 0.019565
| 0.34232
| 4,896
| 156
| 90
| 31.384615
| 0.780745
| 0.020221
| 0
| 0.291667
| 0
| 0
| 0.009854
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041667
| null | null | 0.016667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8a92dd9cacd718af3ee73590efc1c1d73a3833aa
| 12,093
|
py
|
Python
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 11
|
2018-08-28T09:16:02.000Z
|
2021-11-08T09:39:15.000Z
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 2
|
2019-08-29T03:27:24.000Z
|
2020-07-24T02:45:39.000Z
|
beansdbadmin/core/client.py
|
ariesdevil/beansdbadmin
|
3165087ef57b7511ab84fbc50cf16eb8f54d83cd
|
[
"BSD-3-Clause"
] | 4
|
2019-05-10T12:10:31.000Z
|
2020-07-17T03:22:02.000Z
|
#!/usr/bin/python
# encoding: utf-8
'''a rich client
1. for one server (instead of multi like in libmc.Client)
2. encapsulate @, ?, gc ...
use it instead of libmc.Client
'''
import telnetlib
import logging
import libmc
import string
import urllib
import itertools
import warnings
from collections import defaultdict
from beansdbadmin.core.hint import parse_new_hint_body
from beansdbadmin.core.data import parse_records
from beansdbadmin.core.hash import get_khash64
def get_url_content(url):
return urllib.urlopen(url).read()
def check_bucket(bucket):
assert 0 <= bucket < 16
def dir_to_dict(dir_str):
d = dict()
if dir_str:
for line in [x for x in dir_str.split('\n') if x]:
key_or_bucket, _hash, ver_or_count = line.split(' ')
d[key_or_bucket] = int(_hash) & 0xffff, int(ver_or_count)
return d
def get_bucket_keys_count(store, bucket, depth=1):
cmd = "@"
sub = bucket
if depth == 2:
cmd = "@%x" % (bucket/16)
sub = bucket % 16
result = store.get(cmd)
if result:
lines = result.split('\n')
for line in lines:
if len(line) == 0:
continue
d, _, c = line.split()
if d.endswith('/'):
bucket_ = int(d[0], 16)
if bucket_ == sub:
return int(c)
raise Exception('get %s from %s, reply = [%s], bucket %x not found' % (cmd, store, result, bucket))
def get_buckets_keys_count(store):
""" return dict: buckets -> count """
st = {}
try:
for line in (store.get('@') or '').split('\n'):
if line:
d, _, c = line.split(' ')
if not d.endswith('/'):
continue
st[int(d[0], 16)] = int(c)
return st
except IOError:
raise Exception("cannot get @ from %s" % (store))
def get_primary_buckets(store):
""" return possible primary buckets, might be wrong on temporary nodes,
result is list of buckets in integer
"""
ss = get_buckets_keys_count(store)
bucket_list = ss.items()
bucket_list = [x for x in bucket_list if x[1] > 0]
if not bucket_list:
return None
bucket_list.sort(lambda a, b: cmp(a[1], b[1]), reverse=True)
result = [bucket_list[0]]
for i in bucket_list[1:]:
if result[-1][1] / i[1] >= 2:
break
result.append(i)
return [x[0] for x in result]
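
# Illustrative (made-up counts): {0: 5000, 1: 4900, 2: 3} sorts to
# [(0, 5000), (1, 4900), (2, 3)]; 5000/4900 < 2 keeps bucket 1, then
# 4900/3 >= 2 stops the scan, so the primary buckets are [0, 1].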
def get_key_info_disk(store, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
info = store.get('??' + key)
if info:
return [int(x) for x in info.split()]
def is_gc_running(ip, port):
s = get_gc_status(ip, port)
if s and s.find('running') >= 0:
return True
return False
def get_gc_status(ip, port):
t = telnetlib.Telnet(ip, port)
t.write('optimize_stat\r\n')
out = t.read_until('\n')
t.write('quit\r\n')
t.close()
return out.strip("\r\n")
def connect(server, **kwargs):
comp_threshold = kwargs.pop('comp_threshold', 0)
prefix = kwargs.pop('prefix', None)
if prefix is not None:
warnings.warn('"prefix" is deprecated. '
'use douban.wrapper.Prefix instead.')
c = libmc.Client([server],
do_split=0,
comp_threshold=comp_threshold,
prefix=prefix)
c.config(libmc.MC_CONNECT_TIMEOUT, 300) # 0.3s
c.config(libmc.MC_POLL_TIMEOUT, 3000) # 3s
c.config(libmc.MC_RETRY_TIMEOUT, 5) # 5s
return c
class MCStore(object):
IGNORED_LIBMC_RET = frozenset([
libmc.MC_RETURN_OK,
libmc.MC_RETURN_INVALID_KEY_ERR
])
def __init__(self, addr):
self.addr = addr
self.host, port = addr.split(":")
self.port = int(port)
self.mc = connect(addr)
def __repr__(self):
return '<MCStore(addr=%s)>' % repr(self.addr)
def __str__(self):
return self.addr
def set(self, key, data, rev=0):
return bool(self.mc.set(key, data, rev))
def set_raw(self, key, data, rev=0, flag=0):
if rev < 0:
raise Exception(str(rev))
return self.mc.set_raw(key, data, rev, flag)
def set_multi(self, values, return_failure=False):
return self.mc.set_multi(values, return_failure=return_failure)
def _check_last_error(self):
last_err = self.mc.get_last_error()
if last_err not in self.IGNORED_LIBMC_RET:
raise IOError(last_err, self.mc.get_last_strerror())
def get(self, key):
try:
r = self.mc.get(key)
if r is None:
self._check_last_error()
return r
except ValueError:
self.mc.delete(key)
def get_raw(self, key):
r, flag = self.mc.get_raw(key)
if r is None:
self._check_last_error()
return r, flag
def get_multi(self, keys):
r = self.mc.get_multi(keys)
self._check_last_error()
return r
def delete(self, key):
return bool(self.mc.delete(key))
def delete_multi(self, keys, return_failure=False):
return self.mc.delete_multi(keys, return_failure=return_failure)
def exists(self, key):
return bool(self.mc.get('?' + key))
def incr(self, key, value):
return self.mc.incr(key, int(value))
class DBClient(MCStore):
def __init__(self, addr):
MCStore.__init__(self, addr)
self._is_old = None
def stats(self):
stats = self.mc.stats()
return stats.values()[0] if stats else None
def is_old(self):
if self._is_old is None:
ver = self.get_server_version()
self._is_old = (ver.strip().split(".")[0] == "0")
return self._is_old
def get_collision_summary(self, bucket):
check_bucket(bucket)
raw = self.get("@collision_%x" % bucket)
if raw is None:
return None
count, hcount, khash, data_size = raw.split()
return (int(count), int(hcount), int(khash, 16), int(data_size))
def get_collision(self, bucket):
check_bucket(bucket)
collisions = defaultdict(dict)
hint_data = self.get("@collision_all_%x" % bucket)
if hint_data is None:
return dict()
for key, meta, _ in parse_new_hint_body(hint_data):
khash_str, _, ver, vhash = meta
collisions[khash_str][key] = (vhash, ver)
return dict(collisions)
def get_records_by_khash_raw(self, khash):
if self.is_old():
return []
if not isinstance(khash, str):
khash = "%016x" % khash
return self.get("@@" + khash)
def get_records_by_khash(self, khash_str):
raw = self.get_records_by_khash_raw(khash_str)
if raw:
return parse_records(raw, False)
else:
return []
def start_gc(self, bucket='', start_fid=0, end_fid=None):
""" bucket must be in 0 or 00 string """
if bucket:
assert isinstance(bucket, basestring) and len(bucket) <= 2
t = telnetlib.Telnet(self.host, self.port)
tree = '@%s' % bucket
if end_fid is None:
gc_cmd = 'gc {} {}\n'.format(tree, start_fid)
else:
gc_cmd = 'gc {} {} {}\n'.format(tree, start_fid, end_fid)
t.write(gc_cmd)
out = t.read_until('\n').strip('\r\n')
assert out == 'OK'
t.write('quit\n')
t.close()
def start_gc_all_buckets(self, db_depth):
hex_digits = string.digits + 'abcdef'
buckets_iter = itertools.product(*[hex_digits for _ in range(db_depth)])
buckets = [''.join(i) for i in buckets_iter]
self.start_gc_buckets(buckets)
def start_gc_buckets(self, buckets):
for b in buckets:
self.start_gc(bucket=b)
while True:
status = self.get_gc_status()
if status.find('running') >= 0:
continue
elif status == 'success':
print "bucket %s gc done" % b
break
elif status == 'fail':
return self.fail("optimize_stat = fail")
else:
self.fail(status)
def get_gc_status(self):
return get_gc_status(self.host, self.port)
def get_version(self, key):
meta = self.get("?" + key)
if meta:
return int(meta.split()[0])
def item_count(self):
s = self.stats()
if s is None:
return None
return int(s['total_items'])
def get_key_info_mem(self, key, khash64=None):
''' return (vhash, ver) or None'''
if khash64 is None:
khash64 = get_khash64(key)
khash32_str = "@%08x" % (khash64 >> 32)
_dir = self.get_dir(khash32_str)
if self.is_old():
return _dir.get(key, None)
else:
return _dir.get("%016x" % khash64, None)
def get_khash_info_mem(self, khash):
''' return [(key, (vhash, ver))], key is "" for v2.'''
khash32 = "@%08x" % (khash >> 32)
_dir = self.get_dir(khash32)
ret = []
if self.is_old():
for k, (vhash, ver) in _dir.iteritems():
if get_khash64(k) == khash:
ret.append((k, (vhash, ver)))
else:
for k, (vhash, ver) in _dir.iteritems():
if int(k, 16) == khash:
return [("", (int(vhash), ver))]
return ret
def get_server_version(self):
try:
st = self.stats()
if st:
return st["version"]
except IOError:
logging.error("fail to get version %s", self)
except KeyError:
logging.error("fail to get version %s %s", self, st)
def get_dir(self, path):
''' return dict
case1: map dir(0-f) to (hash, count),
like {'0/': (1471, 27784005), ... },
case2: map key(or khash) to (vhash, version),
like {'3000000377e9c2ad': (22212, 1), ... }'''
try:
content = self.get(path)
except IOError:
content = ''
return dir_to_dict(content)
def list_dir(self, d): # FIXME: d should not need prefix @?
'''list all KEY in the dir!
not use it if dir is large!'''
for path, (vhash, ver) in sorted(self.get_dir(d).items()):
if path.endswith('/') and len(path) == 2:
for v in self.list_dir(d + path[:-1]):
yield v
else:
yield path, int(vhash), int(ver)
def get_bucket_keys_count(self, bucket, depth=1):
return get_bucket_keys_count(self, bucket, depth)
def get_key_info_disk(self, key):
'''return ver, vhash, flag, vsz, ts, fid, pos'''
return get_key_info_disk(self, key)
def prepare(self, data):
return libmc.encode_value(data, self.mc.comp_threshold)
def close(self):
pass
def test_new(addr, bucket):
b = bucket
c = DBClient(addr)
print "stats:", c.stats()
print 'version:', c.get_server_version()
print "isold:", c.is_old()
print "dir root:", c.get_dir("@")
print "bucket key count:", c.get_bucket_keys_count(int(b))
print "item_count:", c.item_count()
print "primary_buckets", get_primary_buckets(c)
leaf = c.get_dir("@" + b + "000000")
print "a dir leaf:", leaf
khash_str = list(leaf)[0]
print "a khash_str", khash_str
r = c.get_records_by_khash(khash_str)[0]
k = r[0]
print "key, len(value), (flag, tstamp, ver):", k, r[1], r[3:]
print "key info mem:", c.get_key_info_mem(k)
print "key info disk(ver, vhash, flag, vsz, ts, fid, pos):", \
c.get_key_info_disk(k)
print "key version:", c.get_version(k)
print "collision_summary", c.get_collision_summary(int(b))
print "gc status:", c.get_gc_status()
if __name__ == '__main__':
test_new("rosa3a:7900", '3')
| (per-file metric columns omitted)

8a975211bf46410d2e2a9a98de298bed52013baa | 6,589 | py | Python | lib/formatter/text.py | ylafon/redbot | 87f4edcc8ccda35f556331abd1e76d5e9b79cdd0 | ["Unlicense"] | stars: 1 (2021-06-01T12:08:29.000Z) | issues: null | forks: null

#!/usr/bin/env python
"""
HAR Formatter for REDbot.
"""
__author__ = "Jerome Renard <jerome.renard@gmail.com>"
__copyright__ = """\
Copyright (c) 2008-2010 Mark Nottingham
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import operator
import nbhttp.error as nberr
import redbot.speak as rs
from redbot.formatter import Formatter
nl = u"\n"
# TODO: errors and status on stderr with CLI?
class BaseTextFormatter(Formatter):
"""
Base class for text formatters."""
media_type = "text/plain"
msg_categories = [
rs.c.GENERAL, rs.c.CONNECTION, rs.c.CONNEG,
rs.c.CACHING, rs.c.VALIDATION, rs.c.RANGE
]
link_order = [
('link', 'Head Links'),
('script', 'Script Links'),
('frame', 'Frame Links'),
('iframe', 'IFrame Links'),
('img', 'Image Links'),
]
error_template = "Error: %s\n"
def __init__(self, *args, **kw):
Formatter.__init__(self, *args, **kw)
def start_output(self):
pass
def feed(self, red, chunk):
pass
def status(self, msg):
pass
def finish_output(self):
"Fill in the template with RED's results."
if self.red.res_complete:
self.output(self.format_headers(self.red) + nl + nl)
self.output(self.format_recommendations(self.red) + nl)
else:
if self.red.res_error is None:
pass
elif self.red.res_error['desc'] == nberr.ERR_CONNECT['desc']:
self.output(self.error_template % "Could not connect to the server (%s)" % \
self.red.res_error.get('detail', "unknown"))
elif self.red.res_error['desc'] == nberr.ERR_URL['desc']:
self.output(self.error_template % self.red.res_error.get(
'detail', "RED can't fetch that URL."))
elif self.red.res_error['desc'] == nberr.ERR_READ_TIMEOUT['desc']:
self.output(self.error_template % self.red.res_error['desc'])
elif self.red.res_error['desc'] == nberr.ERR_HTTP_VERSION['desc']:
self.output(self.error_template % "<code>%s</code> isn't HTTP." % \
self.red.res_error.get('detail', '')[:20])
else:
raise AssertionError, "Unidentified incomplete response error."
def format_headers(self, red):
out = [u"HTTP/%s %s %s" % (
red.res_version, red.res_status, red.res_phrase)]
return nl.join(out + [u"%s:%s" % h for h in red.res_hdrs])
def format_recommendations(self, red):
return "".join([self.format_recommendation(red, category) \
for category in self.msg_categories])
def format_recommendation(self, red, category):
messages = [msg for msg in red.messages if msg.category == category]
if not messages:
return ""
out = []
if [msg for msg in messages]:
out.append(u"* %s:" % category)
for m in messages:
out.append(
u" * %s" % (self.colorize(m.level, m.summary["en"] % m.vars))
)
smsgs = [msg for msg in getattr(m.subrequest, "messages", []) if msg.level in [rs.l.BAD]]
if smsgs:
out.append("")
for sm in smsgs:
out.append(
u" * %s" %
(self.colorize(sm.level, sm.summary["en"] % sm.vars))
)
out.append(nl)
out.append(nl)
return nl.join(out)
@staticmethod
def colorize(level, string):
# info
color_start = u"\033[0;32m"
color_end = u"\033[0;39m"
if level == "good":
color_start = u"\033[1;32m"
color_end = u"\033[0;39m"
if level == "bad":
color_start = u"\033[1;31m"
color_end = u"\033[0;39m"
if level == "warning":
color_start = u"\033[1;33m"
color_end = u"\033[0;39m"
if level == "uri":
color_start = u"\033[1;34m"
color_end = u"\033[0;39m"
return color_start + string + color_end
class TextFormatter(BaseTextFormatter):
"""
Format a RED object as text.
"""
name = "txt"
media_type = "text/plain"
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
BaseTextFormatter.finish_output(self)
self.done()
class TextListFormatter(BaseTextFormatter):
"""
Format multiple RED responses as a textual list.
"""
name = "txt"
media_type = "text/plain"
can_multiple = True
def __init__(self, *args, **kw):
BaseTextFormatter.__init__(self, *args, **kw)
def finish_output(self):
"Fill in the template with RED's results."
BaseTextFormatter.finish_output(self)
sep = "=" * 78
for hdr_tag, heading in self.link_order:
droids = [d[0] for d in self.red.link_droids if d[1] == hdr_tag]
self.output("%s\n%s (%d)\n%s\n" % (
sep, heading, len(droids), sep
))
if droids:
droids.sort(key=operator.attrgetter('uri'))
for droid in droids:
self.output(self.format_uri(droid) + nl + nl)
self.output(self.format_headers(droid) + nl + nl)
self.output(self.format_recommendations(droid) + nl + nl)
self.done()
def format_uri(self, red):
return self.colorize("uri", red.uri)
| (per-file metric columns omitted)

8a995f399ed25fbe111acb3f8ad5749b538eef0a | 433 | py | Python | python/re_user.py | seckcoder/lang-learn | 1e0d6f412bbd7f89b1af00293fd907ddb3c1b571 | ["Unlicense"] | stars: 1 (2017-10-14T04:23:45.000Z) | issues: null | forks: null

#!/usr/bin/env python
#-*- coding=utf-8 -*-
#
# Copyright 2012 Jike Inc. All Rights Reserved.
# Author: liwei@jike.com
import re
from urlparse import urlparse
def parse1():
p = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")
o = urlparse("http://weibo.com/2827699110/yz62AlEjF")
m = p.search(o.path)
print m.group('uid')
print m.group('mid')
def parse2():
exc_type_str = "<type 'exceptions.IndexError'>"
parse1()
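# Note: parse1 above shows the named-group idiom; groupdict() makes the
# extraction explicit. A Python 3 variant as a sketch (urlparse moved to
# urllib.parse in Python 3; parse_weibo is a hypothetical name):
import re
from urllib.parse import urlparse

WEIBO_PATH = re.compile(r"/(?P<uid>\d+)/(?P<mid>\w+)")

def parse_weibo(url):
    """Return {'uid': ..., 'mid': ...}, or None if the path does not match."""
    m = WEIBO_PATH.search(urlparse(url).path)
    return m.groupdict() if m else None

print(parse_weibo("http://weibo.com/2827699110/yz62AlEjF"))
# -> {'uid': '2827699110', 'mid': 'yz62AlEjF'}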
| (per-file metric columns omitted)

8ab94a7177eff40dfe2d54daa4adb7bbd8788e95 | 1,084 | py | Python | elm_mnist/elm_mnist.py | ahara/-blog | 926ae4808ede6efb1e64381a19a210235a97ac36 | ["MIT"] | stars/issues/forks: null

import cPickle
import numpy as np
from elm import ELMClassifier
from sklearn import linear_model
def load_mnist(path='../Data/mnist.pkl'):
with open(path, 'rb') as f:
return cPickle.load(f)
def get_datasets(data):
_train_x, _train_y = data[0][0], np.array(data[0][1]).reshape(len(data[0][1]), 1)
_val_x, _val_y = data[1][0], np.array(data[1][1]).reshape(len(data[1][1]), 1)
_test_x, _test_y = data[2][0], np.array(data[2][1]).reshape(len(data[2][1]), 1)
return _train_x, _train_y, _val_x, _val_y, _test_x, _test_y
if __name__ == '__main__':
# Load data sets
train_x, train_y, val_x, val_y, test_x, test_y = get_datasets(load_mnist())
# Build ELM
cls = ELMClassifier(n_hidden=7000,
alpha=0.93,
activation_func='multiquadric',
regressor=linear_model.Ridge(),
random_state=21398023)
cls.fit(train_x, train_y)
# Evaluate model
print 'Validation error:', cls.score(val_x, val_y)
print 'Test error:', cls.score(test_x, test_y)
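# Note: for context on ELMClassifier above: an extreme learning machine
# projects inputs through a fixed random hidden layer and fits only the
# linear readout. A minimal NumPy sketch of that idea (an illustration of
# the technique, not the library's actual implementation):
import numpy as np

class TinyELM(object):
    def __init__(self, n_hidden=100, alpha=1.0, seed=0):
        self.n_hidden = n_hidden
        self.alpha = alpha                     # ridge regularisation strength
        self.rng = np.random.default_rng(seed)

    def _hidden(self, X):
        return np.tanh(X @ self.W + self.b)    # fixed random projection

    def fit(self, X, y):
        self.W = self.rng.normal(size=(X.shape[1], self.n_hidden))
        self.b = self.rng.normal(size=self.n_hidden)
        H = self._hidden(X)
        # closed-form ridge regression for the output weights only
        A = H.T @ H + self.alpha * np.eye(self.n_hidden)
        self.beta = np.linalg.solve(A, H.T @ y)
        return self

    def predict(self, X):
        return self._hidden(X) @ self.beta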
| (per-file metric columns omitted)

8ac941eb3b632a517433fbaf339a5dae04e7e556 | 6,534 | py | Python | heatsink.py | sww1235/heatsink-calc | 3f28ac33b629ab5a12ddea4964f6dbe7dbc3e759 | ["MIT"] | stars: 1 (2020-11-20T07:09:00.000Z) | issues: null | forks: null

"""Class representations of heatsinks."""
import math
from scipy import constants as const
from materials import Aluminium_6063 as aluminium
class Heatsink:
"""
A Heatsink.
Extended by form factor subclasses
"""
def __init__(self, material, configuration):
"""Init material and configuration variables."""
self.material = material
self.configuration = configuration
class CylindricalAnnularFin(Heatsink):
"""Extend base heatsink class with a cylindrical annular fin heatsink."""
def __init__(self, material, finSpacing, finRadius,
finThickness, cylinderDiameter, numberOfFins,
ambAirTemp, maxJunctionTemp, maxSurfaceTemp):
"""
Init remainder of class variables.
NOTE: all models are based off of the finSpacing variable
NOTE: using the simplified model for calculation efficiency.
finSpacing : gap between adjacent fins
finRadius : radius of fin minus central support cylinder
(alternatively, fin depth)
finThickness : thickness of individual fin
cylinderDiameter: diameter of support cylinder
heatsinkLength : overall axial length of heatsink
overall diameter: outside diameter of heatsink including fins.
"""
self.finSpacing = finSpacing # in meters
self.finRadius = finRadius # in meters
self.finThickness = finThickness # in meters
self.cylinderDiameter = cylinderDiameter # in meters
self.numberOfFins = numberOfFins
self.heatsinkLength = ((self.finThickness * self.numberOfFins)
+ ((self.numberOfFins - 1) * self.finSpacing))
self.overallDiameter = self.cylinderDiameter + (2 * finRadius)
self.ambAirTemp = ambAirTemp # degrees kelvin
self.maxJunctionTemp = maxJunctionTemp
self.maxSurfaceTemp = maxSurfaceTemp
"""
NOTE: in order to prevent ridiculously long variable names, all
Nusselt Numbers are abbreviated as follows:
nn = Nusselt Number
nn0 = Nusselt Number 0 (Diffusive Limit)
nnOut = Nusselt Number for outer surfaces
nnIn = Nusselt Number for inner surfaces
nnInT = Nusselt Number for the thin boundry layer of inner surface
nnInFD = Nusselt Number for fully developed regime inner surface
"""
# thermal diffusivity of air at atmospheric pressure at 25C
alpha = 22.39 * 10**(-6) # (meters^2) / seconds
# Volumetric coefficient of thermal expansion
beta = aluminium.expansionCoefficient # 1/kelvin
heatsinkSurfaceTemp = maxSurfaceTemp # TODO: assumed placeholder; kelvin
# at atmospheric pressure at 25C
kinematicViscosity = 15.52 * 10**(-6) # meter^2/second
deltaT = heatsinkSurfaceTemp - ambAirTemp # kelvin
hLoD = self.heatsinkLength / self.overallDiameter
cDoD = self.cylinderDiameter / self.overallDiameter
oneChannelArea = (math.pi * (((self.overallDiameter**2
- self.cylinderDiameter**2) / 2)
+ (self.cylinderDiameter
* self.finSpacing)))
# area of circumscribed cylinder
areaCC = (math.pi * (((self.overallDiameter**2) / 2)
+ self.overallDiameter * self.heatsinkLength)) # meter^2
# inner surface area of heatsink
areaIn = (self.numberOfFins - 1) * oneChannelArea # meter^2
# outer surface area of heatsink
areaOut = (math.pi * (((self.overallDiameter**2) / 2)
+ (self.numberOfFins
* self.overallDiameter
* self.finThickness))) # meter^2
# overall area of heatsink
areaHS = areaIn + areaOut # meter^2
RayleighNbrFinSpacing = ((const.g
* beta
* deltaT
* self.finSpacing**4)
/ (kinematicViscosity
* alpha
* self.overallDiameter))
RayleighNbrOverallDiameter = ((const.g
* beta
* deltaT
* self.overallDiameter**3)
/ (kinematicViscosity * alpha))
if 0.1 <= hLoD <= 8:
self.nn0 = ((3.36 + (0.087 * hLoD))
* math.sqrt(areaCC)
* (self.finSpacing / areaHS)
)
if 0.1 <= (self.finThickness
* self.numberOfFins
/ self.overallDiameter) <= 8:
self.nnOut = ((0.499 - (0.026 * math.log(self.finThickness
* self.numberOfFins
/ self.overallDiameter)))
* math.pow(RayleighNbrFinSpacing, 0.25)
* (areaOut/areaHS)
)
if (0.1 <= cDoD <= 8) and (2.9 * 10**4
<= RayleighNbrOverallDiameter
<= 2.3 * 10**5):
nnInT = ((0.573 - (0.184 * cDoD) + (0.0388 * cDoD**2))
* math.pow(RayleighNbrFinSpacing, 0.25))
nnInFD = (((0.0323
- (0.0517 * cDoD)
+ (0.11 * cDoD**2))
* math.pow(RayleighNbrFinSpacing, 0.25))
+ (0.0516 + (0.0154 * cDoD)
- (0.0433 * cDoD**2)
+ (0.0792 * cDoD**3)) * RayleighNbrFinSpacing)
n = 1
self.nnIn = (math.pow(math.pow(nnInT, -n)
+ math.pow(nnInFD, -n), (-1/n)
)
* (areaIn/areaHS)
)
self.nn = (self.nnIn + self.nnOut + self.nn0)
super(CylindricalAnnularFin, self).__init__(material, self.__class__.__name__)
"""
Nusselt number = (Qconv * b) / (Ahs deltaT k)
Qconv = heat flow rate by convection (Watts)
b = finSpacing (meters)
Ahs = Area of heatsink (meter^2)
deltaT = temperature difference between surface temp of
heatsink and ambient air temp.
k = thermal conductivity of material (Watts / (meter kelvin))
"""
| (per-file metric columns omitted)

8acb71f44d08977a58d847a4d25a262b4cc3e603 | 35,471 | py | Python | src/parser.py | harkiratbehl/PyGM | e0a4e0b865afb607dfa0525ca386bfbe77bb6508 | ["MIT"] | stars: 2 (2019-02-13T11:30:08.000Z - 2021-02-14T04:20:44.000Z) | issues: null | forks: null

#!/usr/bin/python
from code import TreeNode
from code import ThreeAddressCode
from lexer import tokens
from random import *
from symbol_table import SymbolTable
from symbol_table import SymbolTableNode
import logging
import ply.lex as lex
import ply.yacc as yacc
import sys
from codegen import convert_tac
from code import Code
from codegen import generate_assembly
three_addr_code = ThreeAddressCode()
assembly_code = Code()
parsed = []
symbol_table = SymbolTable()
var_list = []
generated = {'temp': [], 'scope': ['scope_0'], 'label': [], 'str_list': []}
def gen(s):
if s not in generated.keys():
generated[s] = []
temp = s + '_' + str(len(generated[s]))
generated[s] += [temp]
return temp
def print_error(err):
print "*** Error: " + err + "! ***"
sys.exit(1)
def check_variable(TreeNode):
# return 2 values. first is the name for the variable, second is 0 if variable not found
# TreeNode.print_node()
# symbol_table.print_symbol_table()
if TreeNode.isLvalue == 1:
if TreeNode.data not in generated['temp']:
name = symbol_table.search_identifier(TreeNode.data)
if name == False:
name = symbol_table.search_function(TreeNode.data)
if name == False:
print_error("Variable " + TreeNode.data + " is undefined")
return TreeNode.data
else:
return name
else:
newNode = SymbolTableNode(name, TreeNode.input_type)
symbol_table.add_var(newNode)
if TreeNode.children == []:
return name
else:
return name + '[' + TreeNode.children + ']'
else:
newNode = SymbolTableNode(TreeNode.data, TreeNode.input_type)
symbol_table.add_var(newNode)
return TreeNode.data
else:
if TreeNode.input_type != 'STRING':
return TreeNode.data
else:
TreeNode.print_node()
return TreeNode.data
precedence = (
('left','IDENTIFIER'),
('right','ASSIGN_OP'),
('left','COMMA'),
('left','LSQUARE'),
('left','RSQUARE'),
('left','LCURLY'),
('left','RCURLY'),
('left','DDD'),
('left','DOT'),
('left','SEMICOLON'),
('left','COLON'),
('left','SINGLE_QUOTES'),
('left','DOUBLE_QUOTES'),
('left','DECIMAL_LIT'),
('left','OCTAL_LIT'),
('left','HEX_LIT'),
('left','FLOAT_LIT'),
('left','STRING_LIT'),
('left','NEWLINE'),
('left','BREAK'),
('left','CONTINUE'),
('left','RETURN'),
('left','RROUND'),
('left','LROUND'),
('left', 'OR_OR'),
('left', 'AMP_AMP'),
('left', 'EQ_EQ', 'NOT_EQ','LT','LT_EQ','GT','GT_EQ'),
('left', 'PLUS', 'MINUS','OR','CARET'),
('left', 'STAR', 'DIVIDE','MODULO','AMP','AND_OR','LS','RS'),
)
def p_SourceFile(p):
'''SourceFile : PACKAGE IDENTIFIER SEMICOLON ImportDeclList TopLevelDeclList
'''
parsed.append(p.slice)
# TODO: Ignoring package name and Imports for now
p[0] = p[5]
var_list = symbol_table.make_var_list()
three_addr_code = convert_tac(p[0].TAC)
symbol_table.fill_next_use(three_addr_code)
assembly_code = generate_assembly(three_addr_code,var_list,symbol_table)
# p[0].TAC.print_code()
# three_addr_code.print_code()
assembly_code.print_code()
# symbol_table.print_symbol_table()
return
def p_ImportDeclList(p):
'''ImportDeclList : ImportDecl SEMICOLON ImportDeclList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_TopLevelDeclList(p):
'''TopLevelDeclList : TopLevelDecl SEMICOLON TopLevelDeclList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
if p[3] != None:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('TopLevelDeclList', 0, 'INT', 0, [p[1]], p[1].TAC)
return
def p_TopLevelDecl(p):
'''TopLevelDecl : Declaration
| FunctionDecl
'''
parsed.append(p.slice)
p[0] = p[1]
return
def p_ImportDecl(p):
'''ImportDecl : IMPORT LROUND ImportSpecList RROUND
| IMPORT ImportSpec
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpecList(p):
'''ImportSpecList : ImportSpec SEMICOLON ImportSpecList
| empty
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_ImportSpec(p):
'''ImportSpec : DOT string_lit
| IDENTIFIER string_lit
| empty string_lit
'''
parsed.append(p.slice)
# TODO: Ignoring Imports for now
return
def p_Block(p):
'''Block : LCURLY ScopeStart StatementList ScopeEnd RCURLY
'''
parsed.append(p.slice)
p[0] = p[3]
p[0].data = p[2].data
p[0].name = 'Block'
return
def p_ScopeStart(p):
'''ScopeStart : empty
'''
parsed.append(p.slice)
symbol_table.add_scope(gen('scope'))
p[0] = TreeNode('ScopeStart', symbol_table.current_scope, 'None')
return
def p_ScopeEnd(p):
'''ScopeEnd : empty
'''
parsed.append(p.slice)
symbol_table.end_scope()
return
def p_StatementList(p):
'''StatementList : Statement SEMICOLON StatementList
| empty
'''
parsed.append(p.slice)
if len(p) == 4:
p[0] = TreeNode('StatementList', 0, 'INT', 0, [p[1].data] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
else:
p[0] = TreeNode('StatementList', 0, 'INT')
return
def p_Statement(p):
'''Statement : Declaration
| SimpleStmt
| ReturnStmt
| Block
| IfStmt
| SwitchStmt
| ForStmt
| BreakStmt
| ContinueStmt
| GotoStmt
| PrintIntStmt
| PrintStrStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Statement'
return
def p_PrintIntStmt(p):
'''PrintIntStmt : PRINTLN LROUND IDENTIFIER RROUND
| PRINTLN LROUND int_lit RROUND
'''
if hasattr(p[3], 'name') and p[3].name == 'int_lit':
p[0] = p[3]
# p[0].isLvalue = 0
else:
p[0] = TreeNode('IDENTIFIER', p[3], 'INT', 1, [])
p[0].TAC.add_line(['print_int', check_variable(p[0]), '', ''])
p[0].name = 'PrintIntStmt'
return
def p_PrintStrStmt(p):
'''PrintStrStmt : PRINTLN LROUND string_lit RROUND
'''
p[0] = p[3]
name = symbol_table.current_scope + '_' + gen('str_list')
parametersNode = SymbolTableNode(p[3].data, p[3].input_type)
newNode = SymbolTableNode(name, p[3].input_type, parameters = [parametersNode])
symbol_table.add_var(newNode)
p[0].TAC.add_line(['print_str', name, '', ''])
p[0].name = 'PrintStrStmt'
return
def p_Declaration(p):
'''Declaration : ConstDecl
| TypeDecl
| VarDecl
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Declaration'
return
def p_ConstDecl(p):
'''ConstDecl : CONST LROUND ConstSpecList RROUND
| CONST ConstSpec
'''
parsed.append(p.slice)
return
def p_ConstSpecList(p):
'''ConstSpecList : empty
| ConstSpecList ConstSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_ConstSpec(p):
'''ConstSpec : IDENTIFIER
| IdentifierList
| IDENTIFIER EQ Expression
| IdentifierList EQ ExpressionList
| IDENTIFIER Type EQ Expression
| IdentifierList Type EQ ExpressionList
'''
parsed.append(p.slice)
return
def p_IdentifierList(p):
'''IdentifierList : IDENTIFIER COMMA IdentifierBotList
'''
parsed.append(p.slice)
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_IdentifierBotList(p):
'''IdentifierBotList : IDENTIFIER COMMA IdentifierBotList
| IDENTIFIER
'''
parsed.append(p.slice)
if len(p) == 2:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node])
elif len(p) == 4:
node = TreeNode('IDENTIFIER', p[1], 'INT', 1)
p[0] = TreeNode('IdentifierBotList', 0, 'None', 0, [node] + p[3].children, p[3].TAC)
return
def p_ExpressionList(p):
'''ExpressionList : Expression COMMA ExpressionBotList
'''
parsed.append(p.slice)
p[0] = TreeNode('ExpressionList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ExpressionBotList(p):
'''ExpressionBotList : Expression COMMA ExpressionBotList
| Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]], p[1].TAC)
elif len(p) == 4:
p[0] = TreeNode('ExpressionBotList', 0, 'INT', 0, [p[1]] + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_TypeDecl(p):
'''TypeDecl : TYPE TypeSpecTopList
'''
parsed.append(p.slice)
return
def p_TypeSpecTopList(p):
'''TypeSpecTopList : TypeSpec
| LROUND TypeSpecList RROUND
'''
parsed.append(p.slice)
return
def p_TypeSpecList(p):
'''TypeSpecList : empty
| TypeSpecList TypeSpec SEMICOLON
'''
parsed.append(p.slice)
return
def p_TypeSpec(p):
'''TypeSpec : AliasDecl
| TypeDef
'''
parsed.append(p.slice)
return
def p_AliasDecl(p):
'''AliasDecl : IDENTIFIER EQ Type
'''
parsed.append(p.slice)
return
def p_TypeDef(p):
'''TypeDef : IDENTIFIER Type
'''
parsed.append(p.slice)
return
def p_Type(p):
'''Type : TypeLit
| StandardTypes
| LROUND Type RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Type'
return
def p_StandardTypes(p):
'''StandardTypes : PREDEFINED_TYPES
'''
parsed.append(p.slice)
p[0] = TreeNode('StandardTypes', p[1], 'NONE')
return
def p_TypeLit(p):
'''TypeLit : ArrayType
| StructType
| FunctionType
| PointerType
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'TypeLit'
return
def p_PointerType(p):
'''PointerType : STAR Type
'''
parsed.append(p.slice)
return
def p_ArrayType(p):
'''ArrayType : LSQUARE ArrayLength RSQUARE Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ArrayType', p[2].data, p[4].data)
return
def p_ArrayLength(p):
'''ArrayLength : Expression
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'ArrayLength'
return
def p_StructType(p):
'''StructType : STRUCT LCURLY FieldDeclList RCURLY
'''
parsed.append(p.slice)
return
def p_FieldDeclList(p):
'''FieldDeclList : empty
| FieldDeclList FieldDecl SEMICOLON
'''
parsed.append(p.slice)
return
def p_FieldDecl(p):
'''FieldDecl : IdentifierList Type TagTop
| IDENTIFIER Type TagTop
'''
parsed.append(p.slice)
return
def p_TagTop(p):
'''TagTop : empty
| Tag
'''
parsed.append(p.slice)
return
def p_Tag(p):
'''Tag : string_lit
'''
parsed.append(p.slice)
return
def p_FunctionType(p):
'''FunctionType : FUNC Signature
'''
parsed.append(p.slice)
return
def p_Signature(p):
'''Signature : Parameters
| Parameters Result
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Signature'
s = 'scope_' + str(len(generated['scope']))
symbol_table.new_scope(s)
for child in p[1].children:
symbol_table.add_identifier(child, s)
newNode = SymbolTableNode(s + '_' + child.data, child.input_type)
symbol_table.add_var(newNode, s)
# symbol_table.print_symbol_table()
if len(p) == 2:
p[0].input_type = TreeNode('Result', 0, 'None')
else:
p[0].input_type = p[2]
return
def p_Result(p):
'''Result : Parameters
| Type
'''
parsed.append(p.slice)
if p[1].name == 'Type':
p[0] = TreeNode('Result', 1, 'None', 0, [p[1]])
else:
p[0] = p[1]
p[0].name = 'Result'
return
def p_Parameters(p):
'''Parameters : LROUND RROUND
| LROUND ParameterList RROUND
'''
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Parameters', 0, 'None')
else:
p[0] = p[2]
p[0].name = 'Parameters'
return
def p_ParameterList(p):
'''ParameterList : ParameterDecl
| ParameterList COMMA ParameterDecl
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
p[0].name = 'ParameterList'
elif len(p) == 4:
p[0] = TreeNode('ParameterList', p[1].data + p[3].data, 'None', 0, p[1].children + p[3].children, p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
return
def p_ParameterDecl(p):
'''ParameterDecl : IdentifierList Type
| IDENTIFIER Type
| Type
'''
parsed.append(p.slice)
p[0] = TreeNode('ParameterDecl', 0, 'None')
if len(p) == 3:
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
for node in p[1].children:
p[0].data += 1
node.input_type = p[2].data
p[0].children += [node]
else:
node = TreeNode('IDENTIFIER', p[1], p[2].data, 1)
p[0].data += 1
p[0].children += [node]
else:
p[0].data += 1
p[0].children += [p[1]]
return
def p_VarDecl(p):
'''VarDecl : VAR VarSpecTopList
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'VarDecl'
return
def p_VarSpecTopList(p):
'''VarSpecTopList : VarSpec
| LROUND VarSpecList RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'VarSpecTopList'
return
def p_VarSpecList(p):
'''VarSpecList : empty
| VarSpecList VarSpec SEMICOLON
'''
return
def p_VarSpec(p):
'''VarSpec : IDENTIFIER Type
| IDENTIFIER EQ Expression
| IDENTIFIER Type EQ Expression
| IdentifierList Type
| IdentifierList EQ ExpressionList
| IdentifierList Type EQ ExpressionList
'''
# Insert into symbol table
p[0] = TreeNode('VarSpec', 0, 'NONE')
if hasattr(p[1], 'name') and p[1].name == 'IdentifierList':
zero_val = TreeNode('decimal_lit', 0, 'INT')
# l1 = len(p[1].children)
# if len(p) == 3:
# expr_list = TreeNode('Expr_List', 0, 'NONE', 0, [zero_val] * l1)
# elif len(p) == 4:
# expr_list = p[3]
# elif len(p) == 5:
# expr_list = p[4]
# l2 = len(expr_list.children)
# p[0].TAC.append_TAC(expr_list.TAC)
# p[0].TAC.append_TAC(p[1].TAC)
# if l1 == l2:
# for i in range(l1):
# p[0].TAC.add_line(['=', p[1].children[i], expr_list.children[i].data, ''])
# else:
# print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
else:
p[1] = TreeNode('IDENTIFIER',p[1],'INT',1)
if p[2].input_type != 'NONE':
# array case
# p[2].print_node()
if symbol_table.add_identifier(p[1], size = p[2].data) == False:
print_error("Unable to add to SymbolTable")
return
name = symbol_table.search_identifier(p[1].data)
newNode = SymbolTableNode(name, p[1].input_type,size = p[2].data)
symbol_table.add_var(newNode)
p[0] = TreeNode('VarSpec',p[1].data,'INT')
# expr = TreeNode('Expr', 0, 'NONE')
# if len(p) == 4:
# expr = p[3]
# p[0].TAC.append_TAC(p[3].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
# elif len(p) == 5:
# expr = p[4]
# p[0].TAC.append_TAC(p[4].TAC)
# p[0].TAC.add_line(['=', check_variable(p[1]), check_variable(expr), ''])
return
def p_FunctionDecl(p):
'''FunctionDecl : FUNC FunctionName Signature
| FUNC FunctionName Signature FunctionBody
'''
parsed.append(p.slice)
# symbol_table.print_symbol_table()
p[0] = TreeNode('FunctionDecl', 0, 'INT')
# print symbol_table.current_scope
# p[4].TAC.print_code()
symbol_table.add_function(p[2].data, p[3].input_type, p[3].children)
if len(p) == 5:
noOfParams = 0
for f in symbol_table.symbol_table[symbol_table.current_scope]['functions']:
if f.name == p[2].data:
noOfParams = len(f.parameters)
p[0].TAC.add_line(['func', check_variable(p[2]), str(noOfParams), ''])
for child in reversed(p[3].children):
p[0].TAC.add_line(['getparam', p[4].data + '_' + child.data, '', ''])
p[0].TAC.add_line(['stack_push', '', '', ''])
p[0].TAC.append_TAC(p[4].TAC)
return
def p_FunctionName(p):
'''FunctionName : IDENTIFIER
'''
parsed.append(p.slice)
p[0] = TreeNode('FunctionName', p[1], 'INT', 1)
return
def p_FunctionBody(p):
'''FunctionBody : Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'FunctionBody'
return
def p_SimpleStmt(p):
'''SimpleStmt : Expression
| Assignment
| ShortVarDecl
| IncDecStmt
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'SimpleStmt'
return
def p_IncDecStmt(p):
'''IncDecStmt : Expression PLUS_PLUS
| Expression MINUS_MINUS
'''
parsed.append(p.slice)
one_val = TreeNode('IncDecStmt', '1', 'INT')
p[0] = p[1]
if p[1].isLvalue == 1:
if p[2] == '++':
p[0].TAC.add_line(['+', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
p[0].TAC.add_line(['-', check_variable(p[1]), check_variable(p[1]), one_val.data])
else:
print_error("Lvalue required")
p[0].name = 'IncDecStmt'
return
def p_ShortVarDecl(p):
'''ShortVarDecl : ExpressionList ASSIGN_OP ExpressionList
| Expression ASSIGN_OP Expression
'''
parsed.append(p.slice)
# TODO: Add in symbol table
p[0] = TreeNode('ShortVarDecl', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1].children[i]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.add_line([p[2], check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.add_identifier(p[1]) == False:
print_error("Unable to add to SymbolTable")
return
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2], check_variable(p[1]), check_variable(p[3]), ''])
return
def p_Assignment(p):
'''Assignment : ExpressionList assign_op ExpressionList
| Expression assign_op Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('Assignment', 0, 'INT')
if p[1].name == 'ExpressionList':
l1 = len(p[1].children)
l2 = len(p[3].children)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
if l1 == l2:
for i in range(l1):
if p[1].children[i].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].children[i].data) == False and p[1].children[i].data not in generated['temp']:
print_error("Variable " + p[1].children[i].data + " is undefined")
return
if p[3].children[i].isLvalue == 1 and symbol_table.search_identifier(p[3].children[i].data) == False and p[3].children[i].data not in generated['temp']:
print_error("Variable " + p[3].children[i].data + " is undefined")
return
p[0].TAC.add_line([p[2].data, check_variable(p[1].children[i]), check_variable(p[3].children[i]), ''])
else:
print_error("Variable Declaration mismatch: " + str(l1) + " identifier(s) but " + str(l2) + " value(s)")
elif p[1].name == 'Expression':
if p[1].isLvalue == 0:
print_error("Lvalue required")
return
else:
if symbol_table.search_identifier(p[1].data) == False and p[1].data not in generated['temp']:
print_error("Variable " + p[1].data + " is undefined")
return
if p[3].isLvalue == 1 and symbol_table.search_identifier(p[3].data) == False and p[3].data not in generated['temp']:
print_error("Variable " + p[3].data + " is undefined")
return
# print symbol_table.current_scope
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line([p[2].data, check_variable(p[1]), check_variable(p[3]), ''])
return
def p_assign_op(p):
'''assign_op : EQ
| PLUS_EQ
| MINUS_EQ
| OR_EQ
| CARET_EQ
| STAR_EQ
| DIVIDE_EQ
| MODULO_EQ
| LS_EQ
| RS_EQ
| AMP_EQ
| AND_OR_EQ
'''
parsed.append(p.slice)
p[0] = TreeNode('assign_op', p[1], 'OPERATOR')
return
def p_IfStmt(p):
'''IfStmt : IF Expression Block
| IF Expression Block ELSE elseTail
'''
parsed.append(p.slice)
if len(p) == 4:
l1 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
if len(p) == 6:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('IfStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq', check_variable(p[2]), '0', l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[5].TAC)
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_elseTail(p):
'''elseTail : IfStmt
| Block
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'elseTail'
return
def p_SwitchStmt(p):
'''SwitchStmt : ExprSwitchStmt
'''
parsed.append(p.slice)
p[0] = TreeNode('SwitchStmt', 0, 'INT', 0, [], p[1].TAC)
return
def p_ExprSwitchStmt(p):
'''ExprSwitchStmt : SWITCH SimpleStmt SEMICOLON LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH SimpleStmt SEMICOLON Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
| SWITCH Expression LCURLY ScopeStart ExprCaseClauseList ScopeEnd RCURLY
'''
parsed.append(p.slice)
if len(p) == 8:
l1 = gen('label')
l2 = gen('label')
p[0] = TreeNode('ExprSwitchStmt', 0, 'INT')
p[0].TAC.append_TAC(p[2].TAC)
t1 = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line(['=', check_variable(t1) , check_variable(p[2]), ''])
p[0].TAC.append_TAC(p[5].data)
for i in range(len(p[5].children)):
p[0].TAC.add_line(['ifgotoeq', check_variable(t1), p[5].children[i][0], p[5].children[i][1]])
p[0].TAC.add_line(['goto', l2, '', ''])
for i in range(p[5].TAC.length()):
if i in p[5].TAC.leaders[1:]:
p[0].TAC.add_line(['goto', l2, '', ''])
p[0].TAC.add_line(p[5].TAC.code[i])
p[0].TAC.add_line(['label', l2, '', ''])
return
def p_ExprCaseClauseList(p):
'''ExprCaseClauseList : empty
| ExprCaseClauseList ExprCaseClause
'''
parsed.append(p.slice)
TAC1 = ThreeAddressCode()
TAC2 = ThreeAddressCode()
if len(p) == 3:
TAC1 = p[1].data
TAC2 = p[2].data
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT', 0, p[1].children + p[2].children, p[1].TAC)
p[0].TAC.add_leader(p[0].TAC.length())
p[0].TAC.append_TAC(p[2].TAC)
p[0].data.append_TAC(TAC2)
else:
p[0] = TreeNode('ExprCaseClauseList', TAC1, 'INT')
return
def p_ExprCaseClause(p):
'''ExprCaseClause : ExprSwitchCase COLON StatementList
'''
parsed.append(p.slice)
l1 = gen('label')
p[0] = TreeNode('ExprCaseClause', 0, 'INT')
# p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.add_line(['label', l1, '', ''])
# p[0].TAC.add_line(['ifgotoneq', p[1].children, p[1].children, l1])
p[0].TAC.append_TAC(p[3].TAC)
p[0].children = [[p[1].data,l1]]
p[0].data = p[1].TAC
return
def p_ExprSwitchCase(p):
'''ExprSwitchCase : CASE ExpressionList
| DEFAULT
| CASE Expression
'''
parsed.append(p.slice)
p[0] = TreeNode('ExprSwitchCase', 0, 'INT')
if len(p) == 3:
p[0].data = p[2].data
p[0].TAC = p[2].TAC
return
def p_ForStmt(p):
'''ForStmt : FOR Expression Block
| FOR Block
'''
parsed.append(p.slice)
p[0] = TreeNode('ForStmt', 0, 'INT')
if len(p) == 4:
l1 = gen('label')
l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['ifgotoeq',check_variable(p[2]), '0', l2])
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
p[0].TAC.add_line(['label', l2, '', ''])
if len(p) == 3:
l1 = gen('label')
# l2 = gen('label')
p[0].TAC.add_line(['label', l1, '', ''])
p[0].TAC.append_TAC(p[2].TAC)
p[0].TAC.add_line(['goto', l1, '', ''])
# p[0].TAC.add_line([l2])
return
def p_ReturnStmt(p):
'''ReturnStmt : RETURN
| RETURN Expression
| RETURN ExpressionList
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = TreeNode('ReturnStmt', 0, 'None')
p[0].TAC.add_line(['return', '', '', ''])
if len(p) == 3:
if p[2].name == 'Expression':
p[0] = p[2]
p[0].name = 'ReturnStmt'
p[0].TAC.add_line(['return', check_variable(p[2]), '', ''])
return
def p_BreakStmt(p):
'''BreakStmt : BREAK IDENTIFIER
'''
parsed.append(p.slice)
return
def p_ContinueStmt(p):
'''ContinueStmt : CONTINUE IDENTIFIER
'''
parsed.append(p.slice)
return
def p_GotoStmt(p):
'''GotoStmt : GOTO IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Expression(p):
'''Expression : UnaryExpr
| Expression OR_OR Expression
| Expression AMP_AMP Expression
| Expression EQ_EQ Expression
| Expression NOT_EQ Expression
| Expression LT Expression
| Expression LT_EQ Expression
| Expression GT Expression
| Expression GT_EQ Expression
| Expression PLUS Expression
| Expression MINUS Expression
| Expression OR Expression
| Expression CARET Expression
| Expression STAR Expression
| Expression DIVIDE Expression
| Expression MODULO Expression
| Expression LS Expression
| Expression RS Expression
| Expression AMP Expression
| Expression AND_OR Expression
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 4:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1, [], p[1].TAC)
p[0].TAC.append_TAC(p[3].TAC)
p[0].TAC.add_line([p[2],check_variable(p[0]), check_variable(p[1]), check_variable(p[3])])
p[0].name = 'Expression'
return
def p_UnaryExpr(p):
'''UnaryExpr : PrimaryExpr
| unary_op UnaryExpr
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
elif len(p) == 3:
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.add_line([check_variable(p[1]), check_variable(p[0]), check_variable(p[2]), ''])
p[0].name = 'UnaryExpr'
return
def p_unary_op(p):
'''unary_op : PLUS
| MINUS
| NOT
| CARET
| STAR
| AMP
| LT_MINUS
'''
parsed.append(p.slice)
p[0] = TreeNode('unary_op', p[1], 'OPERATOR')
return
def p_PrimaryExpr(p):
'''PrimaryExpr : Operand
| IDENTIFIER
| PrimaryExpr Selector
| PrimaryExpr Index
| PrimaryExpr Arguments
'''
parsed.append(p.slice)
if len(p) == 2:
if p.slice[1].type == 'IDENTIFIER':
p[0] = TreeNode('IDENTIFIER', p[1], 'INT', 1)
elif p[1].name == 'Operand':
p[0] = p[1]
elif len(p) == 3:
if p[2].name == 'Index':
p[0] = TreeNode('IDENTIFIER', p[1].data, 'INT', 1, p[2].data)
elif p[2].name == 'Arguments':
p[0] = TreeNode('IDENTIFIER', gen('temp'), 'INT', 1)
p[0].TAC.append_TAC(p[1].TAC)
p[0].TAC.append_TAC(p[2].TAC)
# p[1].print_node()
func = check_variable(p[1]).split("_")
scope, funcName = "_".join(func[:2]), "_".join(func[2:])
temp = 0
for f in symbol_table.symbol_table[scope]['functions']:
if f.name == funcName:
temp = len(f.parameters)
# p[2].print_node()
for child in p[2].children:
p[0].TAC.add_line(['putparam', check_variable(child), '', ''])
if temp != p[2].data:
print_error('Function ' + funcName + ' requires ' + str(temp) + ' parameters but ' + str(p[2].data) + ' supplied')
p[0].TAC.add_line(['call', check_variable(p[1]), str(p[2].data), ''])
p[0].TAC.add_line(['return_value', check_variable(p[0]), '', ''])
p[0].name = 'PrimaryExpr'
return
def p_Operand(p):
'''Operand : Literal
| LROUND Expression RROUND
'''
parsed.append(p.slice)
if len(p) == 2:
p[0] = p[1]
else:
p[0] = p[2]
p[0].name = 'Operand'
return
def p_Literal(p):
'''Literal : BasicLit
| FunctionLit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'Literal'
return
def p_BasicLit(p):
'''BasicLit : int_lit
| float_lit
| string_lit
| rune_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'BasicLit'
return
def p_int_lit(p):
'''int_lit : decimal_lit
| octal_lit
| hex_lit
'''
parsed.append(p.slice)
p[0] = p[1]
p[0].name = 'int_lit'
return
def p_decimal_lit(p):
'''decimal_lit : DECIMAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('decimal_lit', p[1], 'INT')
return
def p_octal_lit(p):
'''octal_lit : OCTAL_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('octal_lit', p[1], 'OCT')
return
def p_hex_lit(p):
'''hex_lit : HEX_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('hex_lit', p[1], 'HEX')
return
def p_float_lit(p):
'''float_lit : FLOAT_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('float_lit', p[1], 'FLOAT')
return
def p_FunctionLit(p):
'''FunctionLit : FUNC Signature FunctionBody
'''
parsed.append(p.slice)
# Anonymous Function
# Not implemented yet
return
def p_Selector(p):
'''Selector : DOT IDENTIFIER
'''
parsed.append(p.slice)
return
def p_Index(p):
'''Index : LSQUARE Expression RSQUARE
'''
parsed.append(p.slice)
p[0] = p[2]
p[0].name = 'Index'
return
def p_Arguments(p):
'''Arguments : LROUND RROUND
| LROUND ExpressionList RROUND
| LROUND Expression RROUND
| LROUND Type RROUND
| LROUND Type COMMA ExpressionList RROUND
| LROUND Type COMMA Expression RROUND
'''
# print p.slice
parsed.append(p.slice)
if len(p) == 3:
p[0] = TreeNode('Arguments', 0, 'None')
if len(p) == 4:
if p[2].name == 'Expression':
p[0] = TreeNode('Arguments', 1, 'None', 0, [p[2]], p[2].TAC)
if p[2].name == 'ExpressionList':
p[0] = p[2]
p[0].name = 'Arguments'
p[0].data = len(p[2].children)
return
def p_string_lit(p):
'''string_lit : STRING_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('string_lit', p[1], 'STRING')
return
def p_rune_lit(p):
'''rune_lit : RUNE_LIT
'''
parsed.append(p.slice)
p[0] = TreeNode('rune_lit', p[1], 'RUNE')
return
def p_empty(p):
'empty :'
pass
def p_error(p):
print p
if p is None:
print str(sys.argv[1]) + " :: You missed something at the end"
else:
print str(sys.argv[1]) + " :: Syntax error in line no " + str(p.lineno)
# Standard Logger
logging.basicConfig(
level = logging.DEBUG,
filename = "parselog.txt",
filemode = "w",
format = "%(filename)10s:%(lineno)4d:%(message)s"
)
log = logging.getLogger()
yacc.yacc(debug=True, debuglog=log)
input_file = sys.argv[1]
import os
if os.path.isfile(input_file) is False:
print('Input file ' + input_file + ' does not exist')
sys.exit(1)
input_code = open(input_file, 'r').read()
if input_code[len(input_code)-1] != '\n':
input_code += '\n'
yacc.parse(input_code, debug=log, tracking=True)
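# Note: the parser above leans on gen() to mint unique temporaries and
# labels per kind ('temp_0', 'label_1', ...). The same pattern isolated,
# as a Python 3 sketch (NameGen is a hypothetical wrapper, not part of
# the original):
from collections import defaultdict

class NameGen(object):
    """Mint unique names per kind: temp_0, temp_1, label_0, ..."""
    def __init__(self):
        self.generated = defaultdict(list)

    def __call__(self, kind):
        name = '%s_%d' % (kind, len(self.generated[kind]))
        self.generated[kind].append(name)
        return name

gen_name = NameGen()
print(gen_name('temp'), gen_name('temp'), gen_name('label'))
# -> temp_0 temp_1 label_0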
| (per-file metric columns omitted)

76f6512f7d0f9be2b22c77b6be1aa4a85a8c2498 | 1,530 | py | Python | utils/setAddress.py | wedvjin/rs485-moist-sensor | 90930a34d0e6ec977f6083e70cc4bd931d7453fb | ["Apache-2.0"] | stars: 1 (2019-03-04T13:24:42.000Z) | issues: null | forks: null

#!/usr/bin/python
"""Looks for sensor on the bus and changes it's address to the one specified on command line"""
import argparse
import minimalmodbus
import serial
from time import sleep
parser = argparse.ArgumentParser()
parser.add_argument('address', metavar='ADDR', type=int, choices=range(1, 248), help='An address to set')
args = parser.parse_args()
ADDRESS1 = 1
ADDRESS2 = args.address
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
minimalmodbus.PARITY=serial.PARITY_NONE
minimalmodbus.STOPBITS = 2
minimalmodbus.BAUDRATE=19200
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
def scanModbus():
for i in range(1, 248):
try:
print('Trying address: ' + str(i))
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
addressRead = sensor.read_register(0, functioncode=3)
if(i == addressRead):
print('FOUND!')
return (True, i)
except IOError:
print("nope...")
return (False, 0)
# sensor.debug=True
(found, i) = scanModbus()
if found:
print('Found sensor at address: ' + str(i))
try:
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=i)
print("writing new address: " + str(ADDRESS2))
sensor.write_register(0, value=ADDRESS2, functioncode=6)
sleep(0.2)
sensor = minimalmodbus.Instrument('/dev/ttyUSB5', slaveaddress=ADDRESS2)
print("reading address from holding register: ")
print(sensor.read_register(0, functioncode=3))
except Exception:
print("Could not change the address. Check your connections")
else:
print('No sensor on the bus found')
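# Note: scanModbus above hard-codes /dev/ttyUSB5. The same probe loop with
# the port as a parameter, sketched against the minimalmodbus
# Instrument/read_register calls used above (the port value below is an
# assumption):
def scan_modbus(port, first=1, last=247):
    """Probe each address; return the first that echoes itself, else None."""
    for addr in range(first, last + 1):
        try:
            sensor = minimalmodbus.Instrument(port, slaveaddress=addr)
            if sensor.read_register(0, functioncode=3) == addr:
                return addr
        except IOError:
            continue  # no response at this address, keep scanning
    return None

# found_addr = scan_modbus('/dev/ttyUSB0')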
| (per-file metric columns omitted)

76f8632c56e75a6a31f710898b1568e855cfd849 | 9,238 | py | Python | apps/interactor/tests/commander/commands/test_animations.py | Djelibeybi/photons | bc0aa91771d8e88fd3c691fb58f18cb876f292ec | ["MIT"] | stars: 51 (2020-07-03T08:34:48.000Z - 2022-03-16T10:56:08.000Z) | issues: 81 via delfick/photons (2020-07-03 - 2022-03-31) | forks: 8 (2020-07-24 - 2021-05-24)

# coding: spec
from interactor.commander.store import store, load_commands
from photons_app.mimic.event import Events
from photons_app import helpers as hp
from photons_canvas.points.simple_messages import Set64
from unittest import mock
import pytest
@pytest.fixture()
def store_clone():
load_commands()
return store.clone()
@pytest.fixture()
def final_future():
fut = hp.create_future()
try:
yield fut
finally:
fut.cancel()
@pytest.fixture()
async def sender(devices, final_future):
async with devices.for_test(final_future) as sender:
yield sender
@pytest.fixture()
async def make_server(store_clone, server_wrapper, FakeTime, MockedCallLater, sender, final_future):
with FakeTime() as t:
async with MockedCallLater(t) as m:
async with server_wrapper(store_clone, sender, final_future) as server:
yield server, m
@pytest.fixture()
def server(make_server):
return make_server[0]
@pytest.fixture()
def m(make_server):
return make_server[1]
@pytest.fixture(autouse=True)
def set_async_timeout(request):
request.applymarker(pytest.mark.async_timeout(15))
describe "Animation Commands":
async it "can get info and help", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help"},
)
assert b"Available animations include" in got
assert b"* dice" in got
assert b"To see options for a particular animation, run this again" in got
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/help", "args": {"animation_name": "dice"}},
)
assert b"dice animation" in got
assert b"This animation has the following options:" in got
assert b"colour range options" in got
async it "can control an animation", server, m:
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
identity = "first"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity}},
)
assert "animations" in got
assert got["animations"] == [identity]
assert got["started"] == identity
identity2 = "second"
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"identity": identity2}},
)
assert "animations" in got
identities = [identity, identity2]
assert got["animations"] == identities
assert got["started"] == identity2
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info == {"animations": {identity: mock.ANY, identity2: mock.ANY}, "paused": []}
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity2}},
json_output={
"animations": identities,
"paused": identities,
"pausing": [identity2],
},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identity2}},
json_output={
"animations": identities,
"paused": [identity],
"resuming": [identity2],
},
)
# pause multiple
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identities}},
json_output={"animations": identities, "paused": identities, "pausing": identities},
)
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": identities}},
json_output={
"animations": identities,
"paused": [],
"resuming": identities,
},
)
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": identity}},
json_output={"animations": identities, "paused": [identity], "pausing": [identity]},
)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity: mock.ANY, identity2: mock.ANY}
assert info["paused"] == [identity]
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": identity}},
json_output={
"animations": [identity, identity2],
"paused": [identity],
"stopping": [identity],
},
)
await m.add(0.5)
# info
info = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
)
assert info["animations"] == {identity2: mock.ANY}
assert info["paused"] == []
async it "pausing an animation actually pauses the animation", devices, server, m:
tile = devices["tile"]
io = tile.io["MEMORY"]
store = devices.store(tile)
store.clear()
first_set_64 = tile.attrs.event_waiter.wait_for_incoming(io, Set64)
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 3}]]}},
)
identity = got["started"]
await first_set_64
now = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now > 0
await m.add(5)
now2 = store.count(Events.INCOMING(tile, io, pkt=Set64))
assert now2 > now
identity = got["started"]
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > now
# pause
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/pause", "args": {"pause": [identity]}},
)
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# resume
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/resume", "args": {"resume": [identity]}},
)
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) > 0
# stop
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/stop", "args": {"stop": [identity]}},
)
store.clear()
await m.add(5)
store.clear()
await m.add(5)
assert store.count(Events.INCOMING(tile, io, pkt=Set64)) == 0
# info
await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/info"},
json_output={"animations": {}, "paused": []},
)
async it "can get information", server, m:
# start
got = await server.assertCommand(
"/v1/lifx/command",
{"command": "animation/start", "args": {"animations": [["balls", {"every": 0.3}]]}},
)
identity = got["started"]
info = await server.assertCommand("/v1/lifx/command", {"command": "animation/info"})
assert info["paused"] == []
assert identity in info["animations"]
assert info["animations"][identity]["animations_ran"] == 1
assert info["animations"][identity]["current_animation"] == {
"name": "balls",
"options": {
"ball_colors": "<ManyColor:[((0, 360), (1000.0, 1000.0), (1000.0, 1000.0), (3500.0, 3500.0))]>",
"fade_amount": 0.02,
"num_balls": 5,
"rate": "<Rate 0.9 -> 1>",
},
"started": mock.ANY,
}
assert info["animations"][identity]["options"]["combined"]
assert "unlocked" in info["animations"][identity]["options"]["pauser"]
assert info["animations"][identity]["options"]["noisy_network"] == 0
specific = await server.assertCommand(
"/v1/lifx/command", {"command": "animation/info", "args": {"identity": identity}}
)
info["animations"][identity]["current_animation"]["started"] = mock.ANY
assert info["animations"][identity] == specific
| (per-file metric columns omitted)

0a36ce830d4011a6336f73093bb61b54abdb2cbd | 7,782 | py | Python | pypy/interpreter/test/test_generator.py | m4sterchain/mesapy | ed546d59a21b36feb93e2309d5c6b75aa0ad95c9 | ["Apache-2.0", "OpenSSL"] | stars: 381 (2018-08-18 - 2022-02-06) | issues: 16 (2018-09-22 - 2022-02-22) | forks: 30 (2018-08-20 - 2022-01-12)

class AppTestGenerator:
def test_generator(self):
def f():
yield 1
assert f().next() == 1
def test_generator2(self):
def f():
yield 1
g = f()
assert g.next() == 1
raises(StopIteration, g.next)
def test_attributes(self):
def f():
yield 1
assert g.gi_running
g = f()
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
assert g.gi_frame is not None
assert not g.gi_running
g.next()
assert not g.gi_running
raises(StopIteration, g.next)
assert not g.gi_running
assert g.gi_frame is None
assert g.gi_code is f.__code__
assert g.__name__ == 'f'
def test_generator3(self):
def f():
yield 1
g = f()
assert list(g) == [1]
def test_generator4(self):
def f():
yield 1
g = f()
assert [x for x in g] == [1]
def test_generator5(self):
d = {}
exec """if 1:
def f():
v = (yield )
yield v
g = f()
g.next()
""" in d
g = d['g']
assert g.send(42) == 42
def test_throw1(self):
def f():
yield 2
g = f()
# two arguments version
raises(NameError, g.throw, NameError, "Error")
def test_throw2(self):
def f():
yield 2
g = f()
# single argument version
raises(NameError, g.throw, NameError("Error"))
def test_throw3(self):
def f():
try:
yield 1
yield 2
except:
yield 3
g = f()
assert g.next() == 1
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw4(self):
d = {}
exec """if 1:
def f():
try:
yield 1
v = (yield 2)
except:
yield 3
g = f()
""" in d
g = d['g']
assert g.next() == 1
assert g.next() == 2
assert g.throw(NameError("Error")) == 3
raises(StopIteration, g.next)
def test_throw5(self):
def f():
try:
yield 1
except:
x = 3
try:
yield x
except:
pass
g = f()
g.next()
# String exceptions are not allowed anymore
raises(TypeError, g.throw, "Error")
assert g.throw(Exception) == 3
raises(StopIteration, g.throw, Exception)
def test_throw6(self):
def f():
yield 2
g = f()
raises(NameError, g.throw, NameError, "Error", None)
def test_throw_fail(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), "error")
def test_throw_fail2(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, list())
def test_throw_fail3(self):
def f():
yield 1
g = f()
raises(TypeError, g.throw, NameError("Error"), None, "not tb object")
def test_throw_finishes_generator(self):
def f():
yield 1
g = f()
assert g.gi_frame is not None
raises(ValueError, g.throw, ValueError)
assert g.gi_frame is None
def test_throw_bug(self):
def f():
try:
x.throw(IndexError) # => "generator already executing"
except ValueError:
yield 1
x = f()
res = list(x)
assert res == [1]
def test_throw_on_finished_generator(self):
def f():
yield 1
g = f()
res = g.next()
assert res == 1
raises(StopIteration, g.next)
raises(NameError, g.throw, NameError)
def test_close(self):
def f():
yield 1
g = f()
assert g.close() is None
def test_close2(self):
def f():
try:
yield 1
except GeneratorExit:
raise StopIteration
g = f()
g.next()
assert g.close() is None
def test_close3(self):
def f():
try:
yield 1
except GeneratorExit:
raise NameError
g = f()
g.next()
raises(NameError, g.close)
def test_close_fail(self):
def f():
try:
yield 1
except GeneratorExit:
yield 2
g = f()
g.next()
raises(RuntimeError, g.close)
def test_close_on_collect(self):
## we need to exec it, else it won't run on python2.4
d = {}
exec """
def f():
try:
yield
finally:
f.x = 42
""".strip() in d
g = d['f']()
g.next()
del g
import gc
gc.collect()
assert d['f'].x == 42
def test_generator_raises_typeerror(self):
def f():
yield 1
g = f()
raises(TypeError, g.send) # one argument required
raises(TypeError, g.send, 1) # not started, must send None
def test_generator_explicit_stopiteration(self):
def f():
yield 1
raise StopIteration
g = f()
assert [x for x in g] == [1]
def test_generator_propagate_stopiteration(self):
def f():
it = iter([1])
while 1: yield it.next()
g = f()
assert [x for x in g] == [1]
def test_generator_restart(self):
def g():
i = me.next()
yield i
me = g()
raises(ValueError, me.next)
def test_generator_expression(self):
exec "res = sum(i*i for i in range(5))"
assert res == 30
def test_generator_expression_2(self):
d = {}
exec """
def f():
total = sum(i for i in [x for x in z])
return total, x
z = [1, 2, 7]
res = f()
""" in d
assert d['res'] == (10, 7)
def test_repr(self):
def myFunc():
yield 1
g = myFunc()
r = repr(g)
assert r.startswith("<generator object myFunc at 0x")
assert list(g) == [1]
assert repr(g) == r
def test_unpackiterable_gen(self):
g = (i*i for i in range(-5, 3))
assert set(g) == set([0, 1, 4, 9, 16, 25])
assert set(g) == set()
assert set(i for i in range(0)) == set()
def test_explicit_stop_iteration_unpackiterable(self):
def f():
yield 1
raise StopIteration
assert tuple(f()) == (1,)
def test_exception_is_cleared_by_yield(self):
def f():
try:
foobar
except NameError:
yield 5
raise # should raise "no active exception to re-raise"
gen = f()
next(gen) # --> 5
try:
next(gen)
except TypeError:
pass
def test_multiple_invalid_sends(self):
def mygen():
yield 42
g = mygen()
raises(TypeError, g.send, 2)
raises(TypeError, g.send, 2)
def test_should_not_inline(space):
from pypy.interpreter.generator import should_not_inline
w_co = space.appexec([], '''():
def g(x):
yield x + 5
return g.__code__
''')
assert should_not_inline(w_co) == False
w_co = space.appexec([], '''():
def g(x):
yield x + 5
yield x + 6
return g.__code__
''')
assert should_not_inline(w_co) == True
| 24.092879 | 77 | 0.467489 | 945 | 7,782 | 3.730159 | 0.167196 | 0.069504 | 0.056738 | 0.062695 | 0.495035 | 0.428936 | 0.371915 | 0.272057 | 0.230922 | 0.140426 | 0 | 0.023841 | 0.423285 | 7,782 | 322 | 78 | 24.167702 | 0.761586 | 0.035209 | 0 | 0.592857 | 0 | 0 | 0.104575 | 0 | 0 | 0 | 0 | 0 | 0.146429 | 0 | null | null | 0.007143 | 0.007143 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 0a554fb894afeaf01a54f7e6b34139ca26334475 | 8,219 | py | Python | seqenv/ontology.py | xapple/seqenv | a898b936b64b51340f439b05fc8909f4ed826247 | ["MIT"] | 7 | 2016-12-02T09:28:00.000Z | 2021-11-04T13:47:16.000Z | seqenv/ontology.py | xapple/seqenv | a898b936b64b51340f439b05fc8909f4ed826247 | ["MIT"] | 7 | 2016-04-07T17:00:50.000Z | 2018-05-14T12:16:06.000Z | seqenv/ontology.py | xapple/seqenv | a898b936b64b51340f439b05fc8909f4ed826247 | ["MIT"] | 4 | 2016-03-15T16:41:12.000Z | 2021-12-06T09:30:35.000Z |
# Built-in modules #
# Internal modules #
from seqenv import module_dir
from seqenv.common.cache import property_cached
# Third party modules #
import sh, networkx
import matplotlib.colors
# A list of envos to help test this module #
test_envos = [
"ENVO:00000033",
"ENVO:00000043",
"ENVO:00000067",
"ENVO:00000143",
"ENVO:00000210",
"ENVO:00000215",
"ENVO:00000475",
]
################################################################################
class Ontology(object):
"""A object that gives you access to the graph (network with nodes and edges)
of the ENVO ontology from the OBO file's path.
Other libraries not used here that could be added:
* graphviz: http://graphviz.readthedocs.org/en/latest/api.html#digraph
* pydot: https://github.com/erocarrera/pydot
"""
def __init__(self, path=None):
"""Give the path to the OBO file"""
if path is None: path = module_dir + 'data_envo/envo.obo'
self.path = path
# --------------------------- In this section --------------------------- #
# orange_obo
# goatools
# orange_network
# pygraphviz
# networkx
@property_cached
def orange_obo(self):
"""The ontology loaded by the `orange` library.
* http://orange.biolab.si
* http://orange-bioinformatics.readthedocs.org/en/latest/
* https://github.com/biolab/orange-bio
* https://bitbucket.org/biolab/orange-bioinformatics
To install: $ pip install Orange-Bioinformatics
"""
from orangecontrib.bio.ontology import OBOOntology
return OBOOntology(self.path)
@property_cached
def goatools(self):
"""The network loaded into goatools' format.
* https://github.com/tanghaibao/goatools
To install: $ pip install goatools
"""
from goatools import obo_parser
return obo_parser.GODag(self.path)
@property_cached
def orange_network(self):
"""The network converted to `orange network` format.
Doesn't seem to work until they update PyPI.
* https://bitbucket.org/biolab/orange-network/
* http://orange-network.readthedocs.org/en/latest/
To install: $ pip install orange-network
"""
return self.orange_obo.to_network()
@property_cached
def pygraphviz(self):
"""The network converted to `pygraphviz` format.
* http://pygraphviz.github.io/documentation/pygraphviz-1.3rc1/
To install: $ pip install pygraphviz
"""
g = self.orange_obo.to_graphviz()
assert g.is_directed()
assert g.is_strict()
return g
@property_cached
def networkx(self):
"""The network converted to `networkx` format.
Seems like it loses directionality.
* https://networkx.readthedocs.org/en/stable/
To install: $ pip install networkx
"""
g = self.orange_obo.to_networkx()
assert networkx.is_directed_acyclic_graph(g)
return g
# --------------------------- In this section --------------------------- #
# test
# get_subgraph
# add_weights
# draw_to_pdf
# write_to_dot
def get_subgraph(self, envos=None):
"""Given a list of ENVO terms, get the subgraph that contains them all
and all their ancestors, up to the root.
Outputs a networkx DiGraph object."""
# Testing mode #
if envos is None: envos = test_envos
# All nodes #
nodes = set(n for e in envos for n in networkx.descendants(self.networkx, e))
nodes.update(envos)
nodes = list(nodes)
# Return #
return self.networkx.subgraph(nodes)
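    # Added usage sketch (not part of the original module; assumes the default
    # data_envo/envo.obo shipped with the package is present):
    #
    #     onto = Ontology()
    #     g = onto.get_subgraph(test_envos)          # networkx DiGraph
    #     a = onto.add_style(onto.add_weights(g))    # pygraphviz AGraph
    #     onto.write_to_dot(a, 'envo_subgraph.dot')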
def add_weights(self, g, weights=None):
"""Input a networkx DiGraph object.
Outputs a pygraphviz AGraph object."""
g = networkx.nx_agraph.to_agraph(g)
if weights is None: return g
for envo in weights:
node = g.get_node(envo)
weight = weights[envo]
color = matplotlib.colors.rgb2hex((1.0, 1.0 - weight, 0.0))
node.attr['fillcolor'] = color
return g
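    # Added note: rgb2hex((1.0, 1.0 - weight, 0.0)) maps weight 0.0 to yellow
    # ('#ffff00') and weight 1.0 to red ('#ff0000'), which is why the legend
    # below says "darker nodes weigh more".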
def add_style(self, g):
"""Input a pygraphviz AGraph object.
Outputs a pygraphviz AGraph object."""
for node in g.nodes():
text = node.attr['name']
node.attr['label'] = text.replace(' ','\\n')
node.attr['name'] = ''
node.attr['shape'] = 'Mrecord'
node.attr['style'] = 'filled'
# To add the envo id to each node, uncomment:
#envo = node.attr['label']
#node.attr['label'] = "{<f0> %s|<f1> %s}" % (envo, text)
for edge in g.edges():
if edge.attr['label'] == 'located_in': edge.attr['color'] = 'turquoise4'
edge.attr['label'] = ''
return g
def write_to_dot(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle: handle.write(g.to_string())
def add_legend(self, path):
"""Input the path to a dot file."""
legend_txt = """
digraph {
rankdir=LR
node [shape=plaintext,fontname="helvetica"]
subgraph cluster_01 {
label = "NB: darker nodes weigh more";
key [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td align="right" port="i1">Is</td></tr>
<tr><td align="right" port="i2">Part</td></tr>
<tr><td align="right" port="i3">Located</td></tr>
</table>>];
key2 [label=<<table border="0" cellpadding="2" cellspacing="0" cellborder="0">
<tr><td port="i1">a</td></tr>
<tr><td port="i2">of</td></tr>
<tr><td port="i3">in</td></tr>
</table>>];
key:i1:e -> key2:i1:w [color=red];
key:i2:e -> key2:i2:w [color=blue];
key:i3:e -> key2:i3:w [color=turquoise4];
}"""
orig_txt = [line.rstrip('\n') for line in open(path, 'r') if line]
new_text = [line.lstrip() for line in legend_txt.split('\n') if line]
new_text = '\n'.join(new_text + orig_txt[2:])
with open(path, 'w') as handle: handle.write(new_text)
def draw_to_pdf(self, in_path, out_path):
"""Input a path to a dot file."""
sh.dot(in_path, '-Tpdf', '-o', out_path)
# --------------------------- In this section --------------------------- #
# descends
def descends(self, e, root):
"""Does the envo term `e` descend from the node `root`?
Returns True or False."""
# Auto conversion #
if isinstance(e, int): e = "ENVO:%08d" % e
if isinstance(root, int): root = "ENVO:%08d" % root
# Return #
return e in networkx.ancestors(self.networkx, root)
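    # Added usage sketch (hypothetical values): integers are auto-expanded to
    # the "ENVO:%08d" form, so these two calls are equivalent:
    #
    #     onto.descends(143, 33)
    #     onto.descends("ENVO:00000143", "ENVO:00000033")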
# --------------------------- In this section --------------------------- #
# print_test
# draw_with_networkx
# draw_with_pygraphviz
def print_test(self, e=None):
"""Just a method to see a bit how the different libraries work."""
# Test node #
if e is None: e = test_envos[0]
# Goa #
print "Goa: "
print self.goatools[e]
# Pygraphviz #
print "pygraphviz: "
print self.pygraphviz[e]
print self.pygraphviz.successors(e)
print self.pygraphviz.predecessors(e)
print self.pygraphviz.get_node(e)
# Networkx #
import networkx
print "networkx: "
print self.networkx[e]
print self.networkx.successors(e)
print self.networkx.predecessors(e)
print networkx.ancestors(self.networkx, e) # same as predecessors
print networkx.descendants(self.networkx, e) # almost as child_to_parents
def draw_with_networkx(self, g, path):
"""Input a networkx DiGraph object."""
from matplotlib import pyplot
networkx.draw(g)
pyplot.savefig(path)
pyplot.close()
def draw_with_pygraphviz(self, g, path):
"""Input a pygraphviz AGraph object."""
with open(path, 'w') as handle:
handle.write(g.to_string())
| 35.426724 | 90 | 0.565397 | 1,002 | 8,219 | 4.558882 | 0.257485 | 0.021016 | 0.018608 | 0.020797 | 0.20359 | 0.091068 | 0.075306 | 0.065674 | 0.058669 | 0.058669 | 0 | 0.01723 | 0.279718 | 8,219 | 231 | 91 | 35.580087 | 0.754392 | 0.100742 | 0 | 0.090164 | 0 | 0.016393 | 0.223596 | 0.03455 | 0 | 0 | 0 | 0 | 0.02459 | 0 | null | null | 0 | 0.065574 | null | null | 0.114754 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 0a56c8065ff434f391ba424536df2984e5ef9221 | 862 | py | Python | dbschema/revertDBinstall.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | null | null | null | dbschema/revertDBinstall.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | null | null | null | dbschema/revertDBinstall.py | leschzinerlab/myami-3.2-freeHand | 974b8a48245222de0d9cfb0f433533487ecce60d | ["MIT"] | 1 | 2019-09-05T20:58:37.000Z | 2019-09-05T20:58:37.000Z |
#!/usr/bin/env python
from sinedon import dbupgrade, dbconfig
import updatelib

project_dbupgrade = dbupgrade.DBUpgradeTools('projectdata', drop=True)

if __name__ == "__main__":
    updatelib_inst = updatelib.UpdateLib(project_dbupgrade)
    checkout_version = raw_input('Revert to checkout version, for example, 2.1 -->')
    if checkout_version != 'trunk':
        try:
            map((lambda x: int(x)), checkout_version.split('.')[:2])
        except:
            print "valid versions are 'trunk', '2.1', or '2.1.2' etc"
            raise
    checkout_revision = int(raw_input('Revert to checkout revision, for example, 16500 -->'))
    updatelib_inst.updateDatabaseVersion(checkout_version)
    print "\033[35mVersion Updated in the database %s\033[0m" % checkout_version
    updatelib_inst.updateDatabaseRevision(checkout_revision)
    print "\033[35mRevision Updated in the database as %d\033[0m" % checkout_revision
| 41.047619 | 90 | 0.759861 | 114 | 862 | 5.54386 | 0.517544 | 0.142405 | 0.079114 | 0.050633 | 0.075949 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040789 | 0.118329 | 862 | 20 | 91 | 43.1 | 0.790789 | 0.023202 | 0 | 0 | 0 | 0 | 0.326992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.117647 | null | null | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 0a585a8c735b3266210fbee5416e533aa2feb0c6 | 3,396 | py | Python | notebooks/classical_clustering.py | prise6/smart-iss-posts | fc913078e7fbe6343fd36ec6ca9852322247da5d | ["MIT"] | null | null | null | notebooks/classical_clustering.py | prise6/smart-iss-posts | fc913078e7fbe6343fd36ec6ca9852322247da5d | ["MIT"] | 10 | 2020-01-28T23:15:20.000Z | 2022-03-12T00:12:31.000Z | notebooks/classical_clustering.py | prise6/smart-iss-posts | fc913078e7fbe6343fd36ec6ca9852322247da5d | ["MIT"] | null | null | null |
#%% [markdown]
# # Classical clustering
#%% [markdown]
# ## Standard imports
import os
#%%
%load_ext autoreload
%autoreload 2
os.chdir('/home/jovyan/work')
#%% [markdown]
# ## Import iss
#%%
from iss.tools import Config
from iss.tools import Tools
from iss.models import SimpleConvAutoEncoder
from iss.clustering import ClassicalClustering
from iss.clustering import AdvancedClustering
from dotenv import find_dotenv, load_dotenv
import numpy as np
import matplotlib.pyplot as plt  # added: plt is used below but was never imported
#%% [markdown]
# ## Loading the config
#%%
load_dotenv(find_dotenv())
cfg = Config(project_dir = os.getenv("PROJECT_DIR"), mode = os.getenv("MODE"))
#%% [markdown]
# ## Loading the model
#%%
## load the model
model_type = 'simple_conv'
cfg.get('models')[model_type]['model_name'] = 'model_colab'
model = SimpleConvAutoEncoder(cfg.get('models')[model_type])
#%% [markdown]
## Loading the images
#%%
filenames = Tools.list_directory_filenames('data/processed/models/autoencoder/train/k/')
generator_imgs = Tools.generator_np_picture_from_filenames(filenames, target_size = (27, 48), batch = 496, nb_batch = 10)
#%%
pictures_id, pictures_preds = Tools.encoded_pictures_from_generator(generator_imgs, model)
#%%
intermediate_output = pictures_preds.reshape((pictures_preds.shape[0], 3*6*16))
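#%%
# Added note: the encoder output has shape (n, 3, 6, 16) per image; it is
# flattened here to (n, 288) so the PCA/KMeans steps below can consume it.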
#%% [markdown]
# ## PCA
# Dimensionality reduction
#%%
clustering = ClassicalClustering(cfg.get('clustering')['classical'], pictures_id, intermediate_output)
#%%
clustering.compute_pca()
#%% [markdown]
# ## Kmeans
# First clusters
#%%
clustering.compute_kmeans()
clustering.compute_kmeans_centers()
#%% [markdown]
# ## CAH (agglomerative hierarchical clustering)
# Second clusters
#%%
clustering.compute_cah()
clustering.compute_cah_labels()
#%% [markdown]
# ## Results
#%% [markdown]
# ### Intermediate clusters
#%%
fig = plt.figure(1, figsize=(12, 7))
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.kmeans_labels)
#%% [markdown]
# ### Final clusters
#%%
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = clustering.final_labels)
#%% [markdown]
# ### Saving the models
#%%
clustering.save()
#%%
# clustering = ClassicalClustering(cfg.get('clustering')['classical'])
clustering.load()
#%% [markdown]
# ## Visualizing the clusters
#%%
def select_cluster(clustering, id_cluster):
return [os.path.join('data/processed/models/autoencoder/train/k/', res[0] + '.jpg') for res in clustering.get_zip_results() if res[2] == id_cluster]
#%%
from IPython.display import Image
#%%
for cl in range(0,19):
print("Cluster %s" % (cl))
res_tmp = select_cluster(clustering, cl)
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp[:100]]
# img = Tools.display_mosaic(image_array, nrow = 10)
# fig = plt.figure(1, figsize=(12, 7))
# plt.imshow(img, aspect = 'auto')
# plt.show()
#%% [markdown]
# ## Zoom on cluster 0
#%%
res_tmp = select_cluster(clustering, 1)
#%%
print(len(res_tmp))
image_array = [Tools.read_np_picture(f, target_size = (54, 96)) for f in res_tmp]
#%%
Tools.display_mosaic(image_array, nrow = 18)
#%%
col = [1 if l == 1 else 0 for l in clustering.kmeans_labels]
plt.scatter(clustering.pca_reduction[:, 0], clustering.pca_reduction[:, 1], c = col)
#%%
plt.scatter(clustering.pca_reduction[np.array(col) == 1, 0], clustering.pca_reduction[np.array(col) == 1, 1])
| 22.196078 | 152 | 0.707008 | 441 | 3,396 | 5.272109 | 0.342404 | 0.044731 | 0.075699 | 0.03957 | 0.343226 | 0.295914 | 0.190968 | 0.162581 | 0.141505 | 0.141505 | 0 | 0.019614 | 0.12927 | 3,396 | 152 | 153 | 22.342105 | 0.766655 | 0.232332 | 0 | 0.043478 | 0 | 0 | 0.076801 | 0.033426 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.195652 | null | null | 0.065217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 0a585a8c735b3266210fbee5416e533aa2feb0c6 | 8,847 | py | Python | desktop/core/src/desktop/auth/views.py | bopopescu/hue-5 | 665c275d0c0570b1a4a34a293503cc72ec35695c | ["Apache-2.0"] | 1 | 2018-05-07T05:40:36.000Z | 2018-05-07T05:40:36.000Z | desktop/core/src/desktop/auth/views.py | lockhart39/HueQualityAndIngestionApp | c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c | ["Apache-2.0"] | null | null | null | desktop/core/src/desktop/auth/views.py | lockhart39/HueQualityAndIngestionApp | c75e55a43a8bdeb7aa0f5bf2101ec72b01dcac1c | ["Apache-2.0"] | 1 | 2022-03-21T09:41:35.000Z | 2022-03-21T09:41:35.000Z |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import oauth2 as oauth
except:
oauth = None
import cgi
import logging
import urllib
from datetime import datetime
from axes.decorators import watch_login
import django.contrib.auth.views
from django.core import urlresolvers
from django.core.exceptions import SuspiciousOperation
from django.contrib.auth import login, get_backends, authenticate
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from desktop.auth import forms as auth_forms
from desktop.lib.django_util import render
from desktop.lib.django_util import login_notrequired
from desktop.lib.django_util import JsonResponse
from desktop.log.access import access_warn, last_access_map
from desktop.conf import LDAP, OAUTH, DEMO_ENABLED
from hadoop.fs.exceptions import WebHdfsException
from useradmin.models import get_profile
from useradmin.views import ensure_home_directory, require_change_password
LOG = logging.getLogger(__name__)
def get_current_users():
"""Return dictionary of User objects and
a dictionary of the user's IP address and last access time"""
current_users = { }
for session in Session.objects.all():
try:
uid = session.get_decoded().get(django.contrib.auth.SESSION_KEY)
except SuspiciousOperation:
# If secret_key changed, this resolution won't work.
uid = None
if uid is not None:
try:
userobj = User.objects.get(pk=uid)
current_users[userobj] = last_access_map.get(userobj.username, { })
except User.DoesNotExist:
LOG.debug("User with id=%d does not exist" % uid)
return current_users
def first_login_ever():
backends = get_backends()
for backend in backends:
if hasattr(backend, 'is_first_login_ever') and backend.is_first_login_ever():
return True
return False
def get_backend_names():
return get_backends and [backend.__class__.__name__ for backend in get_backends()]
@login_notrequired
@watch_login
def dt_login(request, from_modal=False):
redirect_to = request.REQUEST.get('next', '/')
is_first_login_ever = first_login_ever()
backend_names = get_backend_names()
is_active_directory = 'LdapBackend' in backend_names and ( bool(LDAP.NT_DOMAIN.get()) or bool(LDAP.LDAP_SERVERS.get()) )
if is_active_directory:
UserCreationForm = auth_forms.LdapUserCreationForm
AuthenticationForm = auth_forms.LdapAuthenticationForm
else:
UserCreationForm = auth_forms.UserCreationForm
AuthenticationForm = auth_forms.AuthenticationForm
if request.method == 'POST':
request.audit = {
'operation': 'USER_LOGIN',
'username': request.POST.get('username')
}
# For first login, need to validate user info!
first_user_form = is_first_login_ever and UserCreationForm(data=request.POST) or None
first_user = first_user_form and first_user_form.is_valid()
if first_user or not is_first_login_ever:
auth_form = AuthenticationForm(data=request.POST)
if auth_form.is_valid():
# Must login by using the AuthenticationForm.
# It provides 'backends' on the User object.
user = auth_form.get_user()
userprofile = get_profile(user)
login(request, user)
if request.session.test_cookie_worked():
request.session.delete_test_cookie()
auto_create_home_backends = ['AllowAllBackend', 'LdapBackend', 'SpnegoDjangoBackend']
if is_first_login_ever or any(backend in backend_names for backend in auto_create_home_backends):
# Create home directory for first user.
try:
ensure_home_directory(request.fs, user.username)
except (IOError, WebHdfsException), e:
LOG.error(_('Could not create home directory.'), exc_info=e)
request.error(_('Could not create home directory.'))
if require_change_password(userprofile):
return HttpResponseRedirect(urlresolvers.reverse('useradmin.views.edit_user', kwargs={'username': user.username}))
userprofile.first_login = False
userprofile.last_activity = datetime.now()
userprofile.save()
msg = 'Successful login for user: %s' % user.username
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': True})
else:
return HttpResponseRedirect(redirect_to)
else:
request.audit['allowed'] = False
msg = 'Failed login for user: %s' % request.POST.get('username')
request.audit['operationText'] = msg
access_warn(request, msg)
if from_modal or request.REQUEST.get('fromModal', 'false') == 'true':
return JsonResponse({'auth': False})
else:
first_user_form = None
auth_form = AuthenticationForm()
if DEMO_ENABLED.get() and not 'admin' in request.REQUEST:
user = authenticate(username=request.user.username, password='HueRocks')
login(request, user)
ensure_home_directory(request.fs, user.username)
return HttpResponseRedirect(redirect_to)
if not from_modal:
request.session.set_test_cookie()
renderable_path = 'login.mako'
if from_modal:
renderable_path = 'login_modal.mako'
return render(renderable_path, request, {
'action': urlresolvers.reverse('desktop.auth.views.dt_login'),
'form': first_user_form or auth_form,
'next': redirect_to,
'first_login_ever': is_first_login_ever,
'login_errors': request.method == 'POST',
'backend_names': backend_names,
'active_directory': is_active_directory
})
def dt_logout(request, next_page=None):
"""Log out the user"""
username = request.user.get_username()
request.audit = {
'username': username,
'operation': 'USER_LOGOUT',
'operationText': 'Logged out user: %s' % username
}
backends = get_backends()
if backends:
for backend in backends:
if hasattr(backend, 'logout'):
response = backend.logout(request, next_page)
if response:
return response
return django.contrib.auth.views.logout(request, next_page)
def profile(request):
"""
Dumps JSON for user-profile information.
"""
return render(None, request, _profile_dict(request.user))
def _profile_dict(user):
return dict(
username=user.username,
first_name=user.first_name,
last_name=user.last_name,
last_login=str(user.last_login), # datetime object needs to be converted
email=user.email)
# OAuth is based on Twitter as example.
@login_notrequired
def oauth_login(request):
assert oauth is not None
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
client = oauth.Client(consumer)
resp, content = client.request(OAUTH.REQUEST_TOKEN_URL.get(), "POST", body=urllib.urlencode({
'oauth_callback': 'http://' + request.get_host() + '/login/oauth_authenticated/'
}))
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
request.session['request_token'] = dict(cgi.parse_qsl(content))
url = "%s?oauth_token=%s" % (OAUTH.AUTHENTICATE_URL.get(), request.session['request_token']['oauth_token'])
return HttpResponseRedirect(url)
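# Added note (flow inferred from the code above and below): oauth_login
# fetches a request token and redirects the browser to the provider's
# authenticate URL; the provider then calls back to /login/oauth_authenticated/,
# where the request token is exchanged for an access token and the user is
# logged in.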
@login_notrequired
def oauth_authenticated(request):
consumer = oauth.Consumer(OAUTH.CONSUMER_KEY.get(), OAUTH.CONSUMER_SECRET.get())
token = oauth.Token(request.session['request_token']['oauth_token'], request.session['request_token']['oauth_token_secret'])
client = oauth.Client(consumer, token)
resp, content = client.request(OAUTH.ACCESS_TOKEN_URL.get(), "GET")
if resp['status'] != '200':
raise Exception(_("Invalid response from OAuth provider: %s") % resp)
access_token = dict(cgi.parse_qsl(content))
user = authenticate(access_token=access_token)
login(request, user)
redirect_to = request.REQUEST.get('next', '/')
return HttpResponseRedirect(redirect_to)
| 34.158301 | 126 | 0.722505 | 1,136 | 8,847 | 5.442782 | 0.242958 | 0.019408 | 0.022643 | 0.018114 | 0.19214 | 0.172893 | 0.123403 | 0.110464 | 0.096232 | 0.082161 | 0 | 0.001513 | 0.178026 | 8,847 | 258 | 127 | 34.290698 | 0.848735 | 0.119363 | 0 | 0.222857 | 0 | 0 | 0.11319 | 0.01041 | 0 | 0 | 0 | 0 | 0.005714 | 0 | null | null | 0.017143 | 0.131429 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 6a57cefd47f3150e0a9d0bbdcd3affcfe90d72c9 | 15,520 | py | Python | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | ["Apache-2.0"] | 10 | 2015-09-23T19:28:06.000Z | 2021-04-27T02:32:27.000Z | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | ["Apache-2.0"] | null | null | null | legtool/tabs/servo_tab.py | jpieper/legtool | ab3946051bd16817b61d3073ce7be8bd27af90d0 | ["Apache-2.0"] | 9 | 2015-10-16T07:26:18.000Z | 2021-01-13T07:18:35.000Z |
# Copyright 2014 Josh Pieper, jjp@pobox.com.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import traceback  # added: used by monitor_status() below but missing from the original imports
import trollius as asyncio
from trollius import Task, From, Return
import PySide.QtCore as QtCore
import PySide.QtGui as QtGui
from ..servo import selector
from .common import BoolContext
from . import gazebo_config_dialog
def spawn(callback):
def start():
Task(callback())
return start
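# Added note: spawn() adapts a coroutine to a Qt slot -- e.g.
# `button.clicked.connect(spawn(self.handle_connect_clicked))` schedules the
# coroutine as a trollius Task when the signal fires (see the connect calls
# below).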
class ServoTab(object):
def __init__(self, ui, status):
self.ui = ui
self.status = status
self.servo_controls = []
self.monitor_thread = None
self.servo_model = ''
self.servo_name_map = {}
self.ui.statusText.setText('not connected')
self.ui.connectButton.clicked.connect(
spawn(self.handle_connect_clicked))
self.ui.typeCombo.currentIndexChanged.connect(self.handle_type_change)
self.handle_type_change()
self.ui.configureGazeboButton.clicked.connect(
self.handle_configure_gazebo)
servo_layout = QtGui.QVBoxLayout()
servo_layout.setSpacing(0)
servo_layout.setContentsMargins(0, 0, 0, 0)
self.ui.scrollContents.setLayout(servo_layout)
self.ui.servoCountSpin.valueChanged.connect(self.handle_servo_count)
self.handle_servo_count()
self.ui.powerCombo.currentIndexChanged.connect(
spawn(self.handle_power))
self.ui.captureCurrentButton.clicked.connect(
spawn(self.handle_capture_current))
self.update_connected(False)
self.ui.addPoseButton.clicked.connect(self.handle_add_pose)
self.ui.removePoseButton.clicked.connect(self.handle_remove_pose)
self.ui.moveToPoseButton.clicked.connect(
spawn(self.handle_move_to_pose))
self.ui.updatePoseButton.clicked.connect(self.handle_update_pose)
self.ui.poseList.currentItemChanged.connect(
self.handle_poselist_current_changed)
self.controller = None
self.servo_update = BoolContext()
def resizeEvent(self, event):
pass
def poses(self):
result = []
for i in range(self.ui.poseList.count()):
result.append(self.ui.poseList.item(i).text())
return result
def pose(self, name):
for i in range(self.ui.poseList.count()):
if self.ui.poseList.item(i).text() == name:
return self.ui.poseList.item(i).data(QtCore.Qt.UserRole)
return dict([(i, 0.0) for i in range(self.ui.servoCountSpin.value())])
@asyncio.coroutine
def handle_connect_clicked(self):
val = self.ui.typeCombo.currentText().lower()
try:
self.controller = yield From(
selector.select_servo(
val,
serial_port=self.ui.serialPortCombo.currentText(),
model_name=self.servo_model,
servo_name_map=self.servo_name_map))
self.ui.statusText.setText('connected')
self.update_connected(True)
except Exception as e:
self.ui.statusText.setText('error: %s' % str(e))
self.update_connected(False)
def handle_type_change(self):
val = self.ui.typeCombo.currentText().lower()
self.ui.serialPortCombo.setEnabled(val == 'herkulex')
self.ui.configureGazeboButton.setEnabled(val == 'gazebo')
def handle_configure_gazebo(self):
servo_name_map = self.servo_name_map.copy()
for x in range(self.ui.servoCountSpin.value()):
if not x in servo_name_map:
servo_name_map[x] = ''
dialog = gazebo_config_dialog.GazeboConfigDialog(
self.servo_model, servo_name_map)
dialog.setModal(True)
result = dialog.exec_()
if result == QtGui.QDialog.Rejected:
return
self.servo_model = dialog.model_name()
self.servo_name_map = dialog.servo_name_map()
def handle_servo_count(self):
count = self.ui.servoCountSpin.value()
while len(self.servo_controls) > count:
# Remove the last one
last = self.servo_controls[-1]
widget = last['widget']
self.ui.scrollContents.layout().removeWidget(widget)
widget.deleteLater()
self.servo_controls = self.servo_controls[:-1]
while len(self.servo_controls) < count:
# Add a new one.
servo_id = len(self.servo_controls)
label = QtGui.QLabel()
label.setText('ID %d:' % servo_id)
slider = QtGui.QSlider(QtCore.Qt.Horizontal)
slider.setRange(-180, 180)
doublespin = QtGui.QDoubleSpinBox()
doublespin.setRange(-180, 180)
doublespin.setDecimals(1)
save = QtGui.QPushButton()
save.setText("Save")
move = QtGui.QPushButton()
move.setText("Move")
current = QtGui.QLabel()
current.setText('N/A')
current.setMinimumWidth(60)
widget = QtGui.QWidget()
layout = QtGui.QHBoxLayout(widget)
layout.addWidget(label)
layout.addWidget(slider)
layout.addWidget(doublespin)
layout.addWidget(save)
layout.addWidget(move)
layout.addWidget(current)
slider.valueChanged.connect(
functools.partial(self.handle_servo_slider, servo_id))
doublespin.valueChanged.connect(
functools.partial(self.handle_servo_spin, servo_id))
save.clicked.connect(
functools.partial(self.handle_servo_save, servo_id))
move.clicked.connect(
functools.partial(self.handle_servo_move, servo_id))
self.ui.scrollContents.layout().addWidget(widget)
self.servo_controls.append({
'widget': widget,
'label': label,
'slider': slider,
'doublespin': doublespin,
'save': save,
'move': move,
'current': current})
@asyncio.coroutine
def handle_power(self):
text = self.ui.powerCombo.currentText().lower()
value = None
if text == 'free':
value = selector.POWER_FREE
elif text == 'brake':
value = selector.POWER_BRAKE
elif text == 'drive':
value = selector.POWER_ENABLE
else:
raise NotImplementedError()
yield From(self.controller.enable_power(value))
def update_connected(self, value):
self.ui.controlGroup.setEnabled(value)
self.ui.posesGroup.setEnabled(value)
if self.monitor_thread is not None:
self.monitor_thread.cancel()
self.monitor_thread = None
if value:
self.handle_power()
self.monitor_thread = Task(self.monitor_status())
@asyncio.coroutine
def monitor_status(self):
voltages = {}
temperatures = {}
ident = 0
while True:
if (self.controller is not None and
hasattr(self.controller, 'get_voltage')):
try:
ident = (ident + 1) % len(self.servo_controls)
this_voltage = yield From(
self.controller.get_voltage([ident]))
voltages.update(this_voltage)
# Get all temperatures.
this_temp = yield From(
self.controller.get_temperature([ident]))
temperatures.update(this_temp)
def non_None(value):
return [x for x in value if x is not None]
message = "Servo status: "
if len(non_None(voltages.values())):
message += "%.1f/%.1fV" % (
min(non_None(voltages.values())),
max(non_None(voltages.values())))
if len(non_None(temperatures.values())):
message += " %.1f/%.1fC" % (
min(non_None(temperatures.values())),
max(non_None(temperatures.values())))
self.status.showMessage(message, 10000)
except Exception as e:
traceback.print_exc()
print "Error reading servo:", type(e), e
yield From(asyncio.sleep(2.0))
@asyncio.coroutine
def set_single_pose(self, servo_id, value):
yield From(
self.controller.set_single_pose(servo_id, value, pose_time=0.2))
def handle_servo_slider(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['slider'].value()
control['doublespin'].setValue(value)
Task(self.set_single_pose(servo_id, value))
def handle_servo_spin(self, servo_id, event):
if self.servo_update.value:
return
with self.servo_update:
control = self.servo_controls[servo_id]
value = control['doublespin'].value()
control['slider'].setSliderPosition(int(value))
Task(self.set_single_pose(servo_id, value))
def handle_servo_save(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
current_data = self.ui.poseList.currentItem().data(
QtCore.Qt.UserRole)
current_data[servo_id] = (
self.servo_controls[servo_id]['doublespin'].value())
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, current_data)
self.handle_poselist_current_changed(None, None)
def handle_servo_move(self, servo_id):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
self.servo_controls[servo_id]['doublespin'].setValue(data[servo_id])
@asyncio.coroutine
def handle_capture_current(self):
with self.servo_update:
results = yield From(
self.controller.get_pose(range(len(self.servo_controls))))
for ident, angle in results.iteritems():
if angle is None:
continue
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle))
control['doublespin'].setValue(angle)
def add_list_pose(self, name):
self.ui.poseList.addItem(name)
item = self.ui.poseList.item(self.ui.poseList.count() - 1)
item.setFlags(QtCore.Qt.ItemIsEnabled |
QtCore.Qt.ItemIsSelectable |
QtCore.Qt.ItemIsEditable |
QtCore.Qt.ItemIsSelectable)
return item
def get_new_pose_name(self):
poses = set([self.ui.poseList.item(x).text()
for x in range(self.ui.poseList.count())])
count = 0
while True:
name = 'new_pose_%d' % count
if name not in poses:
return name
count += 1
def generate_pose_data(self):
return dict(
[ (i, control['doublespin'].value())
for i, control in enumerate(self.servo_controls) ])
def handle_add_pose(self):
pose_name = self.get_new_pose_name()
item = self.add_list_pose(pose_name)
item.setData(QtCore.Qt.UserRole, self.generate_pose_data())
self.ui.poseList.editItem(item)
def handle_remove_pose(self):
    if self.ui.poseList.currentRow() < 0:
        return
    # Pose data lives on the QListWidget item itself, so removing the item is
    # enough; the original's `del self.poses[pose_name]` would raise TypeError
    # because poses() is a method, not a dict.
    self.ui.poseList.takeItem(self.ui.poseList.currentRow())
@asyncio.coroutine
def handle_move_to_pose(self):
if self.ui.poseList.currentRow() < 0:
return
values = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
yield From(self.controller.set_pose(values, pose_time=1.0))
with self.servo_update:
for ident, angle_deg in values.iteritems():
control = self.servo_controls[ident]
control['slider'].setSliderPosition(int(angle_deg))
control['doublespin'].setValue(angle_deg)
def handle_update_pose(self):
if self.ui.poseList.currentRow() < 0:
return
self.ui.poseList.currentItem().setData(
QtCore.Qt.UserRole, self.generate_pose_data())
self.handle_poselist_current_changed(None, None)
def handle_poselist_current_changed(self, current, previous):
if self.ui.poseList.currentRow() < 0:
return
data = self.ui.poseList.currentItem().data(QtCore.Qt.UserRole)
for i, control in enumerate(self.servo_controls):
control['current'].setText('%.1f' % data[i])
def read_settings(self, config):
if not config.has_section('servo'):
return
self.ui.typeCombo.setCurrentIndex(config.getint('servo', 'type'))
self.ui.serialPortCombo.setEditText(config.get('servo', 'port'))
self.ui.servoCountSpin.setValue(config.getint('servo', 'count'))
self.servo_model = config.get('servo', 'model')
if config.has_section('servo.names'):
self.servo_name_map = {}
for name, value in config.items('servo.names'):
self.servo_name_map[int(name)] = value
if config.has_section('servo.poses'):
for name, value in config.items('servo.poses'):
this_data = {}
for element in value.split(','):
ident, angle_deg = element.split('=')
this_data[int(ident)] = float(angle_deg)
item = self.add_list_pose(name)
item.setData(QtCore.Qt.UserRole, this_data)
def write_settings(self, config):
config.add_section('servo')
config.add_section('servo.poses')
config.add_section('servo.names')
config.set('servo', 'type', self.ui.typeCombo.currentIndex())
config.set('servo', 'port', self.ui.serialPortCombo.currentText())
config.set('servo', 'count', self.ui.servoCountSpin.value())
config.set('servo', 'model', self.servo_model)
for key, value in self.servo_name_map.iteritems():
config.set('servo.names', str(key), value)
for row in range(self.ui.poseList.count()):
item = self.ui.poseList.item(row)
pose_name = item.text()
values = item.data(QtCore.Qt.UserRole)
config.set(
'servo.poses', pose_name,
','.join(['%d=%.2f' % (ident, angle_deg)
for ident, angle_deg in values.iteritems()]))
| 35.514874 | 78 | 0.593814 | 1,727 | 15,520 | 5.188188 | 0.169658 | 0.042857 | 0.045313 | 0.014286 | 0.333259 | 0.252232 | 0.206027 | 0.152567 | 0.11529 | 0.086496 | 0 | 0.005517 | 0.299227 | 15,520 | 436 | 79 | 35.59633 | 0.818316 | 0.040013 | 0 | 0.168675 | 0 | 0 | 0.032926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.003012 | 0.024096 | null | null | 0.006024 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1
| 6a8f3e25920be24fb569cc55eff90ae879efa647 | 73,328 | py | Python | ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py | cas-packone/ambari-chs | 68033fbd4b810b6642853f2ad9128cbbd4e0cb7b | ["Apache-2.0"] | 3 | 2019-06-20T11:49:36.000Z | 2020-12-11T10:44:29.000Z | ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py | cas-packone/ambari-chs | 68033fbd4b810b6642853f2ad9128cbbd4e0cb7b | ["Apache-2.0"] | null | null | null | ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py | cas-packone/ambari-chs | 68033fbd4b810b6642853f2ad9128cbbd4e0cb7b | ["Apache-2.0"] | 1 | 2019-03-20T08:36:17.000Z | 2019-03-20T08:36:17.000Z |
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
from unittest import TestCase
from mock.mock import patch, MagicMock
class TestHDP206StackAdvisor(TestCase):
def setUp(self):
import imp
import os
testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp206StackAdvisorClassName = 'HDP206StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
stack_advisor = imp.load_module( 'stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE) )
with open(hdp206StackAdvisorPath, 'rb') as fp:
self.stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(self.stack_advisor_impl, hdp206StackAdvisorClassName)
self.stackAdvisor = clazz()
self.maxDiff = None
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
def test_recommendationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendOnAllHosts(self):
""" Recommend on all hosts for cardinality ALL even if the component has been installed in the cluster before """
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendationIsNotPreferableOnAmbariServer(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_SERVER", "cardinality": "ALL", "category": "MASTER", "is_master": True}]
}
]
services = self.prepareServices(servicesInfo)
localhost = socket.getfqdn()
hosts = self.prepareHosts([localhost, "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_SERVER": ["host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_validationNamenodeAndSecondaryNamenode2Hosts_noMessagesForSameHost(self):
servicesInfo = [
{
"name": "HDFS",
"components": [
{"name": "NAMENODE", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host1"]},
{"name": "SECONDARY_NAMENODE", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "level": "ERROR", "host": "host2"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Ganglia Monitor component should be installed on all hosts in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityExactAmount(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "2", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Exactly 2 Ganglia Monitor components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityAtLeast(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "1+", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "3+", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "At least 3 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationWarnMessagesIfLessThanDefault(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-site":{"properties":{"yarn.nodemanager.resource.memory-mb": "0",
"yarn.scheduler.minimum-allocation-mb": "str"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{"message": "Value is less than the recommended default of 512", "level": "WARN"},
{'message': 'Value should be set for yarn.nodemanager.linux-container-executor.group', 'level': 'ERROR'},
{"message": "Value should be integer", "level": "ERROR"},
{"message": "Value should be set", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationYARNServicecheckQueueName(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-env":{"properties":{"service_check.queue.name": "default"}},
"capacity-scheduler":{"properties":{"capacity-scheduler": "yarn.scheduler.capacity.root.queues=ndfqueue\n"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{'message': 'Queue is not exist, or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
]
self.assertValidationResult(expectedItems, result)
services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue"
expectedItems = []
result = self.stackAdvisor.validateConfigurations(services, hosts)
self.assertValidationResult(expectedItems, result)
def test_validationMinMax(self):
configurations = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "4096",
"some_float_value": "0.5",
"no_min_or_max_attribute_property": "STRING_VALUE"
}
}
}
recommendedDefaults = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "2047",
"some_float_value": "0.8",
"no_min_or_max_attribute_property": "STRING_VALUE"
},
"property_attributes": {
'mapreduce.task.io.sort.mb': {'maximum': '2047'},
'some_float_value': {'minimum': '0.8'}
}
}
}
items = []
self.stackAdvisor.validateMinMax(items, recommendedDefaults, configurations)
expectedItems = [
{
'message': 'Value is greater than the recommended maximum of 2047 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'mapreduce.task.io.sort.mb',
'type': 'configuration'
},
{
'message': 'Value is less than the recommended minimum of 0.8 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'some_float_value',
'type': 'configuration'
}
]
self.assertEquals(expectedItems, items)
def test_validationHostIsNotUsedForNonValuableComponent(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1", "host2"]},
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host1", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinality01TwoHostsAssigned(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "0-1", "category": "MASTER", "is_master": True, "hostnames": ["host1", "host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Between 0 and 1 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationHostIsNotUsed(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host2", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_getConfigurationClusterSummary_withHBaseAnd6gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 8,
"total_mem" : 6291456,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 8,
"disk": 8,
"ram": 6,
"reservedRam": 2,
"hbaseRam": 1,
"minContainerSize": 512,
"totalAvailableRam": 3072,
"containers": 6,
"ramPerContainer": 512,
"mapMemory": 512,
"reduceMemory": 512,
"amMemory": 512,
"referenceHost": hosts["items"][0]["Hosts"]
}
# Test - Cluster data with 1 host
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
# Test - Cluster data with 2 hosts - pick minimum memory
servicesList.append("YARN")
services = services = {"services":
[{"StackServices":
{"service_name" : "YARN",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"NODEMANAGER",
"custom_commands":[
],
"display_name":"NodeManager",
"is_client":"false",
"is_master":"false",
"service_name":"YARN",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
}
],
}],
"configurations": {}
}
hosts["items"][0]["Hosts"]["host_name"] = "host1"
hosts["items"].append({
"Hosts": {
"cpu_count" : 4,
"total_mem" : 500000,
"host_name" : "host2",
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
})
expected["referenceHost"] = hosts["items"][1]["Hosts"]
expected["referenceNodeManagerHost"] = hosts["items"][1]["Hosts"]
expected["amMemory"] = 170.66666666666666
expected["containers"] = 3.0
expected["cpu"] = 4
expected["totalAvailableRam"] = 512
expected["mapMemory"] = 170
expected["minContainerSize"] = 256
expected["reduceMemory"] = 170.66666666666666
expected["ram"] = 0
expected["ramPerContainer"] = 170.66666666666666
expected["reservedRam"] = 1
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
self.assertEquals(result, expected)
def test_getConfigurationClusterSummary_withHBaseAnd48gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 6,
"disk": 6,
"ram": 48,
"reservedRam": 6,
"hbaseRam": 8,
"minContainerSize": 2048,
"totalAvailableRam": 34816,
"containers": 11,
"ramPerContainer": 3072,
"mapMemory": 3072,
"reduceMemory": 3072,
"amMemory": 3072,
"referenceHost": hosts["items"][0]["Hosts"]
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
def test_recommendStormConfigurations(self):
# no AMS
configurations = {}
services = {
"services": [
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
# with AMS
configurations = {}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
"metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
def test_recommendYARNConfigurations(self):
configurations = {}
services = {"configurations": configurations, "services": []}
clusterData = {
"containers" : 5,
"ramPerContainer": 256
}
expected = {
"yarn-env": {
"properties": {
"min_user_id": "500",
'service_check.queue.name': 'default'
}
},
"yarn-site": {
"properties": {
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.resource.memory-mb": "1280",
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "1280"
}
}
}
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
def test_recommendMapReduce2Configurations_mapMemoryLessThan2560(self):
configurations = {}
clusterData = {
"mapMemory": 567,
"reduceMemory": 345.6666666666666,
"amMemory": 123.54
}
expected = {
"mapred-site": {
"properties": {
'mapreduce.job.queuename': 'default',
"yarn.app.mapreduce.am.resource.mb": "123",
"yarn.app.mapreduce.am.command-opts": "-Xmx99m",
"mapreduce.map.memory.mb": "567",
"mapreduce.reduce.memory.mb": "345",
"mapreduce.map.java.opts": "-Xmx454m",
"mapreduce.reduce.java.opts": "-Xmx277m",
"mapreduce.task.io.sort.mb": "227"
}
}
}
self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
def test_getConfigurationClusterSummary_noHostsWithoutHBase(self):
servicesList = []
components = []
hosts = {
"items" : []
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
expected = {
"hBaseInstalled": False,
"components": components,
"cpu": 0,
"disk": 0,
"ram": 0,
"reservedRam": 1,
"hbaseRam": 1,
"minContainerSize": 256,
"totalAvailableRam": 512,
"containers": 3,
"ramPerContainer": 170.66666666666666,
"mapMemory": 170,
"reduceMemory": 170.66666666666666,
"amMemory": 170.66666666666666
}
self.assertEquals(result, expected)
def prepareHosts(self, hostsNames):
hosts = { "items": [] }
for hostName in hostsNames:
nextHost = {"Hosts":{"host_name" : hostName}}
hosts["items"].append(nextHost)
return hosts
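  # Sketch of the structure prepareHosts builds (hypothetical host name):
  #   prepareHosts(["c6401.ambari.apache.org"]) ==
  #     {"items": [{"Hosts": {"host_name": "c6401.ambari.apache.org"}}]}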
def prepareServices(self, servicesInfo):
services = { "Versions" : { "stack_name" : "HDP", "stack_version" : "2.0.6" } }
services["services"] = []
for serviceInfo in servicesInfo:
nextService = {"StackServices":{"service_name" : serviceInfo["name"]}}
nextService["components"] = []
for component in serviceInfo["components"]:
nextComponent = {
"StackServiceComponents": {
"component_name": component["name"],
"cardinality": component["cardinality"],
"component_category": component["category"],
"is_master": component["is_master"]
}
}
try:
nextComponent["StackServiceComponents"]["hostnames"] = component["hostnames"]
except KeyError:
nextComponent["StackServiceComponents"]["hostnames"] = []
try:
nextComponent["StackServiceComponents"]["display_name"] = component["display_name"]
except KeyError:
nextComponent["StackServiceComponents"]["display_name"] = component["name"]
nextService["components"].append(nextComponent)
services["services"].append(nextService)
return services
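  # Sketch of the input prepareServices expects for one service with one
  # component (hypothetical names; "hostnames" and "display_name" fall back
  # to [] and the component name when the info dict omits them):
  #   prepareServices([{"name": "HDFS", "components": [
  #       {"name": "DATANODE", "cardinality": "1+",
  #        "category": "SLAVE", "is_master": "false"}]}])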
def assertHostLayout(self, componentsHostsMap, recommendation):
blueprintMapping = recommendation["recommendations"]["blueprint"]["host_groups"]
bindings = recommendation["recommendations"]["blueprint_cluster_binding"]["host_groups"]
actualComponentHostsMap = {}
for hostGroup in blueprintMapping:
hostGroupName = hostGroup["name"]
hostsInfos = [binding["hosts"] for binding in bindings if binding["name"] == hostGroupName][0]
hosts = [info["fqdn"] for info in hostsInfos]
for component in hostGroup["components"]:
componentName = component["name"]
        if componentName not in actualComponentHostsMap:
          actualComponentHostsMap[componentName] = []
for host in hosts:
if host not in actualComponentHostsMap[componentName]:
actualComponentHostsMap[componentName].append(host)
for componentName in componentsHostsMap.keys():
expectedHosts = componentsHostsMap[componentName]
actualHosts = actualComponentHostsMap[componentName]
self.checkEqual(expectedHosts, actualHosts)
def checkEqual(self, l1, l2):
if not len(l1) == len(l2) or not sorted(l1) == sorted(l2):
raise AssertionError("list1={0}, list2={1}".format(l1, l2))
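  # checkEqual is an order-insensitive list comparison, e.g.
  # checkEqual(["h1", "h2"], ["h2", "h1"]) passes, while differing lengths
  # or elements raise AssertionError with both lists in the message.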
def assertValidationResult(self, expectedItems, result):
actualItems = []
    for item in result["items"]:
      nextItem = {"message": item["message"], "level": item["level"]}
      try:
        nextItem["host"] = item["host"]
      except KeyError:
        pass
      actualItems.append(nextItem)
self.checkEqual(expectedItems, actualItems)
def test_recommendHbaseConfigurations(self):
servicesList = ["HBASE"]
configurations = {}
components = []
host_item = {
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
hosts = {
"items" : [host_item for i in range(1, 300)]
}
services = {
"services" : [
],
"configurations": {
"hbase-site": {
"properties": {
"hbase.superuser": "hbase"
}
},
"hbase-env": {
"properties": {
"hbase_user": "hbase123"
}
}
}
}
expected = {
'hbase-site': {
'properties': {
'hbase.superuser': 'hbase123'
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "4096",
"hbase_regionserver_heapsize": "8192",
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(clusterData['hbaseRam'], 8)
self.stackAdvisor.recommendHbaseConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
services = {
"Versions" : {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "NAMENODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "http://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for not existing DB_FLAVOR and http enabled, HDP-2.3")
# Recommend for DB_FLAVOR POSTGRES and https enabled, HDP-2.3
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "POSTGRES",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.https.port": "7777",
"ranger.service.http.enabled": "false",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR POSTGRES and https enabled, HDP-2.3")
# Recommend for DB_FLAVOR ORACLE and https enabled, HDP-2.2
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "ORACLE",
}
},
"ranger-site": {
"properties": {
"http.enabled": "false",
"https.service.port": "8888",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:8888"
}
},
"ranger-env": {"properties": {}}
}
recommendedConfigurations = {}
services['services'][0]['StackServices']['service_version'] = "0.4.0"
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR ORACLE and https enabled, HDP-2.2")
# Test Recommend LDAP values
services["ambari-server-properties"] = {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "true",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
services["configurations"] = {}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080',
}
},
'ranger-env': {'properties': {}},
'usersync-properties': {
'properties': {
'SYNC_LDAP_URL': 'ldaps://c6403.ambari.apache.org:636',
'SYNC_LDAP_BIND_DN': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'SYNC_LDAP_USER_OBJECT_CLASS': 'posixAccount',
'SYNC_LDAP_USER_NAME_ATTRIBUTE': 'uid'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Recommend LDAP values")
# Test Ranger Audit properties
del services["ambari-server-properties"]
services["configurations"] = {
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8080",
}
},
"ranger-env": {
"properties": {
"xasecure.audit.destination.db": "true",
"xasecure.audit.destination.hdfs":"false",
"xasecure.audit.destination.hdfs.dir":"hdfs://localhost:8020/ranger/audit/%app-type%/%time:yyyyMMdd%"
}
},
"ranger-hdfs-plugin-properties": {
"properties": {}
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080'
}
},
'ranger-hdfs-plugin-properties': {
'properties': {
'XAAUDIT.HDFS.IS_ENABLED': 'false',
'XAAUDIT.HDFS.DESTINATION_DIRECTORY': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%',
'XAAUDIT.DB.IS_ENABLED': 'true'
}
},
'ranger-env': {
'properties': {
'xasecure.audit.destination.hdfs.dir': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Ranger Audit properties")
def test_recommendHDFSConfigurations(self):
configurations = {
"hadoop-env": {
"properties": {
"hdfs_user": "hdfs",
"proxyuser_group": "users"
}
},
"hive-env": {
"properties": {
"webhcat_user": "webhcat",
"hive_user": "hive"
}
},
"oozie-env": {
"properties": {
"oozie_user": "oozie"
}
},
"falcon-env": {
"properties": {
"falcon_user": "falcon"
}
}
}
hosts = {
"items": [
{
"href": "/api/v1/hosts/host1",
"Hosts": {
"cpu_count": 1,
"host_name": "c6401.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6401.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 2097152,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
{
"href": "/api/v1/hosts/host2",
"Hosts": {
"cpu_count": 1,
"host_name": "c6402.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6402.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 1048576,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
]}
services = {
"services": [
{
"StackServices": {
"service_name": "HDFS"
}, "components": []
},
{
"StackServices": {
"service_name": "FALCON"
}, "components": []
},
{
"StackServices": {
"service_name": "HIVE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/HIVE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "HIVE_SERVER",
"custom_commands": [],
"display_name": "Hive Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org","c6402.ambari.apache.org"]
}},
{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/WEBHCAT_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "WEBHCAT_SERVER",
"custom_commands": [],
"display_name": "WebHCat Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}}]
},
{
"StackServices": {
"service_name": "OOZIE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/OOZIE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "OOZIE_SERVER",
"custom_commands": [],
"display_name": "Oozie Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}, }]
}],
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
clusterData = {
"totalAvailableRam": 2048
}
ambariHostName = socket.getfqdn()
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site':
{'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
configurations["hadoop-env"]["properties"]['hdfs_user'] = "hdfs1"
changedConfigurations = [{"type":"hadoop-env",
"name":"hdfs_user",
"old_value":"hdfs"}]
services["changed-configurations"] = changedConfigurations
services['configurations'] = configurations
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site': {'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.hdfs1.groups': '*',
'hadoop.proxyuser.hdfs1.hosts': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'},
'property_attributes':
{'hadoop.proxyuser.hdfs.groups': {'delete': 'true'},
'hadoop.proxyuser.hdfs.hosts': {'delete': 'true'}}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs1',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
    # Verify dfs.namenode.rpc-address is recommended for deletion when NameNode HA is enabled
configurations["hdfs-site"]["properties"]['dfs.internal.nameservices'] = "mycluster"
configurations["hdfs-site"]["properties"]['dfs.ha.namenodes.mycluster'] = "nn1,nn2"
services['configurations'] = configurations
expected["hdfs-site"] = {
'properties': {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024',
'dfs.internal.nameservices': 'mycluster',
'dfs.ha.namenodes.mycluster': 'nn1,nn2'
},
'property_attributes': {
'dfs.namenode.rpc-address': {
'delete': 'true'
}
}
}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_getHostNamesWithComponent(self):
services = {
"services": [
{
"StackServices": {
"service_name": "SERVICE"
},
"components": [
{
"StackServiceComponents": {
"component_name": "COMPONENT",
"hostnames": ["host1","host2","host3"]
}
}
]
}
],
"configurations": {}
}
result = self.stackAdvisor.getHostNamesWithComponent("SERVICE","COMPONENT", services)
expected = ["host1","host2","host3"]
self.assertEquals(result, expected)
def test_getZKHostPortString(self):
configurations = {
"zoo.cfg": {
"properties": {
'clientPort': "2183"
}
}
}
services = {
"services": [
{
"StackServices": {
"service_name": "ZOOKEEPER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["zk.host1","zk.host2","zk.host3"]
}
}, {
"StackServiceComponents": {
"component_name": "ZOOKEEPER_CLIENT",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
result = self.stackAdvisor.getZKHostPortString(services)
expected = "zk.host1:2183,zk.host2:2183,zk.host3:2183"
self.assertEquals(result, expected)
def test_validateHDFSConfigurations(self):
configurations = {}
services = ''
hosts = ''
#Default configuration
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '1024'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
    # Value is less than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '512'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertTrue(res)
    # Value is bigger than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '2048'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
# 1) ok: namenode_heapsize > recommended
recommendedDefaults = {'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256'}
properties = {'namenode_heapsize': '2048',
'namenode_opt_newsize' : '300',
'namenode_opt_maxnewsize' : '300'}
res_expected = []
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
properties['namenode_heapsize'] = '1022'
properties['namenode_opt_maxnewsize'] = '255'
res_expected = [{'config-type': 'hadoop-env',
'message': 'Value is less than the recommended default of 1024',
'type': 'configuration',
'config-name': 'namenode_heapsize',
'level': 'WARN'},
{'config-name': 'namenode_opt_maxnewsize',
'config-type': 'hadoop-env',
'level': 'WARN',
'message': 'Value is less than the recommended default of 256',
'type': 'configuration'}]
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
def test_validateAmsHbaseSiteConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': "/hadoop/data"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
}
},
"ams-site": {
"properties": {
"timeline.metrics.service.operation.mode": "embedded"
}
}
}
recommendedDefaults = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
host = {
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152,
"disk_info": [
{
"available": str(15<<30), # 15 GB
"type": "ext4",
"mountpoint": "/"
}
]
}
}
hosts = {
"items" : [
host
]
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "DATANODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
# only 1 partition, enough disk space, no warnings
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
    # 1 partition, not enough disk space
host['Hosts']['disk_info'] = [
{
"available" : '1',
"type" : "ext4",
"mountpoint" : "/"
}
]
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Ambari Metrics disk space requirements not met. '
'\nRecommended disk space for partition / is 10G',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# 2 partitions
host['Hosts']['disk_info'] = [
{
"available": str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/grid/0"
},
{
"available" : str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/"
}
]
recommendedDefaults = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
# dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'It is not recommended to use root partition for hbase.rootdir',
'type': 'configuration'
},
{
'config-name': 'hbase.tmp.dir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics temporary data. '
'/ partition is already used as hbase.rootdir to store metrics data',
'type': 'configuration'
},
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics data. '
'/ is already used by datanode to store HDFS data',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# incorrect hbase.rootdir in distributed mode
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'In distributed mode hbase.rootdir should point to HDFS.',
'type': 'configuration'
},
{
'config-name': 'hbase.cluster.distributed',
'config-type': 'ams-hbase-site',
'level': 'ERROR',
'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_validateStormSiteConfigurations(self):
configurations = {
"storm-site": {
"properties": {
'metrics.reporter.register': "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
}
}
recommendedDefaults = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
properties = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
# positive
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = []
self.assertEquals(res, expected)
properties['metrics.reporter.register'] = ''
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = [
{'config-name': 'metrics.reporter.register',
'config-type': 'storm-site',
'level': 'WARN',
'message': 'Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter '
'to report the metrics to Ambari Metrics service.',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_getHostsWithComponent(self):
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"configurations": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
},
{
"href" : "/api/v1/hosts/host2",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host2",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
"total_mem" : 1048576
}
},
]
}
datanodes = self.stackAdvisor.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(len(datanodes), 2)
self.assertEquals(datanodes, hosts["items"])
datanode = self.stackAdvisor.getHostWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(datanode, hosts["items"][0])
namenodes = self.stackAdvisor.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
self.assertEquals(len(namenodes), 1)
# [host2]
self.assertEquals(namenodes, [hosts["items"][1]])
namenode = self.stackAdvisor.getHostWithComponent("HDFS", "NAMENODE", services, hosts)
# host2
self.assertEquals(namenode, hosts["items"][1])
# not installed
nodemanager = self.stackAdvisor.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
self.assertEquals(nodemanager, None)
# unknown component
unknown_component = self.stackAdvisor.getHostWithComponent("YARN", "UNKNOWN", services, hosts)
    self.assertEquals(unknown_component, None)
# unknown service
unknown_component = self.stackAdvisor.getHostWithComponent("UNKNOWN", "NODEMANAGER", services, hosts)
    self.assertEquals(unknown_component, None)
def test_mergeValidators(self):
childValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"newconf": "new2.3"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
parentValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.2",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.2",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.2",
"hbase-env": "validateHBASEEnvConfigurations2.2"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"}
}
expected = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"hbase-env": "validateHBASEEnvConfigurations2.2",
"newconf": "new2.3"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
self.stackAdvisor.mergeValidators(parentValidators, childValidators)
self.assertEquals(expected, parentValidators)
def test_getProperMountPoint(self):
hostInfo = None
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo = {"some_key": []}
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo["disk_info"] = []
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# root mountpoint with low space available
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# tmpfs with more space available
hostInfo["disk_info"].append(
{
"available" : "2",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# /boot with more space available
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "tmpfs",
"mountpoint" : "/boot/grub"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
    # /mnt/external_hdd with more space available
hostInfo["disk_info"].append(
{
"available" : "4",
"type" : "tmpfs",
"mountpoint" : "/mnt/external_hdd"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# virtualbox fs with more space available
hostInfo["disk_info"].append(
{
"available" : "5",
"type" : "vboxsf",
"mountpoint" : "/vagrant"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "6",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
self.assertEquals(["/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "7",
"type" : "ext4",
"mountpoint" : "/grid/1"
}
)
self.assertEquals(["/grid/1", "/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
def test_validateNonRootFs(self):
hostInfo = {"disk_info": [
{
"available" : "2",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
recommendedDefaults = {"property1": "file:///var/dir"}
# only / mountpoint - no warning
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
# More preferable /grid/0 mountpoint - warning
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
recommendedDefaults = {"property1": "file:///grid/0/var/dir"}
warn = self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo)
self.assertTrue(warn != None)
self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
    # User-set /var mountpoint, which is non-root but not preferable - no warning
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/var"
}
)
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
def test_validatorEnoughDiskSpace(self):
    requiredDiskSpace = 1048576
    errorMsg = "Ambari Metrics disk space requirements not met. \n" \
               "Recommended disk space for partition / is 1G"
# local FS, enough space
hostInfo = {"disk_info": [
{
"available" : "1048578",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
    # local FS, not enough space
hostInfo = {"disk_info": [
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
]}
    warn = self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace)
self.assertTrue(warn != None)
self.assertEquals({'message': errorMsg, 'level': 'WARN'}, warn)
# non-local FS, HDFS
properties = {"property1": "hdfs://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
# non-local FS, WASB
properties = {"property1": "wasb://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
def test_round_to_n(self):
self.assertEquals(self.stack_advisor_impl.round_to_n(0), 0)
self.assertEquals(self.stack_advisor_impl.round_to_n(1000), 1024)
self.assertEquals(self.stack_advisor_impl.round_to_n(2000), 2048)
self.assertEquals(self.stack_advisor_impl.round_to_n(4097), 4096)
def test_getMountPointForDir(self):
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/"]), "/")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("file:///var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("hdfs:///hdfs_path", ["/var", "/"]), None)
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("relative/path", ["/var", "/"]), None)
def test_getValidatorEqualsToRecommendedItem(self):
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value1"}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), None)
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value2"}
expected = {'message': 'It is recommended to set value value2 for property property1', 'level': 'WARN'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {}
recommendedDefaults = {"property1": "value2"}
expected = {'level': 'ERROR', 'message': 'Value should be set for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {"property1": "value1"}
recommendedDefaults = {}
expected = {'level': 'ERROR', 'message': 'Value should be recommended for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
def test_getServicesSiteProperties(self):
import imp, os
testDirectory = os.path.dirname(os.path.abspath(__file__))
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
stack_advisor = imp.load_source('stack_advisor', hdp206StackAdvisorPath)
services = {
"services": [
{
"StackServices": {
"service_name": "RANGER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
self.assertEquals(siteProperties, expected)
def test_createComponentLayoutRecommendations_addService_1freeHost(self):
"""
Test that already installed slaves are not added to any free hosts (not having any component installed)
as part of recommendation received during Add service operation.
For already installed services, recommendation for installed components should match the existing layout
"""
services = {
"services" : [
{
"StackServices" : {
"service_name" : "HDFS"
},
"components" : [ {
"StackServiceComponents" : {
"cardinality" : "1+",
"component_category" : "SLAVE",
"component_name" : "DATANODE",
"hostnames" : [ "c6401.ambari.apache.org" ]
}
} ]
} ]
}
hosts = self.prepareHosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"])
recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
"""
Recommendation received should be as below:
{
'blueprint': {
'host_groups': [{
'name': 'host-group-1',
'components': []
}, {
'name': 'host-group-2',
'components': [{
'name': 'DATANODE'
}]
}]
},
'blueprint_cluster_binding': {
'host_groups': [{
'hosts': [{
'fqdn': 'c6402.ambari.apache.org'
}],
'name': 'host-group-1'
}, {
'hosts': [{
'fqdn': 'c6401.ambari.apache.org'
}],
'name': 'host-group-2'
}]
}
}
"""
# Assert that the list is empty for host-group-1
self.assertFalse(recommendations['blueprint']['host_groups'][0]['components'])
# Assert that DATANODE is placed on host-group-2
self.assertEquals(recommendations['blueprint']['host_groups'][1]['components'][0]['name'], 'DATANODE')
# ---- models2.py (repo: Lydia-Tan/MindLife, license: MIT) ----
import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def ajay(ans):
    analyzer = SentimentIntensityAnalyzer()
completeScore = 0
questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
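    # The weights sum to 1.0, so completeScore below is a weighted average of
    # per-answer compound scores and stays within VADER's [-1, 1] range.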
print ans
ansList = ans.split("$")
for j in range(10):
print ansList[j]
    for i in range(10):
        results = []
        score = 0
        count = 0
        # Split answer i into sentences on '.', '?' or '!' and average the
        # VADER compound scores over its sentences.
        for l in re.split(r"\.|\?|\!", ansList[i]):
            ss = analyzer.polarity_scores(l)
            results.append(ss)
            score += ss['compound']
            count += 1
        # Weight each answer's average sentiment by its question weight
        completeScore += (score/count)*questionWeights[i]
#print(completeScore)
if (completeScore >= 0.1):
return "False Alarm! You don't have Depression."
elif (completeScore >= -0.1):
return ("Seasonal affective disorder (SAD). This type of depression " +
"emerges as days get shorter in the fall and winter. The mood "
+ "change may result from alterations in the body's natural daily "
+ "rhythms, in the eyes' sensitivity to light, or in how chemical "
+ "messengers like serotonin and melatonin function. The leading "
+ "treatment is light therapy, which involves daily sessions sitting "
+ "close to an especially intense light source. The usual treatments "
+ "for depression, such as psychotherapy and medication, may also be "
+ "effective.");
elif (completeScore >= -0.4):
return ("Persistent depressive disorder. Formerly called dysthymia, this "
+ "type of depression refers to low mood that has lasted for at least "
+ "two years but may not reach the intensity of major depression. Many "
+ "people with this type of depression type are able to function day to "
+ "but feel low or joyless much of the time. Some depressive symptoms, "
+ "such as appetite and sleep changes, low energy, low self-esteem, or "
+ "hopelessness, are usually part of the picture.")
else:
return ("The classic depression type, major depression is a state where a dark "
+ "mood is all-consuming and one loses interest in activities, even ones "
+ "that are usually pleasurable. Symptoms of this type of depression "
+ "include trouble sleeping, changes in appetite or weight, loss of energy, "
+ "and feeling worthless. Thoughts of death or suicide may occur. It is "
+ "usually treated with psychotherapy and medication. For some people with "
+ "severe depression that isn't alleviated with psychotherapy or antidepressant "
+ "medications, electroconvulsive therapy may be effective.")
# ---- bin/ADFRsuite/CCSBpckgs/mglutil/gui/BasicWidgets/Tk/Dial.py (repo: AngelRuizMoreno/Jupyter_Dock_devel, license: MIT) ----
################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
#########################################################################
#
# Date: Mai 2001 Authors: Michel Sanner, Daniel Stoffler
#
# sanner@scripps.edu
# stoffler@scripps.edu
#
# Copyright: Michel Sanner, Daniel Stoffler and TSRI
#
#########################################################################
import Tkinter
import math
import types
import sys
import os
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
from optionsPanel import OptionsPanel
from KeyboardEntry import KeyboardEntry
class Dial(Tkinter.Frame, KeyboardEntry):
"""This class implements a Dial widget.
The widget has a pointer that can be moved around a circle.
The range corresponding to one full turn can be specified as well as the min
and max values that are allowed. By default these are set to None, meaning that
there is no min and no max. One turn corresponds to 360 units by default.
A dial can also operate in discrete mode (if self.increment is set to x). In
this mode the values will be constrained to multiples of self.increment.
The Widget has a Callback manager. Callback functions get called at every value
change if self.contiguous is set to 1, else they get called when the mouse
button is released. They always get called with the current value as an
argument.
An optional label can be displayed at the center of the Dial widget.
The size of the dial has to be specified at instantiation. Other parameters
can be set after the widget has been created.
The widget tries to automatically adjust the size of the arrow according to
the size of the dial.
The widget has a configure() method: type, min, max, increment, precision,
showLabel, value, continuous, oneTurn can be set this way.
master, labCfg and size can be passed only to the constructor.
a lock() method is used to disable the various gui components of the
options panel. Usage: <instance>.lock(<component>=<value>)
components see configure(). value is 0 or 1. 1 disables,
0 enables.
Setting values with increment enabled:
if using the method set(), the actual value will 'snap' to the next increment.
i.e., if the value is set to 3, and the increment is set to 2, setting the
value to 6 will actually result in 7 (3,5,7,9,.....)
To still be able to set the value, disregarding the current active increment,
the set method understands the optional keyword force=True, i.e.
dial.set(<value>, force=True)), which will set the value to <value>. The
increment will now be added to this new <value>
"""
def __init__(self, master=None, type='float',
labCfg={'fg':'black','side':'left', 'text':None},
min=None, max=None, increment=.0, precision=2,
showLabel=1, value=0.0, continuous=1, oneTurn=360.,
size=50, callback=None,
lockMin=0, lockBMin=0, lockMax=0, lockBMax=0,
lockIncrement=0, lockBIncrement=0,
lockPrecision=0, lockShowLabel=0, lockValue=0,
lockType=0, lockContinuous=0, lockOneTurn=0, **kw):
Tkinter.Frame.__init__(self, master)
Tkinter.Pack.config(self)
self.callbacks = CallbackManager() # object to manage callback
# functions. They get called with the
# current value as an argument
# initialize various attributes with default values
self.precision = 2 # decimal places
self.min = None # minimum value
self.max = None # maximum value
self.increment = increment # value increment
self.minOld = 0. # used to store old values
self.maxOld = 0.
self.incrementOld = increment
self.size = 50 # defines widget size
self.offsetValue = 0. # used to set increment correctly
self.lab = None # label
self.callback = None # user specified callback
self.opPanel = None # option panel widget
self.oneTurn = 360. # value increment for 1 full turn
self.value = 0.0 # current value of widget
self.oldValue = 0.0 # old value of widget
self.showLabel = 1 # turn on to display label on
self.continuous = 1 # set to 1 to call callbacks at
# each value change, else gets called
# on button release event
self.angle = 0. # angle corresponding to value
self.labCfg = labCfg # Tkinter Label options
self.labelFont = (
ensureFontCase('helvetica'), 14, 'bold') # label font
self.labelColor = 'yellow' # label color
self.canvas = None # the canvas to create the widget in
self.usedArcColor = '#aaaaaa' # filled arc color of used portion
self.unusedArcColor = '#cccccc' # filled arc color of unused portion
self.pyOver180 = math.pi/180.0 # constants used in various places
self.threeSixtyOver1turn = 1
self.piOver1turn = math.pi/360.
self.lockMin = lockMin # lock<X> vars are used in self.lock()
self.lockMax = lockMax # to lock/unlock entries in optionpanel
self.lockIncrement = lockIncrement
self.lockBMin = lockBMin
self.lockBMax = lockBMax
self.lockBIncrement = lockBIncrement
self.lockPrecision = lockPrecision
self.lockShowLabel = lockShowLabel
self.lockValue = lockValue
self.lockType = lockType
self.lockContinuous = lockContinuous
self.lockOneTurn = lockOneTurn
self.setArrow()
# configure with user-defined values
self.setSize(size)
self.setCallback(callback)
self.setContinuous(continuous)
self.setType(type)
self.setPrecision(precision)
self.setOneTurn(oneTurn)
self.setMin(min)
self.setMax(max)
self.setIncrement(increment)
self.setShowLabel(showLabel)
self.setValue(value)
self.setLabel(self.labCfg)
self.createCanvas(master)
canvas = self.canvas
canvas.bind("<ButtonPress-1>", self.mouseDown)
canvas.bind("<ButtonRelease-1>", self.mouseUp)
canvas.bind("<B1-Motion>", self.mouseMove)
canvas.bind("<Button-3>", self.toggleOptPanel)
if os.name == 'nt': #sys.platform == 'win32':
canvas.bind("<MouseWheel>", self.mouseWheel)
else:
canvas.bind("<Button-4>", self.mouseWheel)
canvas.bind("<Button-5>", self.mouseWheel)
KeyboardEntry.__init__(self, (canvas,), self.setFromEntry)
self.opPanel = OptionsPanel(master = self, title="Dial Options")
## if self.callback:
## self.callbacks.AddCallback(self.callback)
def setFromEntry(self, valueString):
try:
self.set(self.type(valueString))
except ValueError:
# fixme we would like to pop this up in a window maybe
import traceback
traceback.print_stack()
traceback.print_exc()
def handleKeyStroke(self, event):
# handle key strokes for numbers only in widget keyboard entry label
key = event.keysym
if key.isdigit() or key=='period' or key=='minus' or key=='plus':
if key == 'period':
key = '.'
elif key == 'minus':
key = '-'
elif key == 'plus':
key = '+'
self.typedValue += key
self.typedValueTK.configure(text=self.typedValue)
else:
KeyboardEntry.handleKeyStroke(self, event)
def setSize(self, size):
"""Set widget size. Size must be of type int and greater than 0"""
        assert isinstance(size, types.IntType),\
               "Illegal size: expected type %s, got %s" % (type(1), type(size))
assert size > 0, "Illegal size: must be > 0, got %s"%size
self.size = size
def setCallback(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable, or list. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def toggleOptPanel(self, event=None):
if self.opPanel.flag:
self.opPanel.Dismiss_cb()
else:
if not hasattr(self.opPanel, 'optionsForm'):
self.opPanel.displayPanel(create=1)
else:
self.opPanel.displayPanel(create=0)
def setArrow(self, size=None):
if size is not None:
self.setSize(size)
aS = self.size/40
self.arrowLength = max(3, 3*aS) # arrow head length
self.arrowWidth = max(2, aS) # half the arrow body width
self.arrowBorderwidth = max(1, self.arrowWidth/2) # width of arrow
# shadow lines
self.arrowHeadWidth = 2*self.arrowWidth # width of arrow head base
def mouseDown(self, event):
# remember where the mouse went down
self.lastx = event.x
self.lasty = event.y
def mouseUp(self, event):
# call callbacks if not in continuous mode
if not self.continuous:
self.callbacks.CallCallbacks(self.opPanel.valInput.get())
if self.showLabel == 2:
# no widget labels on mouse release
self.canvas.itemconfigure(self.labelId2, text='')
self.canvas.itemconfigure(self.labelId, text='')
def mouseMove(self, event):
dx = event.x-self.xm
dy = self.ym-event.y
n = math.sqrt(dx*dx+dy*dy)
if n == 0.0: v = [0.0, 0.0]
else: v = [dx/n, dy/n]
# find the cosine of the angle between new hand position and previous
# hand position
ma = v[0]*self.vector[0] + v[1]*self.vector[1]
# assure no rounding errors
if ma > 1.0: ma = 1.0
elif ma < -1.0: ma = -1.0
# compute angle increment compared to current vector
ang = math.acos(ma)
# find the sign of the rotation, sign of z component of vector prod.
oldv = self.vector
normz = oldv[0]*v[1] - oldv[1]*v[0]
if normz>0: ang = -1. * ang
# compute the new value
val = self.value + ang*self.oneTurnOver2pi
self.set(val)
self.lastx = event.x
self.lasty = event.y
def mouseWheel(self, event):
#print "mouseWheel", event, event.num
if os.name == 'nt': #sys.platform == 'win32':
if event.delta > 0:
lEventNum = 4
else:
lEventNum = 5
else:
lEventNum = event.num
if lEventNum == 4:
self.set(self.value+self.oneTurn)
else:
self.set(self.value-self.oneTurn)
def get(self):
return self.type(self.value)
def printLabel(self):
if self.canvas is None:
return
        self.canvas.itemconfigure(self.labelId2,
                                  text=self.labelFormat % self.value)
        self.canvas.itemconfigure(self.labelId,
                                  text=self.labelFormat % self.value)
def set(self, val, update=1, force=0):
# if force is set to 1, we call this method regardless of the
# widget configuration. This is for example the case if the dial
# is set to continuous=0, but the value is set in the options panel
# snap to closest increment
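        # Worked example from the class docstring: with increment=2 and a
        # current offsetValue of 3, set(6) snaps to 7 (the 3, 5, 7, 9, ...
        # series), while set(6, force=True) stores exactly 6.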
if self.increment is not None and self.increment != 0. and not force:
offset = self.offsetValue%self.increment
dval = round(val/self.increment) * self.increment
if val < dval:
dval = dval + offset - self.increment
else:
dval = dval + offset
if self.min is not None and dval < self.min:
dval = self.min
elif self.max is not None and dval > self.max:
dval = self.max
# recompute vector and angle corresponding to val
self.angle = (dval%self.oneTurn)*self.threeSixtyOver1turn
            if dval < 0.0:
self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = dval
self.offsetValue = dval
else:
# 'regular' mode, i.e. no step-wise increment
if self.min is not None and val < self.min: val = self.min
elif self.max is not None and val > self.max: val = self.max
# recompute vector and angle corresponding to val
self.angle = (val%self.oneTurn)*self.threeSixtyOver1turn
            if val < 0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = val
self.offsetValue = val
#update arrow in display
self.drawArrow()
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
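# Snapping illustration (added; assumes increment=10.0 and offsetValue=0):
# set(23.0) computes dval = round(2.3)*10.0 = 20.0; since 23.0 >= 20.0 the
# zero offset is added back, so the dial lands on 20.0 before min/max clamping.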
def drawArrow(self):
if self.canvas is None:
return
# end point
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym - self.vector[1]*self.rad
# point at arrow head base
xb = self.xm + self.vector[0]*self.radNoArrow
yb = self.ym - self.vector[1]*self.radNoArrow
# vector orthogonal to the arrow in screen coordinates (y axis points down)
n = [-self.vector[1], -self.vector[0]]
pts1 = [ self.xm+n[0]*self.arrowWidth, self.ym+n[1]*self.arrowWidth,
xb+n[0]*self.arrowWidth, yb+n[1]*self.arrowWidth,
xb+n[0]*self.arrowHeadWidth, yb+n[1]*self.arrowHeadWidth,
x1, y1 ]
pts2 = [ x1, y1,
xb-n[0]*self.arrowHeadWidth, yb-n[1]*self.arrowHeadWidth,
xb-n[0]*self.arrowWidth, yb-n[1]*self.arrowWidth,
self.xm-n[0]*self.arrowWidth, self.ym-n[1]*self.arrowWidth ]
canvas = self.canvas
if self.vector[0] > 0.0:
col1 = '#DDDDDD'
col2 = 'black'
else:
col1 = 'black'
col2 = '#DDDDDD'
canvas.coords(self.arrowPolId, *(pts1 + pts2))
canvas.coords(self.arrowPolborder1, *pts1)
canvas.itemconfigure( self.arrowPolborder1, fill=col1 )
canvas.coords(self.arrowPolborder2, *pts2)
canvas.itemconfigure( self.arrowPolborder2, fill=col2 )
canvas.itemconfigure(self.arcId, extent = 0.0-self.angle)
def createCanvas(self, master):
size = self.size
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=size+2, height=size+2)
self.xm = self.ym = size/2+2
self.rad = size/2
self.radNoArrow = self.rad-self.arrowLength
self.vector = [0, 1]
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym + self.vector[1]*self.rad
canvas = self.canvas
self.circleId = canvas.create_oval(2,2,size,size, width=1,
fill=self.unusedArcColor)
self.arcId = canvas.create_arc(2,2,size,size, start=90.,
extent=0, fill=self.usedArcColor)
canvas.create_line(2, self.ym, size+2, self.ym)
canvas.create_line(self.xm, 2, self.xm, size+2)
self.arrowPolId = canvas.create_polygon( 0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
fill='gray75' )
self.arrowPolborder1 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='black',
width = self.arrowBorderwidth)
self.arrowPolborder2 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='white',
width = self.arrowBorderwidth )
r = size/20
off = self.arrowBorderwidth
canvas.create_oval(self.xm-r,self.ym-r-off/2,self.xm+r,self.ym+r-off/2,
fill='#DDDDDD', outline='white')
canvas.create_oval(self.xm-r,self.ym-r+off,self.xm+r,self.ym+r+off,
fill='black', outline='black')
canvas.create_oval(self.xm-r,self.ym-r,self.xm+r,self.ym+r,
fill='gray70', outline='#DDDDDD')
self.labelId2 = canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
self.drawArrow()
self.opPanel = OptionsPanel(master = self, title="Dial Options")
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
if type(val) == types.StringType:
val = float(val)
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min: val = self.min
if self.max is not None and val > self.max: val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#update arrow in display
self.angle = (self.value%self.oneTurn)*self.threeSixtyOver1turn
if self.value <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.drawArrow()
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if 'side' not in self.labCfg:
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
self.lab.configure(text=text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='increment': self.setIncrement(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
elif key=='oneTurn': self.setOneTurn(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockIncrement': self.lockIncrementCB(value)
elif key=='lockBIncrement': self.lockBIncrementCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
elif key=='lockOneTurn': self.lockOneTurnCB(value)
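# Example use (added): d.configure(min=0.0, max=360.0, showLabel=1) routes
# each keyword through the matching setter above.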
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = {'int': int, 'float': float}[Type]
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
elif self.type == float:
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
def setIncrement(self, incr):
if incr is not None:
assert type(incr) in [types.IntType, types.FloatType],\
"Illegal type for increment. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(incr) )
self.increment = self.type(incr)
self.offsetValue = self.value
self.incrementOld = self.increment
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.incrInput.set(self.labelFormat%self.increment)
self.opPanel.toggleIncr.set(1)
self.opPanel.incr_entry.configure(state='normal', fg='gray0')
else:
self.increment = self.type(0)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleIncr.set(0)
self.opPanel.incrInput.set(self.labelFormat%0)
self.opPanel.incr_entry.configure(state='disabled',
fg='gray40')
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
2: show label only when value changes"""
assert val in [0,1,2],\
"Illegal value for showLabel. Expected 0, 1 or 2, got %s"%val
self.showLabel = val
self.toggleWidgetLabel(val)
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togLabel']['widget']
if self.showLabel == 0:
label = 'never'
elif self.showLabel == 1:
label = 'always'
elif self.showLabel == 2:
label = 'move'
w.setvalue(label)
if self.opPanel:
self.opPanel.updateDisplay()
def setOneTurn(self, oneTurn):
assert type(oneTurn) in [types.IntType, types.FloatType],\
"Illegal type for oneTurn. Expected %s or %s, got %s"%(
type(0), type(0.0), type(oneTurn) )
self.oneTurn = oneTurn
self.threeSixtyOver1turn = 360./oneTurn
self.piOver1turn = math.pi/oneTurn
self.oneTurnOver2pi = oneTurn / (2*math.pi)
if self.opPanel:
self.opPanel.updateDisplay()
#####################################################################
# the 'lock' methods:
#####################################################################
def lockTypeCB(self, mode):
if mode != 0: mode = 1
self.lockType = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMinCB(self, mode): #min entry field
if mode != 0: mode = 1
self.lockMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMinCB(self, mode): # min checkbutton
if mode != 0: mode = 1
self.lockBMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMaxCB(self, mode): # max entry field
if mode != 0: mode = 1
self.lockMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMaxCB(self, mode): # max checkbutton
if mode != 0: mode = 1
self.lockBMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockIncrementCB(self, mode): # increment entry field
if mode != 0: mode = 1
self.lockIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBIncrementCB(self, mode): # increment checkbutton
if mode != 0: mode = 1
self.lockBIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockPrecisionCB(self, mode):
if mode != 0: mode = 1
self.lockPrecision = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockShowLabelCB(self, mode):
if mode != 0: mode = 1
self.lockShowLabel = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockValueCB(self, mode):
if mode != 0: mode = 1
self.lockValue = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockContinuousCB(self, mode):
if mode != 0: mode = 1
self.lockContinuous = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockOneTurnCB(self, mode):
if mode != 0: mode = 1
self.lockOneTurn = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
if __name__ == '__main__':
def foo(val):
print val
d = Dial(size=50)
d.configure(showLabel=1)
d.callbacks.AddCallback(foo)
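# Added so the demo actually runs (assumes Dial is a Tkinter widget, as
# createCanvas above suggests): start the event loop to display the dial.
d.mainloop()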
| 36.376638
| 87
| 0.547643
| 3,868
| 33,321
| 4.706825
| 0.152792
| 0.046523
| 0.004779
| 0.005273
| 0.32341
| 0.297484
| 0.258376
| 0.209162
| 0.176261
| 0.161046
| 0
| 0.018954
| 0.333393
| 33,321
| 915
| 88
| 36.416393
| 0.800693
| 0.101378
| 0
| 0.279534
| 0
| 0
| 0.064124
| 0
| 0
| 0
| 0
| 0.001093
| 0.023295
| 0
| null | null | 0
| 0.016639
| null | null | 0.023295
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
6a9d42bd307c1507375c76e403f46b3901bbf76d
| 3,560
|
py
|
Python
|
qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py
|
kevinlq/Qt-Creator-Opensource-Study
|
b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f
|
[
"MIT"
] | 5
|
2018-12-22T14:49:13.000Z
|
2022-01-13T07:21:46.000Z
|
qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py
|
kevinlq/Qt-Creator-Opensource-Study
|
b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f
|
[
"MIT"
] | null | null | null |
qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py
|
kevinlq/Qt-Creator-Opensource-Study
|
b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f
|
[
"MIT"
] | 8
|
2018-07-17T03:55:48.000Z
|
2021-12-22T06:37:53.000Z
|
#!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
import stat
import difflib
import inspect
import getopt
def referenceFile():
if sys.platform.startswith('linux'):
filename = 'makeinstall.linux'
elif sys.platform.startswith('win'):
filename = 'makeinstall.windows'
elif sys.platform == 'darwin':
filename = 'makeinstall.darwin'
else:
print "Unsupported platform: ", sys.platform
sys.exit(-1)
scriptDir = os.path.dirname(inspect.getfile(inspect.currentframe()))
return os.path.join(scriptDir,'..','tests', 'reference', filename)
def readReferenceFile():
# read file with old diff
f = open(referenceFile(), 'r')
filelist = []
for line in f:
filelist.append(line)
f.close()
return filelist
def generateReference(rootdir):
fileDict = {}
for root, subFolders, files in os.walk(rootdir):
for file in (subFolders + files):
f = os.path.join(root,file)
perm = os.stat(f).st_mode & 0777
if os.path.getsize(f) == 0:
print "'%s' is empty!" % f
fileDict[f[len(rootdir)+1:]] = perm
# generate new list
formattedlist = []
for name, perm in sorted(fileDict.iteritems()):
formattedlist.append("%o %s\n"% (perm, name))
return formattedlist
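# Example (added, hypothetical path): each returned entry has the form
# "<octal permissions> <relative path>\n", e.g. "755 bin/qtcreator\n".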
def usage():
print "Usage: %s [-g | --generate] <dir>" % os.path.basename(sys.argv[0])
def main():
generateMode = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hg', ['help', 'generate'])
except getopt.GetoptError as err:
print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-g', '--generate'):
generateMode = True
if len(args) != 1:
usage()
sys.exit(2)
rootdir = args[0]
if generateMode:
f = open(referenceFile(), 'w')
for item in generateReference(rootdir):
f.write(item)
f.close()
print "Do not forget to commit", referenceFile()
else:
hasDiff = False
for line in difflib.unified_diff(readReferenceFile(), generateReference(rootdir), fromfile=referenceFile(), tofile="generated"):
sys.stdout.write(line)
hasDiff = True
if hasDiff:
sys.exit(1)
if __name__ == "__main__":
main()
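# Usage (added, per usage() above):
#   python checkInstalledFiles.py --generate <installdir>   # write reference
#   python checkInstalledFiles.py <installdir>              # diff vs reference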
| 31.504425
| 136
| 0.608989
| 439
| 3,560
| 4.91344
| 0.416856
| 0.016226
| 0.013908
| 0.01669
| 0.024108
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008453
| 0.235674
| 3,560
| 112
| 137
| 31.785714
| 0.78427
| 0.290169
| 0
| 0.123288
| 0
| 0
| 0.100851
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.082192
| null | null | 0.068493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
6aaa29259fb6e01655aa91ee60654bb2eceee036
| 1,271
|
py
|
Python
|
gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py
|
AisinoPythonTeam/PythonAiniso
|
983a29962752679d8cc26a2c3cdb0ba8fcfa3f02
|
[
"Apache-2.0"
] | null | null | null |
gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py
|
AisinoPythonTeam/PythonAiniso
|
983a29962752679d8cc26a2c3cdb0ba8fcfa3f02
|
[
"Apache-2.0"
] | null | null | null |
gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py
|
AisinoPythonTeam/PythonAiniso
|
983a29962752679d8cc26a2c3cdb0ba8fcfa3f02
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import pymysql
import sys, os, json, time, pymongo
app_dir = os.path.abspath("../")
sys.path.append(app_dir)
from gjqyxyxxcxxt import settings
from gjqyxyxxcxxt.database.my_redis import QueueRedis
conn = None
def connect_db():
global conn
conn = pymysql.connect(host="172.16.16.15",port=3306,user="root",passwd="A1s1n0@zxyc#3",db="ixinnuo_sjcj",charset="utf8")
return
def get_req_from_db():
global conn
cursor = conn.cursor()
cursor.execute('select id, entname from req where status=0 order by id limit 10')
results = cursor.fetchall()
companies = []
for res in results:
company = {}
company['id'] = res[0]
company['name'] = res[1]
companies.append(company)
return companies
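# Example return value (added, hypothetical data):
# [{'id': 1, 'name': 'company A'}, {'id': 2, 'name': 'company B'}]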
def main():
my_queue = QueueRedis()
result = my_queue.get_queue_length(settings.COMPANIES)
print result
# if the message queue already holds data, sleep 3 seconds and exit
if result:
time.sleep(3)
exit()
time.sleep(3)
global conn
connect_db()
source = get_req_from_db()
for id_name in source:
message = json.dumps(id_name)
my_queue.send_to_queue(settings.COMPANIES, message)
conn.close()
print 'Successfully added %s records to the queue!!!' % len(source)
if __name__ == '__main__':
main()
| 24.921569
| 125
| 0.650669
| 173
| 1,271
| 4.612717
| 0.508671
| 0.037594
| 0.030075
| 0.030075
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027328
| 0.222659
| 1,271
| 50
| 126
| 25.42
| 0.780364
| 0.028324
| 0
| 0.121951
| 0
| 0
| 0.112916
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.02439
| 0.097561
| null | null | 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
6ad3007b95e5d17415b05151d343ee3326e45e1d
| 2,157
|
py
|
Python
|
experiment/diabetes/accuracy_info.py
|
leandro-santiago/bloomwisard
|
4c02610c4ef2d2cf8424797c8a815da182ca2383
|
[
"MIT"
] | 2
|
2020-10-25T17:01:10.000Z
|
2020-12-04T14:26:26.000Z
|
experiment/diabetes/accuracy_info.py
|
leandro-santiago/bloomwisard
|
4c02610c4ef2d2cf8424797c8a815da182ca2383
|
[
"MIT"
] | null | null | null |
experiment/diabetes/accuracy_info.py
|
leandro-santiago/bloomwisard
|
4c02610c4ef2d2cf8424797c8a815da182ca2383
|
[
"MIT"
] | null | null | null |
import numpy as np
import sys
from timeit import default_timer as timer
sys.path.append("../../")
from core import wnn
from encoding import thermometer
from encoding import util
#Load Diabetes data
base_path = "../../dataset/diabetes/"
#2/3 Test
bits_encoding = 20
train_data, train_label, test_data, test_label, data_min, data_max = util.load_3data(base_path)
ths = []
for i in range(len(data_max)):
ths.append(thermometer.Thermometer(data_min[i], data_max[i], bits_encoding))
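# Note (added): a thermometer code typically turns a scalar into a fixed-width
# bit vector whose run of leading 1s grows with the value, e.g. with 20 bits a
# mid-range reading sets roughly the first 10 bits; the exact convention here
# is whatever encoding.thermometer implements.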
train_bin = []
test_bin = []
i = 0
for data in train_data:
train_bin.append(np.array([], dtype=bool))
t = 0
for v in data:
binarr = ths[t].binarize(v)
train_bin[i] = np.append(train_bin[i], binarr)
t += 1
i += 1
i = 0
for data in test_data:
test_bin.append(np.array([], dtype=bool))
t = 0
for v in data:
binarr = ths[t].binarize(v)
test_bin[i] = np.append(test_bin[i], binarr)
t += 1
i += 1
#print test_label
#Wisard
num_classes = 2
tuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30]
acc_list = []
test_length = len(test_label)
entry_size = len(train_bin[0])
#print entry_size
for t in tuple_list:
wisard = wnn.Wisard(entry_size, t, num_classes)
wisard.train(train_bin, train_label)
rank_result = wisard.rank(test_bin)
num_hits = 0
for i in range(test_length):
if rank_result[i] == test_label[i]:
num_hits += 1
acc_list.append(float(num_hits)/float(test_length))
#Bloom Wisard
btuple_list = [2, 4, 8, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 40, 56]
bacc_list = []
#capacity = len(train_bin)
capacity = 10
print capacity
for t in btuple_list:
bwisard = wnn.BloomWisard(entry_size, t, num_classes, capacity)
bwisard.train(train_bin, train_label)
rank_result = bwisard.rank(test_bin)
num_hits = 0
for i in range(test_length):
if rank_result[i] == test_label[i]:
num_hits += 1
bacc_list.append(float(num_hits)/float(test_length))
print "Tuples=", tuple_list
print "Wisard Accuracy=", acc_list
print "Tuples=", btuple_list
print "BloomWisard Accuracy=",bacc_list
| 23.445652
| 95
| 0.658785
| 349
| 2,157
| 3.868195
| 0.240688
| 0.047407
| 0.013333
| 0.024444
| 0.395556
| 0.34963
| 0.34963
| 0.28
| 0.225185
| 0.225185
| 0
| 0.040756
| 0.215114
| 2,157
| 91
| 96
| 23.703297
| 0.756645
| 0.046824
| 0
| 0.3125
| 0
| 0
| 0.039063
| 0.01123
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.09375
| null | null | 0.078125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a7c48d84a538009f1d4846a3bf1ffec3626caf1
| 1,005
|
py
|
Python
|
Components/Align All Components.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | 1
|
2021-09-04T18:41:30.000Z
|
2021-09-04T18:41:30.000Z
|
Components/Align All Components.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | null | null | null |
Components/Align All Components.py
|
davidtahim/Glyphs-Scripts
|
5ed28805b5fe03c63d904ad2f79117844c22aa44
|
[
"Apache-2.0"
] | null | null | null |
#MenuTitle: Align All Components
# -*- coding: utf-8 -*-
__doc__="""
Fakes auto-alignment in glyphs that cannot be auto-aligned.
"""
import GlyphsApp
thisFont = Glyphs.font # frontmost font
thisFontMaster = thisFont.selectedFontMaster # active master
thisFontMasterID = thisFont.selectedFontMaster.id # active master
listOfSelectedLayers = thisFont.selectedLayers # active layers of selected glyphs
def process( thisLayer ):
advance = 0.0
for thisComponent in thisLayer.components:
thisComponent.position = NSPoint( advance, 0.0 )
advance += thisComponent.component.layers[thisFontMasterID].width
thisLayer.width = advance
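# Worked example (added): with two components of widths 500 and 520, the loop
# places them at x=0 and x=500 and sets the layer width to 1020.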
thisFont.disableUpdateInterface() # suppresses UI updates in Font View
for thisLayer in listOfSelectedLayers:
thisGlyph = thisLayer.parent
print "Aligning components in:", thisGlyph.name
thisGlyph.beginUndo() # begin undo grouping
process( thisLayer )
thisGlyph.endUndo() # end undo grouping
thisFont.enableUpdateInterface() # re-enables UI updates in Font View
| 32.419355
| 81
| 0.78607
| 112
| 1,005
| 7.017857
| 0.5625
| 0.066158
| 0.022901
| 0.038168
| 0.048346
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005727
| 0.131343
| 1,005
| 30
| 82
| 33.5
| 0.894616
| 0.235821
| 0
| 0
| 0
| 0
| 0.110818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045455
| null | null | 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a7cd64e2508df91e539f1a6f804bc5eb4b0ea83
| 12,372
|
py
|
Python
|
audio/audio_server.py
|
artigianitecnologici/marrtino_apps
|
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
|
[
"BSD-4-Clause"
] | null | null | null |
audio/audio_server.py
|
artigianitecnologici/marrtino_apps
|
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
|
[
"BSD-4-Clause"
] | null | null | null |
audio/audio_server.py
|
artigianitecnologici/marrtino_apps
|
b58bf4daa1d06db2f1c8a47be02b29948d41f48d
|
[
"BSD-4-Clause"
] | null | null | null |
# Only PCM 16 bit wav 44100 Hz - Use audacity or sox to convert audio files.
# WAV generation
# Synth
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bip.wav synth 0.25 sine 800
# sox -n --no-show-progress -G --channels 1 -r 44100 -b 16 -t wav bop.wav synth 0.25 sine 400
# Voices
# pico2wave -l "it-IT" -w start.wav "Bene! Si Parte!"
# Then convert wav files to to 44100 Hz
# Note: some initial sound may not be played.
# alsaaudio examples
# https://larsimmisch.github.io/pyalsaaudio/libalsaaudio.html
import threading
import time
import socket
import sys, os, platform
import re
import wave
import argparse
import rospy
use_sound_play = False
use_alsaaudio = True
try:
from sound_play.msg import SoundRequest
from sound_play.libsoundplay import SoundClient
except:
print('ROS package sound_play required.')
print('Install with: sudo apt-get install ros-kinetic-audio-common libasound2')
use_sound_play = False
#sys.exit(0)
try:
import sox
except:
print('sox required. Install with: pip install --user sox')
sys.exit(0)
try:
import alsaaudio
except:
print('alsaaudio required. Install with: pip install --user pyalsaaudio')
use_alsaaudio = False
#sys.exit(0)
from asr_server import ASRServer
SOUNDS_DIR = "sounds/" # dir with sounds
soundfile = None # sound file
tts_server = None
asr_server = None
# NOTE: this callback serves the commented-out PyAudio playback path
# (playwav_pa below) and would additionally require 'import pyaudio'.
def TTS_callback(in_data, frame_count, time_info, status):
global soundfile
if (soundfile==None):
return (None, True)
else:
data = soundfile.readframes(frame_count)
return (data, pyaudio.paContinue)
class TTSServer(threading.Thread):
def __init__(self, port, output_device):
global use_alsaaudio, use_sound_play
threading.Thread.__init__(self)
# Initialize audio player
self.streaming = False
self.output_device = output_device
self.soundhandle = None
m = platform.machine()
print "Machine type:" , m
if (m[0:3]=='arm'):
use_sound_play = False
if (use_sound_play):
os.system('roslaunch sound_play.launch &')
time.sleep(5)
rospy.init_node('sound_client', disable_signals=True)
use_alsaaudio = False
elif (use_alsaaudio):
self.init_alsaaudio()
else:
print('Cannot initialize audio interface')
# Create a TCP/IP socket
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.settimeout(3)
# Bind the socket to the port
server_address = ('', port)
self.sock.bind(server_address)
self.sock.listen(1)
print "TTS Server running on port ", port, " ..."
self.dorun = True
self.connection = None
# Dictionary of sounds
self.Sounds = {}
self.Sounds['bip'] = wave.open(SOUNDS_DIR+'bip.wav', 'rb')
self.idcache = 0
def init_alsaaudio(self):
print("Audio devices available")
pp = alsaaudio.pcms()
if (self.output_device=='sysdefault'):
# select proper sysdefault name
for l in pp:
print(' %s' %l)
if (l[0:10]=='sysdefault'):
print "choose ",l
self.output_device = l # choose default device
break
print("Audio device used: %s" %self.output_device)
self.aa_stream = None
retry = 3
while retry>0:
try:
self.aa_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, self.output_device)
retry = 0
except Exception as e:
print(e)
retry -= 1
time.sleep(2)
if self.aa_stream == None:
retry = 3
while retry>0:
try:
self.output_device='default'
print("Audio device used: %s" %self.output_device)
self.aa_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, self.output_device)
retry = 0
except Exception as e:
print(e)
retry -= 1
time.sleep(2)
self.audio_rate = 44100
self.periodsize = self.audio_rate / 8
if self.aa_stream != None:
self.aa_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
self.aa_stream.setchannels(1)
self.aa_stream.setrate(self.audio_rate)
self.aa_stream.setperiodsize(self.periodsize)
def stop(self):
self.dorun = False
def connect(self):
connected = False
while (self.dorun and not connected):
try:
# print 'Waiting for a connection ...'
# Wait for a connection
self.connection, client_address = self.sock.accept()
self.connection.settimeout(3) # timeout when listening (exit with CTRL+C)
connected = True
print 'TTS Server Connection from ', client_address
except:
pass #print "Listen again ..."
def reply(self,mstr):
if (self.connection != None):
try:
mstr = mstr.encode('utf-8')
self.connection.send(mstr+'\n\r')
except:
print('Connection closed')
def setVolume(self,volperc): # volume in percentag [0-100]
cmdstr = 'amixer set PCM %d%%' %volperc
os.system(cmdstr)
def run(self):
global asr_server
if (use_sound_play and self.soundhandle == None):
self.soundhandle = SoundClient()
time.sleep(3)
self.setVolume(99) # set volume (99% = +3 dB)
#print('bip')
#self.play('bip')
#time.sleep(3)
self.say('Hello!', 'en')
self.say('Audio server is running.', 'en')
time.sleep(3)
while (self.dorun):
self.connect()
try:
# Receive the data in small chunks
while (self.dorun):
try:
data = self.connection.recv(320)
data = data.strip()
except socket.timeout:
data = "***"
except:
data = None
if (data!=None and data !="" and data!="***"):
if data!="ASR":
print 'TTS Received [%s]' % data
if (data.startswith('TTS')):
lang = 'en-US' # default language
strsay = data[4:]
if (data[3]=='['):
vd = re.split('\[|\]',data)
lang = vd[1]
strsay = vd[2]
self.say(strsay,lang)
self.reply('OK')
elif (data=="ASR"):
#print('asr request')
bh = asr_server.get_asr()
self.reply(bh)
if bh!='':
print('ASR sent [%s]' %bh)
elif (data.startswith('SOUND')):
self.play(data[6:]) # play this sound
self.reply('OK')
#print 'sending data back to the client'
#self.connection.sendall("OK")
else:
print('Message not understood: %s' %data)
self.reply('ERR')
elif (data == None or data==""):
break
finally:
print 'TTS Server Connection closed.'
# Clean up the connection
if (self.connection != None):
self.connection.close()
self.connection = None
self.say('Audio server has been closed.', 'en')
time.sleep(2)
self.aa_stream = None
def say(self, data, lang):
print 'Say ',data
if (use_sound_play):
voice = 'voice_kal_diphone'
volume = 1.0
print 'Saying: %s' % data
print 'Voice: %s' % voice
print 'Volume: %s' % volume
self.soundhandle.say(data, voice, volume)
rospy.sleep(3)
elif (use_alsaaudio):
cachefile = 'cache'+str(self.idcache)
self.idcache = (self.idcache+1)%10
tmpfile = "/tmp/cache.wav"
ofile = "%s%s.wav" %(SOUNDS_DIR, cachefile)
cmd = 'rm %s %s' %(tmpfile, ofile)
os.system(cmd)
if (lang=='en'):
lang = 'en-US'
elif (len(lang)==2):
lang = lang+'-'+lang.upper()
time.sleep(0.2)
cmd = 'pico2wave -l "%s" -w %s " , %s"' %(lang,tmpfile, data)
print cmd
os.system(cmd)
time.sleep(0.2)
# convert samplerate
tfm = sox.Transformer()
tfm.rate(samplerate=self.audio_rate)
tfm.build(tmpfile, ofile)
time.sleep(0.2)
self.play(cachefile)
else:
print('Cannot play audio. No infrastructure available.')
def play(self, name):
if (use_alsaaudio):
print('Playing %s ...' %name)
soundfile = None
i = 0
while (i<3): #((not name in self.Sounds) and (i<3)):
try:
soundfile = wave.open(SOUNDS_DIR+name+".wav", 'rb')
#self.Sounds[name] = soundfile
except:
print "File %s%s.wav not found." %(SOUNDS_DIR,name)
time.sleep(1)
i += 1
if (soundfile != None and use_alsaaudio): #(name in self.Sounds):
self.playwav_aa(soundfile)
print('Play completed.')
def playwav_aa(self, soundfile):
soundfile.setpos(0)
data = soundfile.readframes(self.periodsize)
while (len(data)>0):
# print('stream data %d' %(len(data)))
if self.aa_stream != None:
self.aa_stream.write(data)
data = soundfile.readframes(self.periodsize)
# def playwav_pa(self, sfile):
# global soundfile
# self.streaming = True
# self.stream = self.pa.open(format = 8, #self.pa.get_format_from_width(f.getsampwidth#()),
# channels = 1, #f.getnchannels(),
# rate = 44100, #f.getframerate(),
# output = True,
# stream_callback = TTS_callback,
# output_device_index = self.output_device)
# soundfile = sfile
# soundfile.setpos(0)
# self.stream.start_stream()
# while self.stream.is_active():
# time.sleep(1.0)
# self.stream.stop_stream()
# self.stream.close()
# self.streaming = False
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='audio_server')
parser.add_argument('-ttsport', type=int, help='TTS server port [default: 9001]', default=9001)
parser.add_argument('-asrport', type=int, help='ASR server port [default: 9002]', default=9002)
parser.add_argument('-device', type=str, help='audio device [default: \'sysdefault\']', default='sysdefault')
args = parser.parse_args()
tts_server = TTSServer(args.ttsport,args.device)
asr_server = ASRServer(args.asrport)
tts_server.start()
time.sleep(1)
asr_server.start()
run = True
while (run):
try:
time.sleep(3)
#if (not tts_server.streaming):
# cmd = 'play -n --no-show-progress -r 44100 -c1 synth 0.1 sine 50 vol 0.01' # keep sound alive
# os.system(cmd)
except KeyboardInterrupt:
print "Exit"
run = False
tts_server.stop()
asr_server.stop()
sys.exit(0)
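# Minimal client sketch (added; assumes the server is on localhost with the
# default ports above; message forms taken from TTSServer.run()):
# import socket
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect(('localhost', 9001))
# s.send('TTS[en] hello there')   # speak text; server replies 'OK'
# s.send('SOUND bip')             # play sounds/bip.wav
# s.send('ASR')                   # poll for the latest ASR result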
| 30.93
| 116
| 0.516246
| 1,364
| 12,372
| 4.585777
| 0.246334
| 0.020144
| 0.023022
| 0.01279
| 0.119265
| 0.096882
| 0.086331
| 0.086331
| 0.076739
| 0.076739
| 0
| 0.020704
| 0.375364
| 12,372
| 399
| 117
| 31.007519
| 0.78869
| 0.165535
| 0
| 0.312977
| 0
| 0
| 0.106993
| 0.002341
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.003817
| 0.049618
| null | null | 0.114504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a85751a815d71753d3e2aaa3ccbd06b815ba219
| 5,200
|
py
|
Python
|
bat_train/evaluate.py
|
bgotthold-usgs/batdetect
|
0d4a70f1cda9f6104f6f785f0d953f802fddf0f1
|
[
"BSD-Source-Code"
] | 59
|
2018-03-05T08:58:59.000Z
|
2022-03-19T17:33:14.000Z
|
bat_train/evaluate.py
|
bgotthold-usgs/batdetect
|
0d4a70f1cda9f6104f6f785f0d953f802fddf0f1
|
[
"BSD-Source-Code"
] | 11
|
2018-03-16T21:46:51.000Z
|
2021-12-14T16:07:55.000Z
|
bat_train/evaluate.py
|
bgotthold-usgs/batdetect
|
0d4a70f1cda9f6104f6f785f0d953f802fddf0f1
|
[
"BSD-Source-Code"
] | 24
|
2018-03-15T14:48:08.000Z
|
2022-01-09T01:12:51.000Z
|
import numpy as np
from sklearn.metrics import roc_curve, auc
def compute_error_auc(op_str, gt, pred, prob):
# classification error
pred_int = (pred > prob).astype(np.int)
class_acc = (pred_int == gt).mean() * 100.0
# ROC - area under curve
fpr, tpr, thresholds = roc_curve(gt, pred)
roc_auc = auc(fpr, tpr)
print op_str, ', class acc = %.3f, ROC AUC = %.3f' % (class_acc, roc_auc)
#return class_acc, roc_auc
def calc_average_precision(recall, precision):
precision[np.isnan(precision)] = 0
recall[np.isnan(recall)] = 0
# pascal'12 way
mprec = np.hstack((0, precision, 0))
mrec = np.hstack((0, recall, 1))
for ii in range(mprec.shape[0]-2, -1,-1):
mprec[ii] = np.maximum(mprec[ii], mprec[ii+1])
inds = np.where(np.not_equal(mrec[1:], mrec[:-1]))[0]+1
ave_prec = ((mrec[inds] - mrec[inds-1])*mprec[inds]).sum()
return ave_prec
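# Worked example (added): recall = np.array([0.5, 1.0]),
# precision = np.array([1.0, 0.5]) -> mprec = [0, 1, 0.5, 0] becomes
# [1, 1, 0.5, 0] after the backward max, mrec = [0, 0.5, 1, 1], and
# ave_prec = 0.5*1.0 + 0.5*0.5 = 0.75.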
def remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size):
# this filters out predictions and gt that are close to the end
# this is a bit messy because of the shapes of gt_pos_o
nms_pos = []
nms_prob = []
gt_pos = []
for ii in range(len(nms_pos_o)):
valid_time = durations[ii] - win_size
gt_cur = gt_pos_o[ii]
if gt_cur.shape[0] > 0:
gt_pos.append(gt_cur[:, 0][gt_cur[:, 0] < valid_time][..., np.newaxis])
else:
gt_pos.append(gt_cur)
valid_preds = nms_pos_o[ii] < valid_time
nms_pos.append(nms_pos_o[ii][valid_preds])
nms_prob.append(nms_prob_o[ii][valid_preds, 0][..., np.newaxis])
return nms_pos, nms_prob, gt_pos
def prec_recall_1d(nms_pos_o, nms_prob_o, gt_pos_o, durations, detection_overlap, win_size, remove_eof=True):
"""
nms_pos, nms_prob, and gt_pos are lists of numpy arrays specifying detection
position, detection probability and GT position.
Each list entry is a different file.
Each entry in nms_pos is an array of length num_entries. For nms_prob and
gt_pos its an array of size (num_entries, 1).
durations is a array of the length of the number of files with each entry
containing that file length in seconds.
detection_overlap determines if a prediction is counted as correct or not.
win_size is used to ignore predictions and ground truth at the end of an
audio file.
returns
precision: fraction of retrieved instances that are relevant.
recall: fraction of relevant instances that are retrieved.
"""
if remove_eof:
# filter out the detections in both ground truth and predictions that are too
# close to the end of the file - dont count them during eval
nms_pos, nms_prob, gt_pos = remove_end_preds(nms_pos_o, nms_prob_o, gt_pos_o, durations, win_size)
else:
nms_pos = nms_pos_o
nms_prob = nms_prob_o
gt_pos = gt_pos_o
# loop through each file
true_pos = [] # correctly predicts the ground truth
false_pos = [] # says there is a detection but isn't
for ii in range(len(nms_pos)):
num_preds = nms_pos[ii].shape[0]
if num_preds > 0: # check to make sure it contains something
num_gt = gt_pos[ii].shape[0]
# for each set of predictions label them as true positive or false positive (i.e. 1-tp)
tp = np.zeros(num_preds)
distance_to_gt = np.abs(gt_pos[ii].ravel()-nms_pos[ii].ravel()[:, np.newaxis])
within_overlap = (distance_to_gt <= detection_overlap)
# remove duplicate detections - assign to valid detection with highest prob
for jj in range(num_gt):
inds = np.where(within_overlap[:, jj])[0] # get the indices of all valid predictions
if inds.shape[0] > 0:
max_prob = np.argmax(nms_prob[ii][inds])
selected_pred = inds[max_prob]
within_overlap[selected_pred, :] = False
tp[selected_pred] = 1 # set as true positives
true_pos.append(tp)
false_pos.append(1 - tp)
# calc precision and recall - sort confidence in descending order
# PASCAL style
conf = np.concatenate(nms_prob)[:, 0]
num_gt = np.concatenate(gt_pos).shape[0]
inds = np.argsort(conf)[::-1]
true_pos_cat = np.concatenate(true_pos)[inds].astype(float)
false_pos_cat = np.concatenate(false_pos)[inds].astype(float) # i.e. 1-true_pos_cat
if (conf == conf[0]).sum() == conf.shape[0]:
# all the probability values are the same therefore we will not sweep
# the curve and instead will return a single value
true_pos_sum = true_pos_cat.sum()
false_pos_sum = false_pos_cat.sum()
recall = np.asarray([true_pos_sum / float(num_gt)])
precision = np.asarray([(true_pos_sum / (false_pos_sum + true_pos_sum))])
elif inds.shape[0] > 0:
# otherwise produce a list of values
true_pos_cum = np.cumsum(true_pos_cat)
false_pos_cum = np.cumsum(false_pos_cat)
recall = true_pos_cum / float(num_gt)
precision = (true_pos_cum / (false_pos_cum + true_pos_cum))
return precision, recall
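# Usage sketch (added, hypothetical data shaped as the docstring describes):
# import numpy as np
# nms_pos  = [np.array([0.5, 1.2])]        # detection times (s) in one file
# nms_prob = [np.array([[0.9], [0.4]])]    # (num_entries, 1)
# gt_pos   = [np.array([[0.55]])]          # (num_entries, 1)
# precision, recall = prec_recall_1d(nms_pos, nms_prob, gt_pos,
#                                    np.array([3.0]), 0.1, 0.5)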
| 38.80597
| 109
| 0.649038
| 814
| 5,200
| 3.927518
| 0.255528
| 0.031905
| 0.015327
| 0.012512
| 0.1198
| 0.071317
| 0.054426
| 0.041289
| 0.041289
| 0.041289
| 0
| 0.012323
| 0.250962
| 5,200
| 133
| 110
| 39.097744
| 0.808472
| 0.180962
| 0
| 0.027027
| 0
| 0
| 0.009756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.027027
| null | null | 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a86094f8b6e8a0e12d48278a3971b48591f4ec2
| 27,399
|
py
|
Python
|
azure-mgmt/tests/test_mgmt_network.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 2
|
2020-07-29T14:22:17.000Z
|
2020-11-06T18:47:40.000Z
|
azure-mgmt/tests/test_mgmt_network.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2016-08-01T07:37:04.000Z
|
2016-08-01T07:37:04.000Z
|
azure-mgmt/tests/test_mgmt_network.py
|
SUSE/azure-sdk-for-python
|
324f99d26dd6f4ee9793b9bf1d4d5f928e4b6c2f
|
[
"MIT"
] | 1
|
2020-12-12T21:04:41.000Z
|
2020-12-12T21:04:41.000Z
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import unittest
import azure.mgmt.network.models
from testutils.common_recordingtestcase import record
from tests.mgmt_testcase import HttpStatusCode, AzureMgmtTestCase
class MgmtNetworkTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtNetworkTest, self).setUp()
self.network_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
if not self.is_playback():
self.create_resource_group()
@record
def test_network_interface_card(self):
vnet_name = self.get_resource_name('pyvnet')
subnet_name = self.get_resource_name('pysubnet')
nic_name = self.get_resource_name('pynic')
# Create VNet
async_vnet_creation = self.network_client.virtual_networks.create_or_update(
self.group_name,
vnet_name,
{
'location': self.region,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
}
)
async_vnet_creation.wait()
# Create Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
subnet_name,
{'address_prefix': '10.0.0.0/24'}
)
subnet_info = async_subnet_creation.result()
# Create NIC
async_nic_creation = self.network_client.network_interfaces.create_or_update(
self.group_name,
nic_name,
{
'location': self.region,
'ip_configurations': [{
'name': 'MyIpConfig',
'subnet': {
'id': subnet_info.id
}
}]
}
)
nic_info = async_nic_creation.result()
nic_info = self.network_client.network_interfaces.get(
self.group_name,
nic_info.name
)
nics = list(self.network_client.network_interfaces.list(
self.group_name
))
self.assertEqual(len(nics), 1)
nics = list(self.network_client.network_interfaces.list_all())
self.assertGreater(len(nics), 0)
async_delete = self.network_client.network_interfaces.delete(
self.group_name,
nic_info.name
)
async_delete.wait()
@record
def test_load_balancers(self):
public_ip_name = self.get_resource_name('pyipname')
frontend_ip_name = self.get_resource_name('pyfipname')
addr_pool_name = self.get_resource_name('pyapname')
probe_name = self.get_resource_name('pyprobename')
lb_name = self.get_resource_name('pylbname')
front_end_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/frontendIPConfigurations/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
frontend_ip_name
)
back_end_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/backendAddressPools/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
addr_pool_name
)
probe_id = ('/subscriptions/{}'
'/resourceGroups/{}'
'/providers/Microsoft.Network'
'/loadBalancers/{}'
'/probes/{}').format(
self.settings.SUBSCRIPTION_ID,
self.group_name,
lb_name,
probe_name
)
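# The three IDs above expand to ARM resource paths of the form (added):
# /subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Network
#   /loadBalancers/<lb>/{frontendIPConfigurations|backendAddressPools|probes}/<name>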
# Create PublicIP
public_ip_parameters = {
'location': self.region,
'public_ip_allocation_method': 'static',
'idle_timeout_in_minutes': 4
}
async_publicip_creation = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
public_ip_parameters
)
public_ip_info = async_publicip_creation.result()
# Building a FrontEndIpPool
frontend_ip_configurations = [{
'name': frontend_ip_name,
'private_ip_allocation_method': 'Dynamic',
'public_ip_address': {
'id': public_ip_info.id
}
}]
# Building a BackEnd adress pool
backend_address_pools = [{
'name': addr_pool_name
}]
# Building a HealthProbe
probes = [{
'name': probe_name,
'protocol': 'Http',
'port': 80,
'interval_in_seconds': 15,
'number_of_probes': 4,
'request_path': 'healthprobe.aspx'
}]
# Building a LoadBalancer rule
load_balancing_rules = [{
'name': 'azure-sample-lb-rule',
'protocol': 'tcp',
'frontend_port': 80,
'backend_port': 80,
'idle_timeout_in_minutes': 4,
'enable_floating_ip': False,
'load_distribution': 'Default',
'frontend_ip_configuration': {
'id': front_end_id
},
'backend_address_pool': {
'id': back_end_id
},
'probe': {
'id': probe_id
}
}]
# Building InboundNATRule1
inbound_nat_rules = [{
'name': 'azure-sample-netrule1',
'protocol': 'tcp',
'frontend_port': 21,
'backend_port': 22,
'enable_floating_ip': False,
'idle_timeout_in_minutes': 4,
'frontend_ip_configuration': {
'id': front_end_id
}
}]
# Building InboundNATRule2
inbound_nat_rules.append({
'name': 'azure-sample-netrule2',
'protocol': 'tcp',
'frontend_port': 23,
'backend_port': 22,
'enable_floating_ip': False,
'idle_timeout_in_minutes': 4,
'frontend_ip_configuration': {
'id': front_end_id
}
})
# Creating Load Balancer
lb_async_creation = self.network_client.load_balancers.create_or_update(
self.group_name,
lb_name,
{
'location': self.region,
'frontend_ip_configurations': frontend_ip_configurations,
'backend_address_pools': backend_address_pools,
'probes': probes,
'load_balancing_rules': load_balancing_rules,
'inbound_nat_rules' :inbound_nat_rules
}
)
lb_info = lb_async_creation.result()
# Get it
lb_info = self.network_client.load_balancers.get(
self.group_name,
lb_name
)
# List all
lbs = self.network_client.load_balancers.list_all()
lbs = list(lbs)
self.assertGreater(len(lbs), 0)
# List RG
lbs = self.network_client.load_balancers.list(self.group_name)
lbs = list(lbs)
self.assertGreater(len(lbs), 0)
# Delete
async_lb_delete = self.network_client.load_balancers.delete(
self.group_name,
lb_name
)
async_lb_delete.wait()
@record
def test_public_ip_addresses(self):
public_ip_name = self.get_resource_name('pyipname')
params_create = azure.mgmt.network.models.PublicIPAddress(
location=self.region,
public_ip_allocation_method=azure.mgmt.network.models.IPAllocationMethod.dynamic,
tags={
'key': 'value',
},
)
result_create = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
params_create,
)
result_create.wait() # AzureOperationPoller
#self.assertEqual(result_create.status_code, HttpStatusCode.OK)
result_get = self.network_client.public_ip_addresses.get(
self.group_name,
public_ip_name,
)
#self.assertEqual(result_get.status_code, HttpStatusCode.OK)
self.assertEqual(result_get.location, self.region)
self.assertEqual(result_get.tags['key'], 'value')
result_list = self.network_client.public_ip_addresses.list(self.group_name)
#self.assertEqual(result_list.status_code, HttpStatusCode.OK)
result_list = list(result_list)
self.assertEqual(len(result_list), 1)
result_list_all = self.network_client.public_ip_addresses.list_all()
#self.assertEqual(result_list_all.status_code, HttpStatusCode.OK)
result_list_all = list(result_list_all)
self.assertGreater(len(result_list_all), 0)
result_delete = self.network_client.public_ip_addresses.delete(
self.group_name,
public_ip_name,
)
result_delete.wait() # AzureOperationPoller
#self.assertEqual(result_delete.status_code, HttpStatusCode.OK)
result_list = self.network_client.public_ip_addresses.list(self.group_name)
#self.assertEqual(result_list.status_code, HttpStatusCode.OK)
result_list = list(result_list)
self.assertEqual(len(result_list), 0)
@record
def test_virtual_networks(self):
network_name = self.get_resource_name('pyvnet')
subnet1_name = self.get_resource_name('pyvnetsubnetone')
subnet2_name = self.get_resource_name('pyvnetsubnettwo')
params_create = azure.mgmt.network.models.VirtualNetwork(
location=self.region,
address_space=azure.mgmt.network.models.AddressSpace(
address_prefixes=[
'10.0.0.0/16',
],
),
dhcp_options=azure.mgmt.network.models.DhcpOptions(
dns_servers=[
'10.1.1.1',
'10.1.2.4',
],
),
subnets=[
azure.mgmt.network.models.Subnet(
name=subnet1_name,
address_prefix='10.0.1.0/24',
),
azure.mgmt.network.models.Subnet(
name=subnet2_name,
address_prefix='10.0.2.0/24',
),
],
)
result_create = self.network_client.virtual_networks.create_or_update(
self.group_name,
network_name,
params_create,
)
vnet = result_create.result()
vnet = self.network_client.virtual_networks.get(
self.group_name,
vnet.name,
)
ip_availability = self.network_client.virtual_networks.check_ip_address_availability(
self.group_name,
vnet.name,
'10.0.1.35' # should be available, since the VNet and Subnet 1 are new
)
self.assertTrue(ip_availability.available)
result_list = list(self.network_client.virtual_networks.list(
self.group_name,
))
self.assertEqual(len(result_list), 1)
result_list_all = list(self.network_client.virtual_networks.list_all())
async_delete = self.network_client.virtual_networks.delete(
self.group_name,
network_name,
)
async_delete.wait()
@record
def test_dns_availability(self):
result_check = self.network_client.check_dns_name_availability(
self.region,
'pydomain',
)
#self.assertEqual(result_check.status_code, HttpStatusCode.OK)
self.assertTrue(result_check)
@record
def test_subnets(self):
network_name = self.get_resource_name('pysubnet')
subnet1_name = self.get_resource_name('pysubnetone')
subnet2_name = self.get_resource_name('pysubnettwo')
params_create = azure.mgmt.network.models.VirtualNetwork(
location=self.region,
address_space=azure.mgmt.network.models.AddressSpace(
address_prefixes=[
'10.0.0.0/16',
],
),
dhcp_options=azure.mgmt.network.models.DhcpOptions(
dns_servers=[
'10.1.1.1',
'10.1.2.4',
],
),
subnets=[
azure.mgmt.network.models.Subnet(
name=subnet1_name,
address_prefix='10.0.1.0/24',
),
],
)
result_create = self.network_client.virtual_networks.create_or_update(
self.group_name,
network_name,
params_create,
)
result_create.wait() # AzureOperationPoller
params_create = azure.mgmt.network.models.Subnet(
name=subnet2_name,
address_prefix='10.0.2.0/24',
)
result_create = self.network_client.subnets.create_or_update(
self.group_name,
network_name,
subnet2_name,
params_create,
)
result_create.wait() # AzureOperationPoller
result_get = self.network_client.virtual_networks.get(
self.group_name,
network_name,
)
self.assertEqual(len(result_get.subnets), 2)
result_get = self.network_client.subnets.get(
self.group_name,
network_name,
subnet2_name,
)
result_list = self.network_client.subnets.list(
self.group_name,
network_name,
)
subnets = list(result_list)
result_delete = self.network_client.subnets.delete(
self.group_name,
network_name,
subnet2_name,
)
result_delete.wait()
@record
def test_network_security_groups(self):
security_group_name = self.get_resource_name('pysecgroup')
security_rule_name = self.get_resource_name('pysecgrouprule')
params_create = azure.mgmt.network.models.NetworkSecurityGroup(
location=self.region,
security_rules=[
azure.mgmt.network.models.SecurityRule(
name=security_rule_name,
access=azure.mgmt.network.models.SecurityRuleAccess.allow,
description='Test security rule',
destination_address_prefix='*',
destination_port_range='123-3500',
direction=azure.mgmt.network.models.SecurityRuleDirection.inbound,
priority=500,
protocol=azure.mgmt.network.models.SecurityRuleProtocol.tcp,
source_address_prefix='*',
source_port_range='655',
),
],
)
result_create = self.network_client.network_security_groups.create_or_update(
self.group_name,
security_group_name,
params_create,
)
result_create.wait() # AzureOperationPoller
result_get = self.network_client.network_security_groups.get(
self.group_name,
security_group_name,
)
result_list = list(self.network_client.network_security_groups.list(
self.group_name,
))
self.assertEqual(len(result_list), 1)
result_list_all = list(self.network_client.network_security_groups.list_all())
# Security Rules
new_security_rule_name = self.get_resource_name('pynewrule')
async_security_rule = self.network_client.security_rules.create_or_update(
self.group_name,
security_group_name,
new_security_rule_name,
{
'access':azure.mgmt.network.models.SecurityRuleAccess.allow,
'description':'New Test security rule',
'destination_address_prefix':'*',
'destination_port_range':'123-3500',
'direction':azure.mgmt.network.models.SecurityRuleDirection.outbound,
'priority':400,
'protocol':azure.mgmt.network.models.SecurityRuleProtocol.tcp,
'source_address_prefix':'*',
'source_port_range':'655',
}
)
security_rule = async_security_rule.result()
security_rule = self.network_client.security_rules.get(
self.group_name,
security_group_name,
security_rule.name
)
self.assertEqual(security_rule.name, new_security_rule_name)
new_security_rules = list(self.network_client.security_rules.list(
self.group_name,
security_group_name
))
self.assertEqual(len(new_security_rules), 2)
result_delete = self.network_client.security_rules.delete(
self.group_name,
security_group_name,
new_security_rule_name
)
result_delete.wait()
# Delete NSG
result_delete = self.network_client.network_security_groups.delete(
self.group_name,
security_group_name,
)
result_delete.wait()
@record
def test_routes(self):
route_table_name = self.get_resource_name('pyroutetable')
route_name = self.get_resource_name('pyroute')
async_route_table = self.network_client.route_tables.create_or_update(
self.group_name,
route_table_name,
{'location': self.region}
)
route_table = async_route_table.result()
route_table = self.network_client.route_tables.get(
self.group_name,
route_table.name
)
self.assertEqual(route_table.name, route_table_name)
route_tables = list(self.network_client.route_tables.list(
self.group_name
))
self.assertEqual(len(route_tables), 1)
route_tables = list(self.network_client.route_tables.list_all())
self.assertGreater(len(route_tables), 0)
async_route = self.network_client.routes.create_or_update(
self.group_name,
route_table.name,
route_name,
{
'address_prefix': '10.1.0.0/16',
'next_hop_type': 'None'
}
)
route = async_route.result()
route = self.network_client.routes.get(
self.group_name,
route_table.name,
route.name
)
self.assertEqual(route.name, route_name)
routes = list(self.network_client.routes.list(
self.group_name,
route_table.name
))
self.assertEqual(len(routes), 1)
async_route_delete = self.network_client.routes.delete(
self.group_name,
route_table.name,
route.name
)
async_route_delete.wait()
async_route_table_delete = self.network_client.route_tables.delete(
self.group_name,
route_table_name
)
async_route_table_delete.wait()
@record
def test_usages(self):
usages = list(self.network_client.usages.list(self.region))
self.assertGreater(len(usages), 1)
self.assertTrue(all(hasattr(u, 'name') for u in usages))
@record
def test_express_route_service_providers(self):
ersp = list(self.network_client.express_route_service_providers.list())
self.assertGreater(len(ersp), 0)
self.assertTrue(all(hasattr(u, 'bandwidths_offered') for u in ersp))
@record
def test_express_route_circuit(self):
express_route_name = self.get_resource_name('pyexpressroute')
async_express_route = self.network_client.express_route_circuits.create_or_update(
self.group_name,
express_route_name,
{
"location": self.region,
"sku": {
"name": "Standard_MeteredData",
"tier": "Standard",
"family": "MeteredData"
},
"service_provider_properties": {
"service_provider_name": "Comcast",
"peering_location": "Chicago",
"bandwidth_in_mbps": 100
}
}
)
express_route = async_express_route.result()
express_route = self.network_client.express_route_circuits.get(
self.group_name,
express_route_name
)
routes = list(self.network_client.express_route_circuits.list(
self.group_name
))
self.assertEqual(len(routes), 1)
routes = list(self.network_client.express_route_circuits.list_all())
self.assertGreater(len(routes), 0)
stats = self.network_client.express_route_circuits.get_stats(
self.group_name,
express_route_name
)
self.assertIsNotNone(stats)
async_peering = self.network_client.express_route_circuit_peerings.create_or_update(
self.group_name,
express_route_name,
'AzurePublicPeering',
{
"peering_type": "AzurePublicPeering",
"peer_asn": 100,
"primary_peer_address_prefix": "192.168.1.0/30",
"secondary_peer_address_prefix": "192.168.2.0/30",
"vlan_id": 200,
}
)
peering = async_peering.result()
peering = self.network_client.express_route_circuit_peerings.get(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
peerings = list(self.network_client.express_route_circuit_peerings.list(
self.group_name,
express_route_name
))
self.assertEqual(len(peerings), 1)
stats = self.network_client.express_route_circuits.get_peering_stats(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
self.assertIsNotNone(stats)
auth_name = self.get_resource_name('pyauth')
async_auth = self.network_client.express_route_circuit_authorizations.create_or_update(
self.group_name,
express_route_name,
auth_name,
{}
)
auth = async_auth.result()
auth = self.network_client.express_route_circuit_authorizations.get(
self.group_name,
express_route_name,
auth_name
)
auths = list(self.network_client.express_route_circuit_authorizations.list(
self.group_name,
express_route_name
))
self.assertEqual(len(auths), 1)
async_auth = self.network_client.express_route_circuit_authorizations.delete(
self.group_name,
express_route_name,
auth_name
)
async_auth.wait()
async_peering = self.network_client.express_route_circuit_peerings.delete(
self.group_name,
express_route_name,
'AzurePublicPeering'
)
async_peering.wait()
async_express_route = self.network_client.express_route_circuits.delete(
self.group_name,
express_route_name
)
async_express_route.wait()
@record
def test_virtual_network_gateway_operations(self):
# https://docs.microsoft.com/en-us/azure/vpn-gateway/vpn-gateway-howto-site-to-site-resource-manager-portal
vnet_name = self.get_resource_name('pyvirtnet')
fe_name = self.get_resource_name('pysubnetfe')
be_name = self.get_resource_name('pysubnetbe')
gateway_name = self.get_resource_name('pysubnetga')
# Create VNet
async_vnet_creation = self.network_client.virtual_networks.create_or_update(
self.group_name,
vnet_name,
{
'location': self.region,
'address_space': {
'address_prefixes': [
'10.11.0.0/16',
'10.12.0.0/16'
]
}
}
)
async_vnet_creation.wait()
# Create Front End Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
fe_name,
{'address_prefix': '10.11.0.0/24'}
)
fe_subnet_info = async_subnet_creation.result()
# Create Back End Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
be_name,
{'address_prefix': '10.12.0.0/24'}
)
be_subnet_info = async_subnet_creation.result()
# Create Gateway Subnet
async_subnet_creation = self.network_client.subnets.create_or_update(
self.group_name,
vnet_name,
'GatewaySubnet',
{'address_prefix': '10.12.255.0/27'}
)
gateway_subnet_info = async_subnet_creation.result()
# Public IP Address
public_ip_name = self.get_resource_name('pyipname')
params_create = azure.mgmt.network.models.PublicIPAddress(
location=self.region,
public_ip_allocation_method=azure.mgmt.network.models.IPAllocationMethod.dynamic,
tags={
'key': 'value',
},
)
result_create = self.network_client.public_ip_addresses.create_or_update(
self.group_name,
public_ip_name,
params_create,
)
public_ip_address = result_create.result()
# Gateway itself
vng_name = self.get_resource_name('pyvng')
gw_params = {
'location': self.region,
'gateway_type': 'VPN',
'vpn_type': 'RouteBased',
'enable_bgp': False,
'sku': {
'tier': 'Standard',
'capacity': 2,
'name': 'Standard'},
'ip_configurations':[{
'name': 'default',
'private_ip_allocation_method': 'Dynamic',
'subnet': {
'id': gateway_subnet_info.id
},
'public_ip_address': {
'id': public_ip_address.id
}
}],
}
async_create = self.network_client.virtual_network_gateways.create_or_update(
self.group_name,
vng_name,
gw_params
)
vng = async_create.result()
self.assertEqual(vng.name, vng_name)
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
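Every create_or_update and delete call in the tests above returns an Azure long-running-operation poller rather than the finished resource. A minimal sketch of the two blocking patterns the tests rely on, assuming the same authenticated network_client and resource group as above ('pyexamplevnet' and the region are placeholders):

vnet_params = {
    'location': 'westus',  # placeholder region
    'address_space': {'address_prefixes': ['10.0.0.0/16']},
}
poller = network_client.virtual_networks.create_or_update(group_name, 'pyexamplevnet', vnet_params)
vnet = poller.result()  # block until provisioning completes and return the deserialized resource
poller = network_client.virtual_networks.delete(group_name, 'pyexamplevnet')
poller.wait()           # block until completion when the return value is not needed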
| 33.454212
| 115
| 0.567904
| 2,714
| 27,399
| 5.389831
| 0.119381
| 0.057151
| 0.085999
| 0.036369
| 0.688474
| 0.577864
| 0.510938
| 0.401217
| 0.331966
| 0.267774
| 0
| 0.013431
| 0.334209
| 27,399
| 818
| 116
| 33.49511
| 0.7884
| 0.05409
| 0
| 0.440703
| 0
| 0
| 0.097832
| 0.025009
| 0
| 0
| 0
| 0
| 0.046852
| 0
| null | null | 0
| 0.005857
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9abefb5c7f43f4b3586ebf44ef35bd05d5118a | 1,223 | py | Python |
redisSeed.py | bigmacd/miscPython | ec473c724be54241e369a1bdb0f739d2b0ed02ee | ["BSD-3-Clause"] | null | null | null |
redisSeed.py | bigmacd/miscPython | ec473c724be54241e369a1bdb0f739d2b0ed02ee | ["BSD-3-Clause"] | null | null | null |
redisSeed.py | bigmacd/miscPython | ec473c724be54241e369a1bdb0f739d2b0ed02ee | ["BSD-3-Clause"] | null | null | null |
import time
import redis
import json
import argparse
""" Follows the StackExchange best practice for creating a work queue.
Basically push a task and publish a message that a task is there."""
def PushTask(client, queue, task, topic):
client.lpush(queue, task)
client.publish(topic, queue)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-q", "--queue", help="The queue from which workers will grab tasks")
parser.add_argument("-t", "--task", help="The task data")
parser.add_argument("-o", "--topic", help="The topic to which workers are subscribed")
parser.add_argument("-s", "--server", help="redis server host or IP")
parser.add_argument("-p",
"--port",
help="redis server port (default is 6379)",
type=int,
default=6379)
args = parser.parse_args()
if (args.queue is None
or args.task is None
or args.topic is None
or args.server is None):
parser.print_help()
else:
client = redis.StrictRedis(host=args.server, port=args.port)
PushTask(client, args.queue, args.task, args.topic)
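The script above covers only the producer half of the queue pattern described in its docstring. A worker-side counterpart, sketched under the same convention (host, port, and topic names are placeholders; LPUSH paired with RPOP gives FIFO order):

import redis

def work_loop(client, topic):
    # Each published message carries the name of the queue holding a task.
    pubsub = client.pubsub()
    pubsub.subscribe(topic)
    for message in pubsub.listen():
        if message['type'] != 'message':
            continue
        task = client.rpop(message['data'])
        if task is not None:
            print("working on: %s" % task)

if __name__ == "__main__":
    work_loop(redis.StrictRedis(host="localhost", port=6379), "mytopic")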
| 34.942857
| 95
| 0.614881
| 158
| 1,223
| 4.664557
| 0.424051
| 0.061058
| 0.115332
| 0.048847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008959
| 0.269828
| 1,223
| 34
| 96
| 35.970588
| 0.816349
| 0
| 0
| 0
| 0
| 0
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.148148
| null | null | 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0a9e0852ce066b6a61ac5cfb9625f8879b66f594 | 536 | py | Python |
serveur/serveurDroit.py | PL4typus/SysNetProject17 | 283c127a3363876360bc52b54eae939c6104c6b4 | ["MIT"] | null | null | null |
serveur/serveurDroit.py | PL4typus/SysNetProject17 | 283c127a3363876360bc52b54eae939c6104c6b4 | ["MIT"] | null | null | null |
serveur/serveurDroit.py | PL4typus/SysNetProject17 | 283c127a3363876360bc52b54eae939c6104c6b4 | ["MIT"] | null | null | null |
#!/usr/bin/python
import socket,sys,os
TCP_IP = '127.0.0.1'
TCP_PORT = 6262
BUFFER_SIZE = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((TCP_IP,TCP_PORT))
s.listen(5)
conn, addr = s.accept()
print('Incoming connection:', addr)
data = conn.recv(BUFFER_SIZE)
if data == "m" :
os.popen("chmod +w $PWD")
else :
os.popen("chmod -w $PWD")
while 1 :
data = conn.recv(BUFFER_SIZE)
print data
if data == "1":
break
rep = os.popen(data+" 2>&1")
conn.send("reponse : \n"+rep.read())
conn.close()
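A minimal matching client for the toy remote-shell protocol above, as a sketch only (the first byte toggles write permission on the server's working directory, "1" ends the session; TCP message framing is ignored for brevity, exactly as in the server):

import socket

c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
c.connect(('127.0.0.1', 6262))
c.send("m")                # "m" => server runs chmod +w on its cwd
for cmd in ("ls", "pwd"):
    c.send(cmd)
    print c.recv(1024)     # the server's "response: ..." output
c.send("1")                # tells the server loop to stop
c.close()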
| 14.486486
| 51
| 0.641791
| 91
| 536
| 3.681319
| 0.538462
| 0.089552
| 0.071642
| 0.107463
| 0.226866
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042889
| 0.173507
| 536
| 36
| 52
| 14.888889
| 0.713318
| 0.029851
| 0
| 0.090909
| 0
| 0
| 0.144509
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.045455
| null | null | 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0aab9dbbc4006ac10614eb6e13f1101929dde5bc | 5,242 | py | Python |
objectstoreSiteMover.py | nikmagini/pilot | 1c84fcf6f7e43b669d2357326cdbe06382ac829f | ["Apache-2.0"] | 13 | 2015-02-19T17:17:10.000Z | 2021-12-22T06:48:02.000Z |
objectstoreSiteMover.py | nikmagini/pilot | 1c84fcf6f7e43b669d2357326cdbe06382ac829f | ["Apache-2.0"] | 85 | 2015-01-06T15:01:51.000Z | 2018-11-29T09:03:35.000Z |
objectstoreSiteMover.py | nikmagini/pilot | 1c84fcf6f7e43b669d2357326cdbe06382ac829f | ["Apache-2.0"] | 22 | 2015-06-09T12:08:29.000Z | 2018-11-20T10:07:01.000Z |
#!/usr/bin/env python
# Copyright European Organization for Nuclear Research (CERN)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, <wguan@cern.ch>, 2014
# objectstoreSiteMover.py
import os
from configSiteMover import config_sm
import SiteMover
from xrootdObjectstoreSiteMover import xrootdObjectstoreSiteMover
from S3ObjectstoreSiteMover import S3ObjectstoreSiteMover
class objectstoreSiteMover(SiteMover.SiteMover):
"""
ObjectstoreSiteMover
It uses the url to decide which ObjectstoreSiteMover implementation to use.
"""
copyCommand = "objectstore"
checksum_command = "adler32"
def __init__(self, setup_path='', useTimerCommand=True, *args, **kwrds):
self._setup = setup_path
self._useTimerCommand = useTimerCommand
def get_data(self, gpfn, lfn, path, fsize=0, fchecksum=0, guid=0, **pdict):
gpfn = gpfn.replace("s3+rucio", "s3")
if gpfn.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
if gpfn.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
return sitemover.get_data(gpfn, lfn, path, fsize, fchecksum, guid, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % gpfn
def put_data(self, source, destination, fsize=0, fchecksum=0, **pdict):
# Get input parameters from pdict
lfn = pdict.get('lfn', '')
logPath = pdict.get('logPath', '')
if logPath != "":
surl = logPath
else:
surl = os.path.join(destination, lfn)
surl = surl.replace("s3+rucio", "s3")
if surl.startswith("root:"):
sitemover = xrootdObjectstoreSiteMover(self.getSetup())
return sitemover.put_data(source, destination, fsize, fchecksum, **pdict)
if surl.startswith("s3:"):
sitemover = S3ObjectstoreSiteMover(self.getSetup(), self._useTimerCommand)
return sitemover.put_data(source, surl, fsize, fchecksum, **pdict)
return -1, "No objectstore sitemover found for this scheme(%s)" % destination, destination, fsize, fchecksum, config_sm.ARCH_DEFAULT
if __name__ == '__main__':
os.environ['PilotHomeDir'] = os.getcwd()
from SiteInformation import SiteInformation
s1 = SiteInformation()
#s1.getObjectstoresField("os_access_key", "eventservice", queuename='BNL_EC2W2_MCORE')
f = objectstoreSiteMover()
gpfn = "nonsens_gpfn"
lfn = "AOD.310713._000004.pool.root.1"
path = os.getcwd()
fsize = "4261010441"
fchecksum = "9145af38"
dsname = "data11_7TeV.00177986.physics_Egamma.merge.AOD.r2276_p516_p523_tid310713_00"
report = {}
#print f.getGlobalFilePaths(dsname)
#print f.findGlobalFilePath(lfn, dsname)
#print f.getLocalROOTSetup()
#path = "root://atlas-objectstore.cern.ch//atlas/eventservice/2181626927" # + your .root filename"
"""
source = "/bin/hostname"
dest = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = 17848
localChecksum = "89b93830"
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
gpfn = "root://eosatlas.cern.ch//eos/atlas/unpledged/group-wisc/users/wguan/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = 17848
localChecksum = "89b93830"
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc')
"""
# test S3 object store
source = "/bin/hostname"
#dest = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
dest = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
localSize = None
localChecksum = None
print f.put_data(source, dest, fsize=localSize, fchecksum=localChecksum, prodSourceLabel='ptest', experiment='ATLAS', report =report, lfn=lfn, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='')
gpfn = "s3://ceph003.usatlas.bnl.gov:8443//wguan_bucket/dir1/dir2/NTUP_PHOTON.01255150._000001.root.1"
gpfn = "s3://s3-us-west-2.amazonaws.com:80//s3-atlasdatadisk-west2-racf/dir1/NTUP_PHOTON.01255150._000001.root.1"
lfn = "NTUP_PHOTON.01255150._000001.root.1"
tmpDir = "/tmp/"
localSize = None
localChecksum = None
print f.get_data(gpfn, lfn, tmpDir, fsize=localSize, fchecksum=localChecksum, experiment='ATLAS', report =report, guid='aa8ee1ae-54a5-468b-a0a0-41cf17477ffc', jobId=2730987843, jobsetID=2728044425,pandaProxySecretKey='deb05b9fb5034a45b80c03bd671359c9')
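Condensed to its core, the dispatch rule implemented by get_data/put_data above is scheme-based; a standalone sketch (mover names as in the module, nothing else assumed):

def pick_mover_name(url):
    # "s3+rucio" URLs are normalized to plain "s3" before dispatch.
    url = url.replace("s3+rucio", "s3")
    if url.startswith("root:"):
        return "xrootdObjectstoreSiteMover"
    if url.startswith("s3:"):
        return "S3ObjectstoreSiteMover"
    return None  # the module reports "No objectstore sitemover found"

print pick_mover_name("s3+rucio://host:8443//bucket/file.root")  # S3ObjectstoreSiteMover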
| 44.803419
| 256
| 0.702404
| 618
| 5,242
| 5.857605
| 0.333333
| 0.012431
| 0.039779
| 0.053039
| 0.530663
| 0.513536
| 0.487569
| 0.487569
| 0.427072
| 0.427072
| 0
| 0.093664
| 0.169019
| 5,242
| 116
| 257
| 45.189655
| 0.737374
| 0.15166
| 0
| 0.2
| 0
| 0.05
| 0.226713
| 0.155919
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.1
| null | null | 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab4aa21cfd4871d1766355bdd0923074d0f5c05 | 32,515 | py | Python |
gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | ["PostgreSQL", "Apache-2.0"] | 1 | 2020-07-08T13:20:27.000Z | 2020-07-08T13:20:27.000Z |
gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | ["PostgreSQL", "Apache-2.0"] | 6 | 2020-06-24T18:56:06.000Z | 2022-02-26T08:53:11.000Z |
gpMgmt/bin/gpload_test/gpload2/TEST.py | Tylarb/gpdb | 15e1341cfbac7f70d2086a9a1d46149a82765b5e | ["PostgreSQL", "Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import unittest
import sys
import os
import string
import time
import socket
import fileinput
import platform
import re
try:
import subprocess32 as subprocess
except:
import subprocess
import pg
def get_port_from_conf():
file = os.environ.get('MASTER_DATA_DIRECTORY')+'/postgresql.conf'
if os.path.isfile(file):
with open(file) as f:
for line in f.xreadlines():
match = re.search('port=\d+',line)
if match:
match1 = re.search('\d+', match.group())
if match1:
return match1.group()
def get_port():
port = os.environ['PGPORT']
if not port:
port = get_port_from_conf()
return port if port else 5432
def get_ip(hostname=None):
if hostname is None:
hostname = socket.gethostname()
else:
hostname = hostname
hostinfo = socket.getaddrinfo(hostname, None)
ipaddrlist = list(set([(ai[4][0]) for ai in hostinfo]))
for myip in ipaddrlist:
if myip.find(":") > 0:
ipv6 = myip
return ipv6
elif myip.find(".") > 0:
ipv4 = myip
return ipv4
def getPortMasterOnly(host = 'localhost',master_value = None,
user = os.environ.get('USER'),gphome = os.environ['GPHOME'],
mdd=os.environ['MASTER_DATA_DIRECTORY'],port = os.environ['PGPORT']):
master_pattern = "Context:\s*-1\s*Value:\s*\d+"
command = "gpconfig -s %s" % ( "port" )
cmd = "source %s/greenplum_path.sh; export MASTER_DATA_DIRECTORY=%s; export PGPORT=%s; %s" \
% (gphome, mdd, port, command)
(ok,out) = run(cmd)
if not ok:
raise Exception("Unable to connect to segment server %s as user %s" % (host, user))
for line in out:
out = line.split('\n')
for line in out:
if re.search(master_pattern, line):
master_value = int(line.split()[3].strip())
if master_value is None:
error_msg = "".join(out)
raise Exception(error_msg)
return str(master_value)
"""
Global Values
"""
MYD = os.path.abspath(os.path.dirname(__file__))
mkpath = lambda *x: os.path.join(MYD, *x)
UPD = os.path.abspath(mkpath('..'))
if UPD not in sys.path:
sys.path.append(UPD)
DBNAME = "postgres"
USER = os.environ.get( "LOGNAME" )
HOST = socket.gethostname()
GPHOME = os.getenv("GPHOME")
PGPORT = get_port()
PGUSER = os.environ.get("PGUSER")
if PGUSER is None:
PGUSER = USER
PGHOST = os.environ.get("PGHOST")
if PGHOST is None:
PGHOST = HOST
d = mkpath('config')
if not os.path.exists(d):
os.mkdir(d)
def write_config_file(mode='insert', reuse_flag='',columns_flag='0',mapping='0',portNum='8081',database='reuse_gptest',host='localhost',formatOpts='text',file='data/external_file_01.txt',table='texttable',format='text',delimiter="'|'",escape='',quote='',truncate='False',log_errors=None, error_limit='0',error_table=None,externalSchema=None,staging_table=None,fast_match='false', encoding=None, preload=True, fill=False):
f = open(mkpath('config/config_file'),'w')
f.write("VERSION: 1.0.0.1")
if database:
f.write("\nDATABASE: "+database)
f.write("\nUSER: "+os.environ.get('USER'))
f.write("\nHOST: "+hostNameAddrs)
f.write("\nPORT: "+masterPort)
f.write("\nGPLOAD:")
f.write("\n INPUT:")
f.write("\n - SOURCE:")
f.write("\n LOCAL_HOSTNAME:")
f.write("\n - "+hostNameAddrs)
if portNum:
f.write("\n PORT: "+portNum)
f.write("\n FILE:")
f.write("\n - "+mkpath(file))
if columns_flag=='1':
f.write("\n - COLUMNS:")
f.write("\n - s_s1: text")
f.write("\n - s_s2: text")
f.write("\n - s_dt: timestamp")
f.write("\n - s_s3: text")
f.write("\n - s_n1: smallint")
f.write("\n - s_n2: integer")
f.write("\n - s_n3: bigint")
f.write("\n - s_n4: decimal")
f.write("\n - s_n5: numeric")
f.write("\n - s_n6: real")
f.write("\n - s_n7: double precision")
f.write("\n - s_n8: text")
f.write("\n - s_n9: text")
if format:
f.write("\n - FORMAT: "+format)
if log_errors:
f.write("\n - LOG_ERRORS: true")
f.write("\n - ERROR_LIMIT: " + error_limit)
if error_table:
f.write("\n - ERROR_TABLE: " + error_table)
f.write("\n - ERROR_LIMIT: " + error_limit)
if delimiter:
f.write("\n - DELIMITER: "+delimiter)
if encoding:
f.write("\n - ENCODING: "+encoding)
if escape:
f.write("\n - ESCAPE: "+escape)
if quote:
f.write("\n - QUOTE: "+quote)
if fill:
f.write("\n - FILL_MISSING_FIELDS: true")
f.write("\n OUTPUT:")
f.write("\n - TABLE: "+table)
if mode:
if mode == 'insert':
f.write("\n - MODE: "+'insert')
if mode == 'update':
f.write("\n - MODE: "+'update')
if mode == 'merge':
f.write("\n - MODE: "+'merge')
f.write("\n - UPDATE_COLUMNS:")
f.write("\n - n2")
f.write("\n - MATCH_COLUMNS:")
f.write("\n - n1")
f.write("\n - s1")
f.write("\n - s2")
if mapping=='1':
f.write("\n - MAPPING:")
f.write("\n s1: s_s1")
f.write("\n s2: s_s2")
f.write("\n dt: s_dt")
f.write("\n s3: s_s3")
f.write("\n n1: s_n1")
f.write("\n n2: s_n2")
f.write("\n n3: s_n3")
f.write("\n n4: s_n4")
f.write("\n n5: s_n5")
f.write("\n n6: s_n6")
f.write("\n n7: s_n7")
f.write("\n n8: s_n8")
f.write("\n n9: s_n9")
if externalSchema:
f.write("\n EXTERNAL:")
f.write("\n - SCHEMA: "+externalSchema)
if preload:
f.write("\n PRELOAD:")
f.write("\n - REUSE_TABLES: "+reuse_flag)
f.write("\n - FAST_MATCH: "+fast_match)
if staging_table:
f.write("\n - STAGING_TABLE: "+staging_table)
f.write("\n")
f.close()
def runfile(ifile, flag='', dbname=None, outputPath="", outputFile="",
username=None,
PGOPTIONS=None, host = None, port = None):
if len(outputFile) == 0:
(ok, out) = psql_run(ifile = ifile,ofile = outFile(ifile, outputPath),flag = flag,
dbname=dbname , username=username,
PGOPTIONS=PGOPTIONS, host = host, port = port)
else:
(ok,out) = psql_run(ifile =ifile, ofile =outFile(outputFile, outputPath), flag =flag,
dbname= dbname, username= username,
PGOPTIONS= PGOPTIONS, host = host, port = port)
return (ok, out)
def psql_run(ifile = None, ofile = None, cmd = None,
flag = '-e',dbname = None,
username = None,
PGOPTIONS = None, host = None, port = None):
'''
Run a command or file against psql. Return True if OK.
@param dbname: database name
@param ifile: input file
@param cmd: command line
@param flag: -e Run SQL with no comments (default)
-a Run SQL with comments and psql notice
@param username: psql user
@param host : to connect to a different host
@param port : port where gpdb is running
@param PGOPTIONS: connects to postgres via utility mode
'''
if dbname is None:
dbname = DBNAME
if username is None:
username = PGUSER # Use the default login user
if PGOPTIONS is None:
PGOPTIONS = ""
else:
PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
if host is None:
host = "-h %s" % PGHOST
else:
host = "-h %s" % host
if port is None:
port = ""
else:
port = "-p %s" % port
if cmd:
arg = '-c "%s"' % cmd
elif ifile:
arg = ' < ' + ifile
if not (flag == '-q'): # Don't echo commands sent to server
arg = '-e < ' + ifile
if flag == '-a':
arg = '-f ' + ifile
else:
raise PSQLError('missing cmd and ifile')
if ofile == '-':
ofile = '2>&1'
elif not ofile:
ofile = '> /dev/null 2>&1'
else:
ofile = '> %s 2>&1' % ofile
return run('%s psql -d %s %s %s -U %s %s %s %s' %
(PGOPTIONS, dbname, host, port, username, flag, arg, ofile))
def run(cmd):
"""
Run a shell command. Return (True, [result]) if OK, or (False, []) otherwise.
@params cmd: The command to run at the shell.
oFile: an optional output file.
mode: What to do if the output file already exists: 'a' = append;
'w' = write. Defaults to append (so that the function is
backwards compatible). Yes, this is passed to the open()
function, so you can theoretically pass any value that is
valid for the second parameter of open().
"""
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
out = p.communicate()[0]
ret = []
ret.append(out)
rc = False if p.wait() else True
return (rc,ret)
def outFile(fname,outputPath = ''):
return changeExtFile(fname, ".out", outputPath)
def diffFile( fname, outputPath = "" ):
return changeExtFile( fname, ".diff", outputPath )
def changeExtFile( fname, ext = ".diff", outputPath = "" ):
if len( outputPath ) == 0:
return os.path.splitext( fname )[0] + ext
else:
filename = fname.split( "/" )
fname = os.path.splitext( filename[len( filename ) - 1] )[0]
return outputPath + "/" + fname + ext
def gpdbAnsFile(fname):
ext = '.ans'
return os.path.splitext(fname)[0] + ext
def isFileEqual( f1, f2, optionalFlags = "", outputPath = "", myinitfile = ""):
LMYD = os.path.abspath(os.path.dirname(__file__))
if not os.access( f1, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f1 )
if not os.access( f2, os.R_OK ):
raise Exception( 'Error: cannot find file %s' % f2 )
dfile = diffFile( f1, outputPath = outputPath )
# Gets the suitePath name to add init_file
suitePath = f1[0:f1.rindex( "/" )]
if os.path.exists(suitePath + "/init_file"):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s/init_file '
'%s %s > %s 2>&1' % (LMYD, suitePath, f1, f2, dfile))
else:
if os.path.exists(myinitfile):
(ok, out) = run('../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file --gp_init_file=%s '
'%s %s > %s 2>&1' % (LMYD, myinitfile, f1, f2, dfile))
else:
(ok, out) = run( '../gpdiff.pl -w ' + optionalFlags + \
' -I NOTICE: -I HINT: -I CONTEXT: -I GP_IGNORE: --gp_init_file=%s/global_init_file '
'%s %s > %s 2>&1' % ( LMYD, f1, f2, dfile ) )
if ok:
os.unlink( dfile )
return ok
def read_diff(ifile, outputPath):
"""
Opens the diff file that is associated with the given input file and returns
its contents as a string.
"""
dfile = diffFile(ifile, outputPath)
with open(dfile, 'r') as diff:
return diff.read()
def modify_sql_file(num):
file = mkpath('query%d.sql' % num)
user = os.environ.get('USER')
if not user:
user = os.environ.get('USER')
if os.path.isfile(file):
for line in fileinput.FileInput(file,inplace=1):
line = line.replace("gpload.py ","gpload ")
print str(re.sub('\n','',line))
def copy_data(source='',target=''):
cmd = 'cp '+ mkpath('data/' + source) + ' ' + mkpath(target)
p = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
return p.communicate()
hostNameAddrs = get_ip(HOST)
masterPort = getPortMasterOnly()
def get_table_name():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
queryString = """SELECT relname
from pg_class
WHERE relname
like 'ext_gpload_reusable%'
OR relname
like 'staging_gpload_reusable%';"""
resultList = db.query(queryString.encode('utf-8')).getresult()
return resultList
def drop_tables():
try:
db = pg.DB(dbname='reuse_gptest'
,host='localhost'
,port=int(PGPORT)
)
except Exception,e:
errorMessage = str(e)
print 'could not connect to database: ' + errorMessage
list = get_table_name()
for i in list:
name = i[0]
match = re.search('ext_gpload',name)
if match:
queryString = "DROP EXTERNAL TABLE %s" % name
db.query(queryString.encode('utf-8'))
else:
queryString = "DROP TABLE %s" % name
db.query(queryString.encode('utf-8'))
class PSQLError(Exception):
'''
PSQLError is the base class for exceptions in this module
http://docs.python.org/tutorial/errors.html
We want to raise an error and not a failure. The reason for an error
might be program error, file not found, etc.
Failure is defined as a test case failure, i.e. when the output is different
from the expected result.
'''
pass
class GPLoad_FormatOpts_TestCase(unittest.TestCase):
def check_result(self,ifile, optionalFlags = "-U3", outputPath = ""):
"""
PURPOSE: compare the actual and expected output files and report an
error if they don't match.
PARAMETERS:
ifile: the name of the .sql file whose actual and expected outputs
we want to compare. You may include the path as well as the
filename. This function will process this file name to
figure out the proper names of the .out and .ans files.
optionalFlags: command-line options (if any) for diff.
For example, pass " -B " (with the blank spaces) to ignore
blank lines. By default, diffs are unified with 3 lines of
context (i.e. optionalFlags is "-U3").
"""
f1 = gpdbAnsFile(ifile)
f2 = outFile(ifile, outputPath=outputPath)
result = isFileEqual(f1, f2, optionalFlags, outputPath=outputPath)
diff = None if result else read_diff(ifile, outputPath)
self.assertTrue(result, "query resulted in diff:\n{}".format(diff))
return True
def doTest(self, num):
file = mkpath('query%d.diff' % num)
if os.path.isfile(file):
run("rm -f" + " " + file)
modify_sql_file(num)
file = mkpath('query%d.sql' % num)
runfile(file)
self.check_result(file)
def test_00_gpload_formatOpts_setup(self):
"0 gpload setup"
for num in range(1,40):
f = open(mkpath('query%d.sql' % num),'w')
f.write("\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n"+"\! gpload -f "+mkpath('config/config_file')+ " -d reuse_gptest\n")
f.close()
file = mkpath('setup.sql')
runfile(file)
self.check_result(file)
def test_01_gpload_formatOpts_delimiter(self):
"1 gpload formatOpts delimiter '|' with reuse "
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'|'")
self.doTest(1)
def test_02_gpload_formatOpts_delimiter(self):
"2 gpload formatOpts delimiter '\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="'\t'")
self.doTest(2)
def test_03_gpload_formatOpts_delimiter(self):
"3 gpload formatOpts delimiter E'\t' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\\t'")
self.doTest(3)
def test_04_gpload_formatOpts_delimiter(self):
"4 gpload formatOpts delimiter E'\u0009' with reuse"
copy_data('external_file_02.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\u0009'")
self.doTest(4)
def test_05_gpload_formatOpts_delimiter(self):
"5 gpload formatOpts delimiter E'\\'' with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="E'\''")
self.doTest(5)
def test_06_gpload_formatOpts_delimiter(self):
"6 gpload formatOpts delimiter \"'\" with reuse"
copy_data('external_file_03.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',delimiter="\"'\"")
self.doTest(6)
def test_07_gpload_reuse_table_insert_mode_without_reuse(self):
"7 gpload insert mode without reuse"
runfile(mkpath('setup.sql'))
f = open(mkpath('query7.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'")
f.close()
write_config_file(mode='insert',reuse_flag='false')
self.doTest(7)
def test_08_gpload_reuse_table_update_mode_with_reuse(self):
"8 gpload update mode with reuse"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',file='data_file.txt')
self.doTest(8)
def test_09_gpload_reuse_table_update_mode_without_reuse(self):
"9 gpload update mode without reuse"
f = open(mkpath('query9.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable;'\n"+"\! psql -d reuse_gptest -c 'select * from texttable where n2=222;'")
f.close()
copy_data('external_file_05.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',file='data_file.txt')
self.doTest(9)
def test_10_gpload_reuse_table_merge_mode_with_reuse(self):
"10 gpload merge mode with reuse "
drop_tables()
copy_data('external_file_06.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(10)
def test_11_gpload_reuse_table_merge_mode_without_reuse(self):
"11 gpload merge mode without reuse "
copy_data('external_file_07.txt','data_file.txt')
write_config_file('merge','false',file='data_file.txt')
self.doTest(11)
def test_12_gpload_reuse_table_merge_mode_with_different_columns_number_in_file(self):
"12 gpload merge mode with reuse (RERUN with different columns number in file) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(12)
def test_13_gpload_reuse_table_merge_mode_with_different_columns_number_in_DB(self):
"13 gpload merge mode with reuse (RERUN with different columns number in DB table) "
preTest = mkpath('pre_test_13.sql')
psql_run(preTest, dbname='reuse_gptest')
copy_data('external_file_09.txt','data_file.txt')
write_config_file('merge','true',file='data_file.txt')
self.doTest(13)
def test_14_gpload_reuse_table_update_mode_with_reuse_RERUN(self):
"14 gpload update mode with reuse (RERUN) "
write_config_file('update','true',file='data_file.txt')
self.doTest(14)
def test_15_gpload_reuse_table_merge_mode_with_different_columns_order(self):
"15 gpload merge mode with different columns' order "
copy_data('external_file_10.txt','data/data_file.tbl')
write_config_file('merge','true',file='data/data_file.tbl',columns_flag='1',mapping='1')
self.doTest(15)
def test_16_gpload_formatOpts_quote(self):
"16 gpload formatOpts quote unspecified in CSV with reuse "
copy_data('external_file_11.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','")
self.doTest(16)
def test_17_gpload_formatOpts_quote(self):
"17 gpload formatOpts quote '\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="'\x26'")
self.doTest(17)
def test_18_gpload_formatOpts_quote(self):
"18 gpload formatOpts quote E'\\x26'(&) with reuse"
copy_data('external_file_12.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",quote="E'\x26'")
self.doTest(18)
def test_19_gpload_formatOpts_escape(self):
"19 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
file = mkpath('setup.sql')
runfile(file)
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape='\\')
self.doTest(19)
def test_20_gpload_formatOpts_escape(self):
"20 gpload formatOpts escape '\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape= '\x5C')
self.doTest(20)
def test_21_gpload_formatOpts_escape(self):
"21 gpload formatOpts escape E'\\\\' with reuse"
copy_data('external_file_01.txt','data_file.txt')
write_config_file(reuse_flag='true',formatOpts='text',file='data_file.txt',table='texttable',escape="E'\\\\'")
self.doTest(21)
# case 22 is flaky on concourse. It may report: Fatal Python error: GC object already tracked during testing.
# This is a rare issue; we can't reproduce it locally, so we disable it in order not to block others.
#def test_22_gpload_error_count(self):
# "22 gpload error count"
# f = open(mkpath('query22.sql'),'a')
# f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
# f.close()
# f = open(mkpath('data/large_file.csv'),'w')
# for i in range(0, 10000):
# if i % 2 == 0:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
# else:
# f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
# f.close()
# copy_data('large_file.csv','data_file.csv')
# write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000')
# self.doTest(22)
def test_23_gpload_error_count(self):
"23 gpload error_table"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query23.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",error_table="err_table",error_limit='90000000')
self.doTest(23)
def test_24_gpload_error_count(self):
"24 gpload error count with ext schema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query24.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
f = open(mkpath('data/large_file.csv'),'w')
for i in range(0, 10000):
if i % 2 == 0:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00,a\n')
else:
f.write('1997,Ford,E350,"ac, abs, moon",3000.00\n')
f.close()
copy_data('large_file.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='90000000',externalSchema='test')
self.doTest(24)
def test_25_gpload_ext_staging_table(self):
"25 gpload reuse ext_staging_table if it is configured"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query25.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table')
self.doTest(25)
def test_26_gpload_ext_staging_table_with_externalschema(self):
"26 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query26.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema='test')
self.doTest(26)
def test_27_gpload_ext_staging_table_with_externalschema(self):
"27 gpload reuse ext_staging_table if it is configured with externalschema"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query27.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='staging_table',externalSchema="'%'")
self.doTest(27)
def test_28_gpload_ext_staging_table_with_dot(self):
"28 gpload reuse ext_staging_table if it is configured with dot"
file = mkpath('setup.sql')
runfile(file)
f = open(mkpath('query28.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from test.csvtable;'")
f.close()
copy_data('external_file_13.csv','data_file.csv')
write_config_file(reuse_flag='true',formatOpts='csv',file='data_file.csv',table='test.csvtable',format='csv',delimiter="','",log_errors=True,error_limit='10',staging_table='t.staging_table')
self.doTest(28)
def test_29_gpload_reuse_table_insert_mode_with_reuse_and_null(self):
"29 gpload insert mode with reuse and null"
runfile(mkpath('setup.sql'))
f = open(mkpath('query29.sql'),'a')
f.write("\! psql -d reuse_gptest -c 'select count(*) from texttable where n2 is null;'")
f.close()
copy_data('external_file_14.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',file='data_file.txt',log_errors=True, error_limit='100')
self.doTest(29)
def test_30_gpload_reuse_table_update_mode_with_fast_match(self):
"30 gpload update mode with fast match"
drop_tables()
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(30)
def test_31_gpload_reuse_table_update_mode_with_fast_match_and_different_columns_number(self):
"31 gpload update mode with fast match and differenct columns number) "
psql_run(cmd="ALTER TABLE texttable ADD column n8 text",dbname='reuse_gptest')
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(31)
def test_32_gpload_update_mode_without_reuse_table_with_fast_match(self):
"32 gpload update mode when reuse table is false and fast match is true"
drop_tables()
copy_data('external_file_08.txt','data_file.txt')
write_config_file(mode='update',reuse_flag='false',fast_match='true',file='data_file.txt')
self.doTest(32)
def test_33_gpload_reuse_table_merge_mode_with_fast_match_and_external_schema(self):
"33 gpload update mode with fast match and external schema"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',externalSchema='test')
self.doTest(33)
def test_34_gpload_reuse_table_merge_mode_with_fast_match_and_encoding(self):
"34 gpload merge mode with fast match and encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt',encoding='GBK')
self.doTest(34)
def test_35_gpload_reuse_table_merge_mode_with_fast_match_default_encoding(self):
"35 gpload does not reuse table when encoding is setted from GBK to empty"
write_config_file(mode='merge',reuse_flag='true',fast_match='true',file='data_file.txt')
self.doTest(35)
def test_36_gpload_reuse_table_merge_mode_default_encoding(self):
"36 gpload merge mode with encoding GBK"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='GBK')
self.doTest(36)
def test_37_gpload_reuse_table_merge_mode_invalid_encoding(self):
"37 gpload merge mode with invalid encoding"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='merge',reuse_flag='true',fast_match='false',file='data_file.txt',encoding='xxxx')
self.doTest(37)
def test_38_gpload_without_preload(self):
"38 gpload insert mode without preload"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='true',fast_match='false',file='data_file.txt',error_table="err_table",error_limit='1000',preload=False)
self.doTest(38)
def test_39_gpload_fill_missing_fields(self):
"39 gpload fill missing fields"
file = mkpath('setup.sql')
runfile(file)
copy_data('external_file_04.txt','data_file.txt')
write_config_file(mode='insert',reuse_flag='false',fast_match='false',file='data_file.txt',table='texttable1', error_limit='1000', fill=True)
self.doTest(39)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(GPLoad_FormatOpts_TestCase)
runner = unittest.TextTestRunner(verbosity=2)
ret = not runner.run(suite).wasSuccessful()
sys.exit(ret)
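For orientation, write_config_file() above simply serializes a gpload YAML control file; with the default arguments plus reuse_flag='true' (as most tests pass) it emits roughly the following. USER, HOST, and PORT come from the environment at run time, so the values below are placeholders and the indentation is approximate:

# VERSION: 1.0.0.1
# DATABASE: reuse_gptest
# USER: gpadmin
# HOST: mdw
# PORT: 5432
# GPLOAD:
#    INPUT:
#     - SOURCE:
#          LOCAL_HOSTNAME:
#           - mdw
#          PORT: 8081
#          FILE:
#           - data/external_file_01.txt
#     - FORMAT: text
#     - DELIMITER: '|'
#    OUTPUT:
#     - TABLE: texttable
#     - MODE: insert
#    PRELOAD:
#     - REUSE_TABLES: true
#     - FAST_MATCH: false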
| 42.172503
| 421
| 0.611041
| 4,386
| 32,515
| 4.347697
| 0.120383
| 0.02706
| 0.023127
| 0.034611
| 0.524411
| 0.480518
| 0.454612
| 0.416487
| 0.386019
| 0.359589
| 0
| 0.024738
| 0.249085
| 32,515
| 770
| 422
| 42.227273
| 0.756266
| 0.030909
| 0
| 0.257282
| 0
| 0.006472
| 0.291934
| 0.010893
| 0
| 0
| 0
| 0
| 0.001618
| 0
| null | null | 0.001618
| 0.019417
| null | null | 0.004854
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ab8196f812a9bd1c5cff6d84c43cd3a82467a55 | 618 | py | Python |
VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | ["MIT"] | 1 | 2017-04-25T10:56:01.000Z | 2017-04-25T10:56:01.000Z |
VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | ["MIT"] | null | null | null |
VMI/VMItest.py | thomasbarillot/DAQ | 20126655f74194757d25380680af9429ff27784e | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sat May 7 11:38:18 2016
@author: thomasbarillot
VMI control
"""
from ctypes import cdll
#slib="VMIcrtl_ext.dll"
#hlib=cdll('VMIcrtl.dll')
import VMIcrtl_ext
test=VMIcrtl_ext.VMIcrtl()
#%%
print test.GetFilename()
#%%
test.setFilename('20161115_1841.dat')
print test.GetFilename()
#%%
test.StartAcquisitionPrev()
#%%
test.StopAcquisition()
#%%
img=test.RecallImagePrev()
#%%
import numpy as np
print np.shape(img)
a=np.array(img)
print a
#%%
from matplotlib import pyplot as plt
#%%
b=np.reshape(a,[400,400])
print b
plt.figure()
plt.pcolor(np.reshape(a,[400,400]))
| 12.875
| 37
| 0.699029
| 90
| 618
| 4.755556
| 0.555556
| 0.070093
| 0.093458
| 0.11215
| 0.074766
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066543
| 0.124595
| 618
| 48
| 38
| 12.875
| 0.724584
| 0.134304
| 0
| 0.111111
| 0
| 0
| 0.038375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.222222
| null | null | 0.277778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0abbc3e1d5afde9470d734d62bcb0511ac93cadd | 5,390 | py | Python |
samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | ["BSD-3-Clause"] | 1 | 2017-08-22T14:17:10.000Z | 2017-08-22T14:17:10.000Z |
samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | ["BSD-3-Clause"] | 1 | 2021-03-26T00:47:22.000Z | 2021-03-26T00:47:22.000Z |
samples/samplenetconf/demos/vr_demo3.py | gaberger/pysdn | 67442e1c259d8ca8620ada95b95977e3852463c5 | ["BSD-3-Clause"] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2015, BROCADE COMMUNICATIONS SYSTEMS, INC
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
import json
from pysdn.controller.controller import Controller
from pysdn.netconfdev.vrouter.vrouter5600 import VRouter5600
from pysdn.common.status import STATUS
from pysdn.common.utils import load_dict_from_file
def vr_demo_3():
f = "cfg4.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit()
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
nodeIpAddr = d['nodeIpAddr']
nodePortNum = d['nodePortNum']
nodeUname = d['nodeUname']
nodePswd = d['nodePswd']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("\n")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
vrouter = VRouter5600(ctrl, nodeName, nodeIpAddr, nodePortNum,
nodeUname, nodePswd)
print ("<<< 'Controller': %s, '%s': %s"
% (ctrlIpAddr, nodeName, nodeIpAddr))
print ("\n")
time.sleep(rundelay)
node_configured = False
result = ctrl.check_node_config_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONFIGURED)):
node_configured = True
print ("<<< '%s' is configured on the Controller" % nodeName)
elif(status.eq(STATUS.DATA_NOT_FOUND)):
node_configured = False
else:
print ("\n")
print "Failed to get configuration status for the '%s'" % nodeName
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
if node_configured is False:
result = ctrl.add_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< '%s' added to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
exit(0)
print ("\n")
time.sleep(rundelay)
result = ctrl.check_node_conn_status(nodeName)
status = result.get_status()
if(status.eq(STATUS.NODE_CONNECTED)):
print ("<<< '%s' is connected to the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print("\n")
print ("<<< Show configuration of the '%s'" % nodeName)
time.sleep(rundelay)
result = vrouter.get_cfg()
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' configuration:" % nodeName)
cfg = result.get_data()
data = json.loads(cfg)
print json.dumps(data, indent=4)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief().lower())
exit(0)
print "\n"
print (">>> Remove '%s' NETCONF node from the Controller" % nodeName)
time.sleep(rundelay)
result = ctrl.delete_netconf_node(vrouter)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("'%s' NETCONF node was successfully removed "
"from the Controller" % nodeName)
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.brief())
exit(0)
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo End")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
if __name__ == "__main__":
vr_demo_3()
| 34.113924
| 78
| 0.62115
| 637
| 5,390
| 5.188383
| 0.343799
| 0.01997
| 0.026626
| 0.03177
| 0.288351
| 0.232073
| 0.232073
| 0.232073
| 0.232073
| 0.214523
| 0
| 0.007679
| 0.226902
| 5,390
| 157
| 79
| 34.33121
| 0.785457
| 0.281633
| 0
| 0.425743
| 0
| 0
| 0.254305
| 0.061457
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.059406
| null | null | 0.336634
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0abf250849dcb075b82b1ca50e27cc3adefcc742 | 3,993 | py | Python |
src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | ["MIT"] | null | null | null |
src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | ["MIT"] | null | null | null |
src/mgls_bootstrapping.py | rosich/mgls | 64c924f59adba2dddf44bb70a84868173f0b7120 | ["MIT"] | null | null | null |
#!/usr/bin/python
from math import sin, cos, tan, atan, pi, acos, sqrt, exp, log10
import sys, os
import copy
import random
import numpy as np
import multiprocessing as mp
import ConfigParser
sys.path.append('./bin')
import mGLS, mMGLS
sys.path.append('./src')
from EnvGlobals import Globals
import mgls_io
import mgls_mc
from mgls_lib import *
#definitions and constants
to_radians = pi/180.0
to_deg = 1.0/to_radians
#-------------------------
def _gls_instance_Ndim_bootstrapping(n_runs):
"""executes n_runs instances of MGLS for with previous data shuffle
"""
cpu_periodogram = list()
for iter in range(n_runs):
"""
#shuffle RV's and their errors. Repetition is not allowed
comb_rv_err = zip(Globals.rv, Globals.rv_err)
random.shuffle(comb_rv_err)
Globals.rv[:], Globals.rv_err[:] = zip(*comb_rv_err)
"""
#allowing repetition
rv = [0.0]*len(Globals.time)
rv_err = [0.0]*len(Globals.time)
for i in range(len(Globals.time)):
index = int(random.uniform(0,len(Globals.time)))
rv[i] = Globals.rv[index]
rv_err[i] = Globals.rv_err[index]
Globals.rv = rv
Globals.rv_err = rv_err
opt_state = mgls_mc.optimal(Globals.ndim, msgs = False, temp_steps=20, n_iter=1000)
pwr_opt, fitting_coeffs, A = mgls(opt_state)
cpu_periodogram.append(pwr_opt) #save the best period determination (highest power)
return cpu_periodogram
def fap(bootstrapping_stats, pwr):
"""returns FAP for a given pwr. i.e. how many realizations overcome
a given power, over unit.
"""
return float(sum(i > pwr for i in bootstrapping_stats))/len(bootstrapping_stats)
def fap_levels(bootstrapping_stats):
"""determines which power a FAP of 1, 0.1, 0.01 % is reached
"""
FAPs = [1.0, 0.1, 0.01, 0.001] #FAPS to compute in %
n_bs = len(bootstrapping_stats)
#sort bootstrapping_stats vector ascendently
sorted_pwr = sorted(bootstrapping_stats)
return [np.percentile(sorted_pwr,100-FAPs[i]) for i in range(len(FAPs))]
def parallel_Mdim_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_Ndim_bootstrapping, n_runs).get(1./.0001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
"""
except ZeroDivisionError:
print "Error: Zero division error. Restarted parallel bootstapping"
"""
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def parallel_bootstrapping(n_bootstrapping):
"""
"""
n_runs = [n_bootstrapping/Globals.ncpus for i in range(Globals.ncpus)]
pool = mp.Pool(Globals.ncpus) #ncpus available
#run parallell execution
try:
out = pool.map_async(_gls_instance_bootstrapping, n_runs).get(1./.00001)
pool.terminate()
except KeyboardInterrupt:
pool.terminate()
sys.exit()
#join the output bunches
out_spectra = list()
for cpu in range(len(n_runs)):
out_spectra.extend(out[cpu])
bootstrapping_stats = list()
for j in range(len(out_spectra)):
bootstrapping_stats.append(out_spectra[j])
return bootstrapping_stats
def Mdim_bootstrapping(max_pow):
"""
"""
#n_bootstrapping = 500 #iterations
bootstrapping_stats = parallel_Mdim_bootstrapping(Globals.n_bootstrapping)
print "\n//BOOTSTRAPPING:// {1.0, 0.1, 0.01, 0.001}%"
print "FAP Levels:", fap_levels(bootstrapping_stats)
print "Total bootstapping samples: ", len(bootstrapping_stats)
return bootstrapping_stats
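Since fap() and fap_levels() above are pure functions of the bootstrap null distribution, they can be sanity-checked standalone; a toy sketch with synthetic powers (the exponential draw is illustrative only, not a claim about the real periodogram statistics):

import numpy as np

rng = np.random.RandomState(0)
toy_stats = list(rng.exponential(scale=5.0, size=10000))  # fake best-power values
# Fraction of shuffled realizations whose power exceeds 25.0:
print float(sum(p > 25.0 for p in toy_stats)) / len(toy_stats)
# Powers above which only 1.0 / 0.1 / 0.01 / 0.001 % of realizations fall:
print [np.percentile(sorted(toy_stats), 100 - f) for f in [1.0, 0.1, 0.01, 0.001]]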
| 31.690476
| 91
| 0.672176
| 543
| 3,993
| 4.767956
| 0.313076
| 0.118192
| 0.023175
| 0.016995
| 0.4017
| 0.344921
| 0.323677
| 0.323677
| 0.27192
| 0.27192
| 0
| 0.020395
| 0.214125
| 3,993
| 125
| 92
| 31.944
| 0.804653
| 0.088405
| 0
| 0.381579
| 0
| 0.013158
| 0.031302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.157895
| null | null | 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0acb3e8369864a2998734321cae251dc26fd05fa | 2,884 | py | Python |
extractFeatures.py | PatrickJReed/Longboard | f6ca4a6e51c91296894aee2e02b86f83b38c080a | ["MIT"] | 1 | 2020-04-27T19:55:29.000Z | 2020-04-27T19:55:29.000Z |
extractFeatures.py | PatrickJReed/Longboard2 | f6ca4a6e51c91296894aee2e02b86f83b38c080a | ["MIT"] | 1 | 2020-02-26T18:06:09.000Z | 2020-02-26T18:06:09.000Z |
extractFeatures.py | PatrickJReed/Longboard | f6ca4a6e51c91296894aee2e02b86f83b38c080a | ["MIT"] | null | null | null |
#!/home/ubuntu/miniconda2/bin/python
from __future__ import division
import sys
import glob, os, gc
import uuid
import os.path
import csv
import numpy as np
from time import time
from subprocess import (call, Popen, PIPE)
from itertools import product
import shutil
import re
import pickle
from boto3.session import Session
import boto3
import h5py
import umap
import hdbscan
from keras.models import load_model
from keras.models import Model
from keras import backend as K
from keras.utils import multi_gpu_model
##Path to Data
basepath = "/home/ubuntu/"
subject = sys.argv[1]
with open("config.txt") as f:
config = [line.rstrip() for line in f]
print config[0]
print config[1]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3 = boto3.client ('s3')
s3.download_file('for-ndar',os.path.join("metadata/", subject + ".txt"),os.path.join(basepath,subject + ".txt"))
with open(subject + ".txt") as f:
Cells = [line.rstrip() for line in f]
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.download_file('bsmn-data',os.path.join('Inception_Transfer_Model.h5'),os.path.join(basepath,'Inception_Transfer_Model.h5'))
feat_extractor = load_model(os.path.join(basepath,'Inception_Transfer_Model.h5'))
parallel_model = multi_gpu_model(feat_extractor, gpus=2)
count = 0
for cell in Cells:
print(cell)
cell_size=0
cell_ids = []
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_IDs.h5'),os.path.join(basepath,cell+'_IDs.h5'))
f = h5py.File(os.path.join(basepath,cell+'_IDs.h5'), 'r')
cell_ids = f['ID']
for cid in cell_ids:
cid = cid.decode('utf-8')
s3.meta.client.download_file('bsmn-data',os.path.join(subject, cell+'_'+cid+'.h5'),os.path.join(basepath,cell+'_'+cid+'.h5'))
xyz = h5py.File(os.path.join(basepath,cell+'_'+cid+'.h5'), 'r')
os.remove(os.path.join(basepath,cell+'_'+cid+'.h5'))
if count == 0:
X = xyz['X']
Y = xyz['Y']
Z = parallel_model.predict(X, batch_size = 128)
count+=1
length = len(Y)
U = [cid] * length
else:
X = xyz['X']
Y = np.append(Y,xyz['Y'], axis=0)
z = feat_extractor.predict(X, batch_size = 128)
Z = np.append(Z,z, axis=0)
length = len(xyz['Y'])
U = U + ([cid] * length)
print(Z.shape)
hf = h5py.File(subject+'_ef.h5', 'w')
hf.create_dataset('Y', data=Y)
hf.create_dataset('Z', data=Z)
hf.close()
session = Session(aws_access_key_id=config[0],aws_secret_access_key=config[1])
s3 = session.resource('s3')
s3.meta.client.upload_file(os.path.join(subject+'_ef.h5'),'bsmn-data',os.path.join(subject, subject+'_ef.h5'))
call(['sudo', 'shutdown', '-h', 'now'])
| 31.692308
| 138
| 0.662968
| 452
| 2,884
| 4.09292
| 0.265487
| 0.048649
| 0.075676
| 0.077838
| 0.402703
| 0.38
| 0.342703
| 0.261081
| 0.215676
| 0.215676
| 0
| 0.02397
| 0.175451
| 2,884
| 91
| 139
| 31.692308
| 0.753995
| 0.016297
| 0
| 0.106667
| 0
| 0
| 0.093827
| 0.028571
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.293333
| null | null | 0.053333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0ad16ca68b13c3255bfd62c00d84e6b8aa940002
| 3,021
|
py
|
Python
|
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
finex_history.py
|
yihming/gdax-data
|
7e562f314e9ef12eb6be2df3b97190af632c4530
|
[
"MIT"
] | null | null | null |
import datetime
import calendar
import requests
import pandas as pd
import json
import os.path
import time
import MySQLdb as M
from gdax_history import timestamp_to_utcstr

def connect_to_db():
    config = json.load(open('dbconn.json'))["mysql"]
    db = M.connect(host = config["host"],
                   user = config["user"],
                   passwd = config["password"],
                   db = config["database"])
    return db

def write_to_db(df, db):
    print "Write %d entries to database." % df.shape[0]
    cur = db.cursor()
    try:
        for row in df.itertuples():
            ts = row.Time / 1000
            cur.execute(
                """INSERT INTO finex_history (timestamp, open, close, high, low, volume, utc_datetime)
                   VALUES (%s, %s, %s, %s, %s, %s, %s)""",
                [ts, row.Open, row.Close, row.High, row.Low, row.Volume, timestamp_to_utcstr(ts)])
        db.commit()
        print "Write successfully!\n"
    except (M.Error, M.Warning) as e:
        print e
        db.rollback()

def collect_data(start, end):
    starttime = datetime.datetime.strptime(start, '%m/%d/%Y')
    endtime = datetime.datetime.strptime(end, '%m/%d/%Y')
    start_unixtime = calendar.timegm(starttime.utctimetuple())
    end_unixtime = calendar.timegm(endtime.utctimetuple())
    track_time = time.time()  # Bitfinex only allows ~10 requests per minute; rest if we are faster than that
    count = 0
    df = pd.DataFrame(data = [], columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume'])
    while (start_unixtime < end_unixtime):
        cur_end_unixtime = start_unixtime + 60 * 999  # 999 one-minute candles at a time
        if (cur_end_unixtime > end_unixtime):
            cur_end_unixtime = end_unixtime  # if the end time is in the future
        url = 'https://api.bitfinex.com/v2/candles/trade:1m:tBTCUSD/hist?start={}&end={}&limit=1000'.format(str(start_unixtime) + "000", str(cur_end_unixtime) + "000")  # the 1m timeframe can be changed to any other
        response = requests.get(url)
        data = response.json()
        df_tmp = pd.DataFrame(data)
        df_tmp.columns = ['Time', 'Open', 'Close', 'High', 'Low', 'Volume']
        #df.set_index('Time')
        df = pd.concat([df, df_tmp])
        start_unixtime = cur_end_unixtime + 60  # to prevent duplicates
        count = count + 1
        if (count == 10):  # if 10 requests have been made
            count = 0  # reset the counter
            diff = time.time() - track_time
            if (diff <= 60):
                print('Sleeping for {} seconds'.format(str(60 - diff)))
                time.sleep(60 - diff)
            track_time = time.time()
            # Bitfinex limits us to ~10 requests per minute
    df = df.sort_values(by = ['Time'])
    return df

def main():
    db = connect_to_db()
    df = collect_data(start = '09/24/2018', end = '09/26/2018')
    write_to_db(df, db)
    db.close()

if __name__ == "__main__":
    main()
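The sleep logic in collect_data implements a simple fixed-window rate limit: count requests, and when the window fills before a minute has passed, sleep for the remainder. A standalone sketch of the same idea as a reusable helper; the class and its names are illustrative, not from the original.

# Minimal fixed-window rate limiter sketch (illustrative, not from the original).
import time

class RateLimiter(object):
    def __init__(self, max_calls, per_seconds):
        self.max_calls = max_calls
        self.per_seconds = per_seconds
        self.count = 0
        self.window_start = time.time()

    def wait(self):
        """Call once after each request; blocks when the window is exhausted."""
        self.count += 1
        if self.count >= self.max_calls:
            elapsed = time.time() - self.window_start
            if elapsed <= self.per_seconds:
                time.sleep(self.per_seconds - elapsed)
            self.count = 0
            self.window_start = time.time()

# usage: limiter = RateLimiter(10, 60); then limiter.wait() after each request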
| 30.21
| 207
| 0.581595
| 398
| 3,021
| 4.28392
| 0.386935
| 0.058065
| 0.008798
| 0.009384
| 0.119648
| 0.081525
| 0.03871
| 0
| 0
| 0
| 0
| 0.031496
| 0.285336
| 3,021
| 99
| 208
| 30.515152
| 0.758221
| 0.098974
| 0
| 0.060606
| 0
| 0.015152
| 0.117899
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.015152
| 0.136364
| null | null | 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0af766c917854c90cf7eae087d9105162f3eb248
| 8,667
|
py
|
Python
|
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
py/testdir_multi_jvm/test_many_fp_formats_libsvm_2.py
|
vkuznet/h2o
|
e08f7014f228cbaecfb21f57379970e6a3ac0756
|
[
"Apache-2.0"
] | null | null | null |
import unittest, random, sys, time
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_exec as h2e, h2o_glm
import h2o_util
zeroList = [
'Result0 = 0',
]
# the first column should use this
exprList = [
'Result<n> = sum(<keyX>[<col1>])',
]
DO_SUMMARY = False
DO_COMPARE_SUM = False
def write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution):
# we can do all sorts of methods off the r object
r = random.Random(SEEDPERFILE)
def addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict):
# colNumber should not be 0, because the output will be there
## val = r.uniform(MIN,MAX)
val = r.triangular(valMin,valMax,0)
valFormatted = h2o_util.fp_format(val, sel)
# force it to be zero in this range. so we don't print zeroes for svm!
if (val > valMin/2) and (val < valMax/2):
return None
else:
rowData.append(str(colNumber) + ":" + valFormatted) # f should always return string
if colNumber in synColSumDict:
synColSumDict[colNumber] += val # sum of column (dict)
else:
synColSumDict[colNumber] = val # sum of column (dict)
return val
valMin = -1e2
valMax = 1e2
classMin = -36
classMax = 36
dsf = open(csvPathname, "w+")
synColSumDict = {0: 0} # guaranteed to have col 0 for output
# even though we try to get a max colCount with random, we might fall short
# track what max we really got
colNumberMax = 0
for i in range(rowCount):
rowData = []
d = random.randint(0,2)
if d==0:
if distribution == 'sparse':
# only one value per row!
# is it okay to specify col 0 in svm? where does the output data go? (col 0)
colNumber = random.randint(1, colCount)
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
# did we add a val?
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
else:
# some number of values per row.. 50% or so?
for colNumber in range(1, colCount+1):
val = addRandValToRowStuff(colNumber, valMin, valMax, rowData, synColSumDict)
if val and (colNumber > colNumberMax):
colNumberMax = colNumber
# always need an output class, even if no cols are non-zero
# space is the only valid separator
# add the output (col 0)
# random integer for class
val = random.randint(classMin,classMax)
rowData.insert(0, val)
synColSumDict[0] += val # sum of column (dict)
rowDataCsv = " ".join(map(str,rowData))
# FIX! vary the eol ?
# (an earlier idea was to randomly skip rows and write only 1/3; every row is written)
dsf.write(rowDataCsv + "\n")
dsf.close()
return (colNumberMax, synColSumDict)
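# --- Added note (not in the original test) ---------------------------------
# write_syn_dataset emits SVMLight/libsvm rows: an integer class label first,
# then space-separated "col:value" pairs for the non-zero columns (1-based).
# A row produced above might look like:
#
#   -17 3:-87.25 9:61.4 12:-90.1
#
# Values in the middle of the range (valMin/2 .. valMax/2) are deliberately
# dropped, which keeps the file sparse. The example numbers are illustrative.
# ---------------------------------------------------------------------------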
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(2,java_heap_GB=5)
else:
h2o_hosts.build_cloud_with_hosts()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_many_fp_formats_libsvm_2(self):
# h2b.browseTheCloud()
SYNDATASETS_DIR = h2o.make_syn_dir()
tryList = [
(100, 10000, 'cA', 300, 'sparse50'),
(100, 10000, 'cB', 300, 'sparse'),
# (100, 40000, 'cC', 300, 'sparse50'),
# (100, 40000, 'cD', 300, 'sparse'),
]
# h2b.browseTheCloud()
for (rowCount, colCount, hex_key, timeoutSecs, distribution) in tryList:
NUM_CASES = h2o_util.fp_format()
for sel in [random.randint(0,NUM_CASES-1)]: # len(caseList)
SEEDPERFILE = random.randint(0, sys.maxint)
csvFilename = "syn_%s_%s_%s_%s.csv" % (SEEDPERFILE, sel, rowCount, colCount)
csvPathname = SYNDATASETS_DIR + '/' + csvFilename
print "Creating random", csvPathname
# dict of col sums for comparison to exec col sums below
(colNumberMax, synColSumDict) = write_syn_dataset(csvPathname, rowCount, colCount, SEEDPERFILE, sel, distribution)
selKey2 = hex_key + "_" + str(sel)
print "This dataset requires telling h2o parse it's a libsvm..doesn't detect automatically"
parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=selKey2,
timeoutSecs=timeoutSecs, doSummary=False, parser_type='SVMLight')
print csvFilename, 'parse time:', parseResult['response']['time']
print "Parse result['destination_key']:", parseResult['destination_key']
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
num_cols = inspect['num_cols']
num_rows = inspect['num_rows']
print "\n" + csvFilename
# SUMMARY****************************************
# gives us some reporting on missing values, constant values,
# to see if we have x specified well
# figures out everything from parseResult['destination_key']
# needs y to avoid output column (which can be index or name)
# assume all the configs have the same y; just check with the first one
goodX = h2o_glm.goodXFromColumnInfo(y=0,
key=parseResult['destination_key'], timeoutSecs=300, noPrint=True)
if DO_SUMMARY:
summaryResult = h2o_cmd.runSummary(key=selKey2, max_column_display=colNumberMax+1, timeoutSecs=timeoutSecs)
h2o_cmd.infoFromSummary(summaryResult, noPrint=True)
self.assertEqual(colNumberMax+1, num_cols, msg="generated %s cols (including output). parsed to %s cols" % (colNumberMax+1, num_cols))
# Exec (column sums)*************************************************
if DO_COMPARE_SUM:
h2e.exec_zero_list(zeroList)
colResultList = h2e.exec_expr_list_across_cols(None, exprList, selKey2, maxCol=colNumberMax+1,
timeoutSecs=timeoutSecs)
print "\n*************"
print "colResultList", colResultList
print "*************"
self.assertEqual(rowCount, num_rows, msg="generated %s rows, parsed to %s rows" % (rowCount, num_rows))
# need to fix this for compare to expected
# we should be able to keep the list of fp sums per col above
# when we generate the dataset
### print "\nsynColSumDict:", synColSumDict
for k,v in synColSumDict.iteritems():
if DO_COMPARE_SUM:
# k should be integers that match the number of cols
self.assertTrue(k>=0 and k<len(colResultList))
compare = colResultList[k]
print "\nComparing col sums:", v, compare
# Even though we're comparing floating point sums, the operations probably should have
# been done in same order, so maybe the comparison can be exact (or not!)
self.assertAlmostEqual(v, compare, places=0,
msg='%0.6f col sum is not equal to expected %0.6f' % (v, compare))
synMean = (v + 0.0)/rowCount
# enums don't have mean, but we're not enums
mean = float(inspect['cols'][k]['mean'])
# our fp formats in the syn generation sometimes only have two places?
self.assertAlmostEqual(mean, synMean, places=0,
msg='col %s mean %0.6f is not equal to generated mean %0.6f' % (k, mean, synMean))
num_missing_values = inspect['cols'][k]['num_missing_values']
self.assertEqual(0, num_missing_values,
msg='col %s num_missing_values %d should be 0' % (k, num_missing_values))
if __name__ == '__main__':
h2o.unit_main()
| 45.376963
| 151
| 0.572055
| 989
| 8,667
| 4.905966
| 0.336704
| 0.016076
| 0.016488
| 0.02535
| 0.128813
| 0.125103
| 0.112531
| 0.02803
| 0.02803
| 0
| 0
| 0.025137
| 0.325257
| 8,667
| 190
| 152
| 45.615789
| 0.804549
| 0.219222
| 0
| 0.113821
| 0
| 0
| 0.097485
| 0.00387
| 0
| 0
| 0
| 0
| 0.04878
| 0
| null | null | 0
| 0.03252
| null | null | 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0afb2dc8c2daf11d9a82ca819aeffdafacc6c971
| 2,515
|
py
|
Python
|
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
graph.py
|
VaniSHadow/tpGenerator
|
2a2e0a65df48c812d9fa2e2b1474573c6a6ab6c0
|
[
"Unlicense"
] | null | null | null |
import random
import numpy
import copy

class Graph:
    """A graph with n vertices and m edges."""
    def __init__(self, n, m, edge_weight=1, directed=True, connected='weak', loop=False, weighted=False, trim=True):
        """
        n            number of vertices
        m            number of edges
        edge_weight  upper bound for edge weights
        directed     whether the graph is directed
        connected    connectivity requirement ('weak', ...)
        loop         whether cycles are allowed
        weighted     whether edges carry weights
        trim         True: vertices numbered from 1; False: numbered from 0
        """
        self.directed = directed
        self.weighted = weighted
        self.connected = connected
        self.loop = loop
        self.trim = trim
        self.edge_weight = edge_weight  # fix: keep for GetEdge below
        if directed==True and connected=='weak' and loop==False:  # weakly connected, directed, acyclic
            self.n = n
            self.m = m
            self.matr = numpy.zeros((n, n))
            self.topo = list(range(n))
            random.shuffle(self.topo)
            self.RandomGenerTopoEdges(m-(n-1))
            weak_connected = self.CheckWeakConnectivity()
            if weak_connected:
                self.RandomGenerTopoEdges(n-1)
            else:
                count = 0
                for i in range(n-1):
                    if self.matr[self.topo[i]][self.topo[i+1]]!=1:
                        self.matr[self.topo[i]][self.topo[i+1]]=1
                        count = count+1
                self.RandomGenerTopoEdges(n-1-count)
            self.edges = list()
            for i in range(n):
                for j in range(n):
                    if self.matr[i][j]==1:
                        e = (i, j)
                        self.edges.append(e)

    def CheckWeakConnectivity(self):
        """Check whether the graph is weakly connected."""
        temp = copy.deepcopy(self.matr)
        for i in range(self.n):
            for j in range(self.n):
                if temp[i][j]==1:
                    temp[j][i]=1
                elif temp[j][i]==1:
                    temp[i][j]=1
        for i in range(self.n-1):
            if i==0:
                result = temp.dot(temp)
            else:
                result = result.dot(temp)
        for i in range(self.n):
            for j in range(self.n):
                if result[i][j]==0 and i!=j:
                    return False
        return True

    def RandomGenerTopoEdges(self, edge_num):
        """Randomly generate edge_num edges in the graph."""
        for i in range(edge_num):
            mid = random.randint(1, self.n-2)
            st = random.randint(0, mid)
            end = random.randint(mid+1, self.n-1)
            while self.matr[self.topo[st]][self.topo[end]] != 0:
                mid = random.randint(1, self.n-2)
                st = random.randint(0, mid)
                end = random.randint(mid+1, self.n-1)
            self.matr[self.topo[st]][self.topo[end]] = 1

    def GetEdge(self, i):
        """Return the information of the i-th edge as a string."""
        if self.trim:  # vertices numbered from 1
            if self.weighted == False:
                return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1)
            else:
                # fix: the weight must be stringified and separated by a space
                return str(self.edges[i][0]+1) + " " + str(self.edges[i][1]+1) + " " + str(random.randint(1, self.edge_weight))
        else:  # vertices numbered from 0
            if self.weighted == False:
                return str(self.edges[i][0]) + " " + str(self.edges[i][1])
            else:
                return str(self.edges[i][0]) + " " + str(self.edges[i][1]) + " " + str(random.randint(1, self.edge_weight))
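CheckWeakConnectivity above tests connectivity by repeatedly multiplying the symmetrized adjacency matrix, which is n-1 multiplications of an n-by-n matrix, O(n^4) overall. A sketch of the standard O(n + m) alternative, a BFS over the underlying undirected graph; this is a standalone illustration, not a drop-in method of the class.

# Sketch: weak-connectivity check via BFS on the symmetrized adjacency matrix.
from collections import deque

def is_weakly_connected(matr, n):
    if n == 0:
        return True
    seen = [False] * n
    seen[0] = True
    queue = deque([0])
    while queue:
        u = queue.popleft()
        for v in range(n):
            # treat every arc as undirected
            if not seen[v] and (matr[u][v] == 1 or matr[v][u] == 1):
                seen[v] = True
                queue.append(v)
    return all(seen)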
| 27.043011
| 113
| 0.622664
| 407
| 2,515
| 3.815725
| 0.184275
| 0.035415
| 0.061816
| 0.066967
| 0.388281
| 0.365744
| 0.324533
| 0.324533
| 0.282035
| 0.282035
| 0
| 0.024549
| 0.206362
| 2,515
| 92
| 114
| 27.336957
| 0.753507
| 0.006759
| 0
| 0.236111
| 0
| 0
| 0.005314
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.041667
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c120c632a3695672ca8dce5ff251b3540195c6e
| 68,026
|
py
|
Python
|
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
sandroad.py
|
lancelee82/bluelake
|
3ac3bba191ec5e331dcf66e0a20725445585c316
|
[
"MIT"
] | null | null | null |
"""
Flatpath, go forward forever.
http://codeincomplete.com/posts/javascript-racer/
http://www.extentofthejam.com/pseudo/
http://pixel.garoux.net/screen/game_list
Usage:
* UP/DOWN/LEFT/RIGHT
* SPACE : hide/show road map
* TAB : replay this road
* RETURN : go to a new road
TODO:
* hill road
* more road sprites
* sound
"""
import math
import random
import time
from starfish import pygm
from starfish import consts
from starfish import sptdraw
from starfish import utils
IMG_POS_BACKGROUND = {
'HILLS': { 'x': 5, 'y': 5, 'w': 1280, 'h': 480 },
'SKY': { 'x': 5, 'y': 495, 'w': 1280, 'h': 480 },
'TREES': { 'x': 5, 'y': 985, 'w': 1280, 'h': 480 },
}
IMG_POS_SPRITES = {
'PALM_TREE': { 'x': 5, 'y': 5, 'w': 215, 'h': 540 },
'BILLBOARD08': { 'x': 230, 'y': 5, 'w': 385, 'h': 265 },
'TREE1': { 'x': 625, 'y': 5, 'w': 360, 'h': 360 },
'DEAD_TREE1': { 'x': 5, 'y': 555, 'w': 135, 'h': 332 },
'BILLBOARD09': { 'x': 150, 'y': 555, 'w': 328, 'h': 282 },
'BOULDER3': { 'x': 230, 'y': 280, 'w': 320, 'h': 220 },
'COLUMN': { 'x': 995, 'y': 5, 'w': 200, 'h': 315 },
'BILLBOARD01': { 'x': 625, 'y': 375, 'w': 300, 'h': 170 },
'BILLBOARD06': { 'x': 488, 'y': 555, 'w': 298, 'h': 190 },
'BILLBOARD05': { 'x': 5, 'y': 897, 'w': 298, 'h': 190 },
'BILLBOARD07': { 'x': 313, 'y': 897, 'w': 298, 'h': 190 },
'BOULDER2': { 'x': 621, 'y': 897, 'w': 298, 'h': 140 },
'TREE2': { 'x': 1205, 'y': 5, 'w': 282, 'h': 295 },
'BILLBOARD04': { 'x': 1205, 'y': 310, 'w': 268, 'h': 170 },
'DEAD_TREE2': { 'x': 1205, 'y': 490, 'w': 150, 'h': 260 },
'BOULDER1': { 'x': 1205, 'y': 760, 'w': 168, 'h': 248 },
'BUSH1': { 'x': 5, 'y': 1097, 'w': 240, 'h': 155 },
'CACTUS': { 'x': 929, 'y': 897, 'w': 235, 'h': 118 },
'BUSH2': { 'x': 255, 'y': 1097, 'w': 232, 'h': 152 },
'BILLBOARD03': { 'x': 5, 'y': 1262, 'w': 230, 'h': 220 },
'BILLBOARD02': { 'x': 245, 'y': 1262, 'w': 215, 'h': 220 },
'STUMP': { 'x': 995, 'y': 330, 'w': 195, 'h': 140 },
'SEMI': { 'x': 1365, 'y': 490, 'w': 122, 'h': 144 },
'TRUCK': { 'x': 1365, 'y': 644, 'w': 100, 'h': 78 },
'CAR03': { 'x': 1383, 'y': 760, 'w': 88, 'h': 55 },
'CAR02': { 'x': 1383, 'y': 825, 'w': 80, 'h': 59 },
'CAR04': { 'x': 1383, 'y': 894, 'w': 80, 'h': 57 },
'CAR01': { 'x': 1205, 'y': 1018, 'w': 80, 'h': 56 },
'PLAYER_UPHILL_LEFT': { 'x': 1383, 'y': 961, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_STRAIGHT': { 'x': 1295, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_UPHILL_RIGHT': { 'x': 1385, 'y': 1018, 'w': 80, 'h': 45 },
'PLAYER_LEFT': { 'x': 995, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_STRAIGHT': { 'x': 1085, 'y': 480, 'w': 80, 'h': 41 },
'PLAYER_RIGHT': { 'x': 995, 'y': 531, 'w': 80, 'h': 41 }
}
FP_COLOR_WHITE = '#FFFFFF'
FP_COLOR_BLACK = '#000000'
FP_COLOR_YELLOW = '#EEEE00'
FP_COLOR_BLUE = '#00EEEE'
FP_COLORS = {
'SKY': '#72D7EE',
'TREE': '#005108',
'FOG': '#005108',
'LIGHT': {'road': '#6B6B6B', 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
'DARK': {'road': '#696969', 'grass': '#009A00', 'rumble': '#BBBBBB' },
'START': {'road': FP_COLOR_WHITE, 'grass': FP_COLOR_WHITE, 'rumble': FP_COLOR_WHITE},
'FINISH': {'road': FP_COLOR_BLACK, 'grass': FP_COLOR_BLACK, 'rumble': FP_COLOR_BLACK},
'START_Y': {'road': FP_COLOR_YELLOW, 'grass': '#10AA10', 'rumble': '#555555', 'lane': '#CCCCCC'},
}
FP_ROAD = {
'LENGTH': {'NONE': 0, 'SHORT': 25, 'MEDIUM': 50, 'LONG': 100 }, # num segments
'CURVE': {'NONE': 0, 'EASY': 2, 'MEDIUM': 4, 'HARD': 6 },
'HILL': {'NONE': 0, 'LOW': 20, 'MEDIUM': 40, 'HIGH': 60 },
}
FP_ROAD_SPRTS = {
'chest': {'imgs': ['img_sprts/i_chest1.png'], 'score': 100,},
'coin1': {'imgs': ['img_sprts/i_coin1.png'], 'score': 1,},
'coin5': {'imgs': ['img_sprts/i_coin5.png'], 'score': 5,},
'coin20': {'imgs': ['img_sprts/i_coin20.png'], 'score': 20,},
'health': {'imgs': ['img_sprts/i_health.png'], 'score': 10,},
'heart': {'imgs': ['img_sprts/i_heart.png'], 'score': 50,},
'pot1': {'imgs': ['img_sprts/i_pot1.png'], 'score': -5,},
'pot2': {'imgs': ['img_sprts/i_pot2.png'], 'score': -1,},
'shell': {'imgs': ['img_sprts/p_shell.png'], 'score': -20,},
'rockd': {'imgs': ['img_sprts/rock_d2.png'], 'score': -10,},
'rockr': {'imgs': ['img_sprts/rock_r2.png'], 'score': -50,},
#'ashra_defeat': {'imgs': ['img_sprts/ashra_defeat1.png'], 'score': -100,},
#'bear': {'imgs': ['img_sprts/bear2.png'], 'score': -80,},
#'dinof': {'imgs': ['img_sprts/dinof2.png'], 'score': -50,},
'blobb': {'imgs': ['img_sprts/blobb1.png'], 'score': -50,},
'chick_fly': {'imgs': ['img_sprts/chick_fly3.png'], 'score': 70,},
'clown': {'imgs': ['img_sprts/clown1.png'], 'score': -100,},
}
class SptTmpx(sptdraw.SptDrawBase):
def __init__(self, size, *args, **kwargs):
super(SptTmpx, self).__init__(size)
self.draw_on()
def draw_on(self, *args, **kwargs):
self.fill(consts.GREEN)
self.pygm.draw.circle(self.surf, consts.WHITE,
(self.size[0] / 2, self.size[1] / 2),
self.size[0] / 2, 0)
class SptTmpi(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(SptTmpi, self).__init__(img_file)
class FPSptBg(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptBg, self).__init__(img_file, pos)
class FPSptSprts(pygm.SptImgOne):
def __init__(self, img_file, pos, *args, **kwargs):
super(FPSptSprts, self).__init__(img_file, pos)
class FPSptFog(sptdraw.SptDrawBase):
def __init__(self, size, c=[0, 81, 8, 0], h=30, *args, **kwargs):
super(FPSptFog, self).__init__(size)
self.c = c
self.h = h
self.draw_on()
def draw_on(self, *args, **kwargs):
#self.fill(self.c)
d = 2
n = self.h / d
for i in range(n):
rct = [0, i * d, self.size[0], d]
#ca = 255 / n * (n - i)
ca = 200 / n * (n - i)
self.c[3] = ca
self.pygm.draw.rect(self.surf, self.c, rct)
class FPSptRdSprts(pygm.SptImg):
def __init__(self, img_file, *args, **kwargs):
super(FPSptRdSprts, self).__init__(img_file)
@classmethod
def create_by_img(cls, img):
return cls(img)
# for test
#o = SptTmpx((40, 40))
#return o
class FPSptRoadB(sptdraw.SptDrawBase):
def __init__(self, size, cfg, *args, **kwargs):
super(FPSptRoadB, self).__init__(size)
self.cfg = cfg
self.car = kwargs.get('car')
self.bg_sky = kwargs.get('bg_sky')
self.bg_hills = kwargs.get('bg_hills')
self.bg_trees = kwargs.get('bg_trees')
self.clr_dark_road = utils.clr_from_str(FP_COLORS['DARK']['road'])
self.clr_dark_grass = utils.clr_from_str(FP_COLORS['DARK']['grass'])
self.rd_reset(init=True)
self.add_fog()
def prms_reset(self, keep_segs=False):
self.e_keys_up = []
self.e_keys_dn = []
self.camera_x = 0.0
self.camera_y = 0.0
self.camera_z = 500.0#1000.0#0.0 == self.camera_h
self.xw = 0.0
self.yw = 0.0
self.zw = 0.0
self.xc = 0.0
self.yc = 0.0
self.zc = 0.0 ##
self.xp = 0.0
self.yp = 0.0
self.xs = 0.0
self.ys = 0.0
self.d = 200.0#100.0#10.0#30.0#1.0
self.w = self.size[0]
self.h = self.size[1]
if not keep_segs:
self.segments = []
self.rd_sprt_objs = {}
self.rd_sprt_cache = [] # for sprites render order
self.track_len = 0.0
self.seg_len = 200.0#100.0#20.0#60.0#200.0#
self.road_w = 2400#2000#600.0#200.0#1000.0#200#
self.camera_h = 500.0#1000.0#
self.speed_max = 300.0#180.0#200.0#100.0
self.lane_w = 60
self.seg_n = 300#200
#self.seg_draw_n = 200#150
self.seg_draw_n = 70#100#200#150
self.speed = 0.0
self.position = 0.0
self.player_x = 0.0#100.0#1000.0#
self.centrifugal = 0.1#0.06#0.08#0.01#0.3
self.player_seg = None
self.base_seg = None # the segment just under the car
self.player_di = 0 # 0:^ 1:> 2:v 3:<
self.player_go = 0 # 0:- 1:^ 2:v
self.speed_dt_up = 1.0#2.0#3.0
self.speed_dt_dn = 2.0#4.0#6.0
self.speed_dt_na = 1.0#3.0
self.player_x_dt = 60.0#30.0#20.0
self.last_seg_i = 0
self.score = 0
self.game_over = False
self.game_score = 0.0
self.tm_start = 0.0
self.tm_end = 0.0
self.tm_last_once = 0.0
self.sky_speed = 0.1#0.05#
self.hill_speed = 0.2#0.1#
self.tree_speed = 0.3#0.15#
def rd_reset(self, init=False, keep_segs=False, segs_file=None):
#if not init and not keep_segs:
if not init:
self.rd_sprts_del_all_objs()
self.prms_reset(keep_segs=keep_segs)
if segs_file is not None:
try:
segs = self.rd_seg_json_load(segs_file)
self.segments = segs
self.track_len = len(self.segments) * self.seg_len
except Exception as e:
print e
self.init_rd_segs_rand_1()
else:
if not keep_segs:
self.init_rd_segs_rand_1()
self.draw_on()
self.rd_seg_render()
def init_rd_segs_rand_1(self):
#self.rd_seg_init(self.seg_n)
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
#self.rd_seg_init(random.randint(30, 100))
self.rd_seg_init(random.randint(1, 10)) # for a3c train
self.rd_seg_init_rand_curve()
#self.add_curves()
#self.add_low_rolling_hills(20, 2.0)
##self.add_low_rolling_hills(30, 4.0)
#self.rd_seg_init_rand(10)#50#10#3#1
#segnrand = random.randint(3, 30)
segnrand = random.randint(2, 6) # for a3c train
self.rd_seg_init_rand(segnrand)
# for segment draw
#self.rd_seg_init(self.seg_draw_n)
#self.rd_seg_init(100)#20#500#2#10#4#1#100#200
self.rd_seg_init(10) # for a3c train
self.rd_start_seg_init()
self.rd_sprts_init_rand()
def draw_on(self, *args, **kwargs):
self.fill(self.clr_dark_grass)
def add_fog(self):
self.fog = FPSptFog(self.size)
self.fog.rect.top = 240
self.fog.rect.left = 0
self.disp_add(self.fog)
def get_seg_base_i(self, pos=None):
if pos is None:
pos = self.position
i = int(pos / self.seg_len)
#x#i = int(utils.math_round(pos / self.seg_len))
#i = int(math.floor(pos / self.seg_len))
#i = int(math.ceil(pos / self.seg_len))
seg_n = len(self.segments)
i = (i + seg_n) % seg_n
return i
def rd_get_segs(self, whole=False):
if whole:
segs = self.segments
else:
segs = self.segments[:-self.seg_draw_n]
return segs
# #### geometry #### #
def geo_prjc_scale(self, d, zc):
if zc == 0.0:
return 1.0
else:
return d / zc
def xc_to_xp(self, xc, d, zc):
if zc == 0.0:
#xp = float('inf')
#xp = 2 ** 64
xp = xc
else:
xp = xc * (d / zc)
return xp
def yc_to_yp(self, yc, d, zc):
if zc == 0.0:
#yp = float('inf')
#yp = 2 ** 64
yp = yc
else:
yp = yc * (d / zc)
return yp
def xp_to_xs(self, xp, w):
#xs = w / 2.0 + w / 2.0 * xp
xs = w / 2.0 + xp
return xs
def yp_to_ys(self, yp, h):
#ys = h / 2.0 - h / 2.0 * yp
ys = h / 2.0 - yp
return ys
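# --- Added note (not in the original) ---------------------------------------
# The four helpers above are a standard pinhole projection: a camera-space
# point (xc, yc, zc) is scaled by d/zc onto the projection plane at depth d,
# then mapped to screen coordinates with the origin moved to the center:
#
#   xp = xc * d / zc        xs = w/2 + xp
#   yp = yc * d / zc        ys = h/2 - yp   (screen y grows downward)
#
# e.g. with d = 200, a road edge at xc = 1200 and depth zc = 4000 lands at
# xp = 60, i.e. 60 px right of the screen center. Numbers are illustrative.
# -----------------------------------------------------------------------------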
def rd_seg_init(self, a=500):
for n in range(a):
self.rd_seg_add(0.0, 0.0)
def rd_seg_add(self, curve=0.0, yw=0.0):
#print '+', curve, yw
n = len(self.segments)
#print n
if n % 2 == 0:
#if n % 4 == 0:
c = FP_COLORS['LIGHT']
#c = {'road': FP_COLOR_WHITE}
else:
c = FP_COLORS['DARK']
#c = {'road': FP_COLOR_BLACK}
seg = {
'index': n,
'p1': {'world': {'z': (n + 1) * self.seg_len,
'y': self.seg_lasy_y()},
'camera': {},
'screen': {}},
'p2': {'world': {'z': (n + 2) * self.seg_len,
'y': yw},
'camera': {},
'screen': {}},
'curve': curve,
'color': c,
'sprites': [],
'looped': 0,
}
self.segments.append(seg)
self.track_len = len(self.segments) * self.seg_len
#self.track_len = (len(self.segments) - self.seg_draw_n) * self.seg_len
def seg_lasy_y(self):
seg_n = len(self.segments)
if seg_n == 0:
return 0.0
else:
return self.segments[seg_n - 1]['p2']['world'].get('y', 0.0)
def rd_seg_init_rand(self, n=50):
#print 'rd_seg_init_rand', n
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
if p < 0.3:
curve = 0.0
yw = 0.0
#elif p < 0.8:
# curve = 0.0
# yw = random.random() * 10.0
else:
curve = rl * random.random() * 6.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_seg_init_rand_2(self, n=50):
for i in range(n):
p = random.random()
#print p
rl = random.choice([1, -1])
if p < 0.35:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['MEDIUM'])
elif p < 0.7:
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
rl * FP_ROAD['CURVE']['EASY'])
else:
enter = random.randint(10, 100)
hold = random.randint(10, 100)
leave = random.randint(10, 100)
self.add_road(enter, hold, leave, 0.0, 0.0)
def rd_seg_init_rand_curve(self, n=5):
#print 'rd_seg_init_rand', n
for i in range(n):
rl = random.choice([1, -1])
enter = random.randint(10, 40)
hold = random.randint(10, 40)
leave = random.randint(10, 40)
curve = rl * random.random() * 8.0
yw = 0.0
self.add_road(enter, hold, leave, curve, yw)
def rd_start_seg_init(self, n=3):
seg_n = len(self.segments)
if seg_n == 0:
return
#self.segments[0]['color'] = FP_COLORS['START_Y']
#self.segments[2]['color'] = FP_COLORS['START_Y']
for i in range(n):
self.segments[i]['color'] = FP_COLORS['START_Y']
def rd_sprts_init_rand(self, n=None):
seg_n = len(self.segments)
if n is None:
#n = seg_n / 20
n = seg_n / random.randint(10, 30)
for i in range(n):
j = random.randint(10, seg_n - 10)
sprt = random.choice(FP_ROAD_SPRTS.keys())
s = {
'name': sprt,
'type': 1, # image / animate / ...
'obj': None, # need to create at render
##'x_i': None, # get real (random) x from x_pos
'x_i': random.randint(0, 4),
'score': FP_ROAD_SPRTS[sprt].get('score', 0),
}
self.segments[j]['sprites'].append(s)
def rd_sprts_del_all_objs(self):
for k, sprt in self.rd_sprt_objs.items():
#print k, sprt
self.disp_del(sprt)
del self.rd_sprt_objs[k]
def util_limit(self, value, mn, mx):
return max(mn, min(value, mx))
def util_accelerate(self, v, accel, dt):
return v + (accel * dt)
def util_increase(self, start, increment, mx): # with looping
result = start + increment
while (result >= mx):
result -= mx
while (result < 0):
result += mx
return result
def util_ease_in(self, a, b, percent):
return a + (b - a) * math.pow(percent, 2)
def util_ease_out(self, a, b, percent):
return a + (b - a) * (1 - math.pow(1 - percent, 2))
def util_ease_in_out(self, a, b, percent):
return a + (b - a) * ((-math.cos(percent * math.pi)/2) + 0.5)
def util_curve_percent_remaining(self, n, total):
return (n % total) / total
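# --- Added note (not in the original) ---------------------------------------
# The easing helpers above blend between a and b as percent runs 0 -> 1:
#   util_ease_in      a + (b-a) * p^2             (slow start)
#   util_ease_out     a + (b-a) * (1 - (1-p)^2)   (slow finish)
#   util_ease_in_out  cosine blend, slow at both ends
# e.g. util_ease_in(0, 6, 0.5) == 1.5 while util_ease_out(0, 6, 0.5) == 4.5,
# which is how add_road below ramps a curve in and out smoothly.
# -----------------------------------------------------------------------------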
def add_road(self, enter, hold, leave, curve, yw=0.0):
#print enter, hold, leave, curve, yw
start_y = self.seg_lasy_y()
end_y = start_y + (int(yw) * self.seg_len)
total = enter + hold + leave
for n in range(enter):
self.rd_seg_add(self.util_ease_in(0, curve, float(n)/enter),
self.util_ease_out(start_y, end_y,
float(n)/total))
for n in range(hold):
self.rd_seg_add(curve,
self.util_ease_out(start_y, end_y,
(float(n)+enter)/total))
for n in range(leave):
self.rd_seg_add(self.util_ease_out(curve, 0, n/leave),
self.util_ease_out(start_y, end_y,
(float(n)+enter+hold)/total))
def add_curves(self):
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['EASY'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
-FP_ROAD['CURVE']['MEDIUM'])
self.add_road(FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
FP_ROAD['LENGTH']['MEDIUM'],
0.0)
def add_low_rolling_hills(self, num, height):
num = num or FP_ROAD['LENGTH']['SHORT']  # fix: ROAD was an undefined name
height = height or FP_ROAD['HILL']['LOW']
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, -height)
self.add_road(num, num, num, 0, height)
self.add_road(num, num, num, 0, 0)
self.add_road(num, num, num, 0, height/2.0)
self.add_road(num, num, num, 0, 0)
def rd_seg_get_cleared(self, segs=None):
if not segs:
segs = self.segments
segs_c = []
for seg in segs:
if not seg['sprites']:
segs_c.append(seg)
else:
seg_c = {}
for k, v in seg.items():
if k not in ['sprites']:
seg_c[k] = v
else:
seg_c[k] = []
for spr in seg['sprites']:
spr_n = {}
for sk, sv in spr.items():
if sk not in ['obj']:
spr_n[sk] = sv
else:
spr_n[sk] = None
seg_c[k].append(spr_n)
segs_c.append(seg_c)
return segs_c
def rd_seg_json_save(self, f):
sc = self.rd_seg_get_cleared(self.segments)
s = utils.json_dumps(sc)
with open(f, 'w') as fo:
fo.write(s)
def rd_seg_json_load(self, f):
with open(f, 'r') as fi:
s = fi.read()
segs = utils.json_loads(s)
return segs
def rd_seg_render__1_o(self):
"""straight"""
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
for i, seg in enumerate(self.segments):
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
#zc1 = self.position - (zw1 - self.camera_z)
#zc2 = self.position - (zw2 - self.camera_z)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render__2_o(self):
"""curve test 1"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
#'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
#'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__3_o(self):
"""curve test 2: draw a circle"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
#xc1 = self.road_w / 2 - self.player_x
#xc2 = -self.road_w / 2 - self.player_x
#xc3 = self.road_w / 2 - self.player_x
#xc4 = -self.road_w / 2 - self.player_x
# <3>
#engi = math.pi / 2.0 / self.seg_draw_n
engi = math.pi / 2.0 / 60#10#20
rad = self.road_w * 4#2
rad1 = rad + self.road_w / 2
rad2 = rad - self.road_w / 2
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
# TODO: do at update
#dpx1 = self.seg_len * math.tan(theta_i)
#self.player_x -= dpx1
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#x#zw1 = (i+1)*self.seg_len
#zw2 = (i+2)*self.seg_len
#'''
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
curve_d = 500
#x#xc1 = self.road_w / 2 - self.player_x - curve_d * i
#xc2 = -self.road_w / 2 - self.player_x - curve_d * i
#xc3 = self.road_w / 2 - self.player_x - curve_d * i
#xc4 = -self.road_w / 2 - self.player_x - curve_d * i
# <3>
xx1 = rad1 * math.cos(engi * i)
xx2 = rad2 * math.cos(engi * i)
xx3 = rad1 * math.cos(engi * (i + 1))
xx4 = rad2 * math.cos(engi * (i + 1))
xc1 = (rad - xx1) - self.player_x
xc2 = (rad - xx2) - self.player_x
xc3 = (rad - xx3) - self.player_x
xc4 = (rad - xx4) - self.player_x
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
def rd_seg_render__4_o(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
print '=' * 80
print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
print 'segbi', segbi
self.player_seg = self.segments[segbi]
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
#if 1:
#if i < self.seg_draw_n / 2:
if i < self.seg_draw_n / 4:
theta1 = theta_i * i
theta2 = theta_i * (i + 1)
dx1 = self.seg_len * math.tan(theta1)
dx2 = self.seg_len * math.tan(theta2)
xs1 += dx1
xs2 += dx1
xs3 += dx2 #+ dx1
xs4 += dx2 #+ dx1
'''
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
def rd_seg_render(self):
"""curve"""
#theta_i = math.pi /180.0 * 0.1
#theta_i = math.pi /180.0 * 0.5
theta_i = math.pi /180.0 * 0.9
#theta_i = 0.0
xc1 = self.road_w / 2 - self.player_x
xc2 = -self.road_w / 2 - self.player_x
xc3 = self.road_w / 2 - self.player_x
xc4 = -self.road_w / 2 - self.player_x
#xcl1 = xc1 - self.lane_w
#xcl2 = xc2 + self.lane_w
#xcl3 = xc3 - self.lane_w
#xcl4 = xc4 + self.lane_w
xcr1 = self.lane_w - self.player_x
xcr2 = -self.lane_w - self.player_x
xcr3 = self.lane_w - self.player_x
xcr4 = -self.lane_w - self.player_x
yc = self.camera_h
#print '=' * 80
#print 'self.position', self.position
# <2>
seg_n = len(self.segments)
segbi = self.get_seg_base_i()
#print 'segbi', segbi, ' / ', seg_n
self.player_seg = self.segments[segbi]
self.base_seg = self.segments[(segbi + 2) % seg_n]
# for test
#self.base_seg['color'] = FP_COLORS['FINISH']
b_curve = self.player_seg.get('curve', 0.0)
#b_percent = 0.5
b_percent = self.util_curve_percent_remaining(self.position,
self.seg_len)
dx_curve = - (b_curve * b_percent)
x_curve = 0
#print 'b_curve', b_curve
#print 'world z', self.player_seg['p1']['world']['z']
#print 'world y', self.player_seg['p1']['world'].get('y', 0.0)
# clear the sprites cache
self.rd_sprt_cache = []
# <1>
#for i, seg in enumerate(self.segments):
# <2>
for i in range(self.seg_draw_n):
#'''
# <2>
si = (segbi + i) % seg_n
#print si
seg = self.segments[si]
#'''
'''
# for test
if i < 10:
print '>>> ', i
print 'curve', seg.get('curve', 0.0)
print 'world z', seg['p1']['world']['z']
print 'world y', seg['p1']['world'].get('y', 0.0)
#print '-' * 30
'''
'''
#x#
if seg['index'] < segbi:
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
else:
# <1>
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
'''
zw1 = (i+1)*self.seg_len
zw2 = (i+2)*self.seg_len
zc1 = zw1 - self.camera_z - (self.position % self.seg_len)
zc2 = zw2 - self.camera_z - (self.position % self.seg_len)
'''
#x#
zw1 = seg['p1']['world']['z']
zw2 = seg['p2']['world']['z']
zc1 = zw1 - self.camera_z - self.position
zc2 = zw2 - self.camera_z - self.position
'''
# for curve
xc1 = xc1 - x_curve
xc2 = xc2 - x_curve
xc3 = xc3 - x_curve - dx_curve
xc4 = xc4 - x_curve - dx_curve
xcl1 = xc1 - self.lane_w
xcl2 = xc2 + self.lane_w
xcl3 = xc3 - self.lane_w
xcl4 = xc4 + self.lane_w
xcr1 = xcr1 - x_curve
xcr2 = xcr2 - x_curve
xcr3 = xcr3 - x_curve - dx_curve
xcr4 = xcr4 - x_curve - dx_curve
x_curve = x_curve + dx_curve
dx_curve = dx_curve + seg.get('curve', 0.0)
# for hills
yw1 = seg['p1']['world'].get('y', 0.0)
yw2 = seg['p2']['world'].get('y', 0.0)
yc1 = yc - yw1
yc2 = yc - yw2
#print yw1, yw2
xp1 = self.xc_to_xp(xc1, self.d, zc1)
xs1 = self.xp_to_xs(xp1, self.w)
xp2 = self.xc_to_xp(xc2, self.d, zc1)
xs2 = self.xp_to_xs(xp2, self.w)
xp3 = self.xc_to_xp(xc3, self.d, zc2)
xs3 = self.xp_to_xs(xp3, self.w)
xp4 = self.xc_to_xp(xc4, self.d, zc2)
xs4 = self.xp_to_xs(xp4, self.w)
yp1 = self.yc_to_yp(yc1, self.d, zc1)
ys1 = self.yp_to_ys(yp1, self.h)
ys2 = ys1
yp3 = self.yc_to_yp(yc2, self.d, zc2)
ys3 = self.yp_to_ys(yp3, self.h)
ys4 = ys3
'''
# for test
if i < 10:
print xs1, ys1, xs2, ys2
print xs4, ys4, xs3, ys3
print '-' * 30
'''
# grass
self.render_polygon(None,
0, ys1, self.w, ys2,
self.w, ys4, 0, ys3,
seg['color']['grass'])
# road
self.render_polygon(None,
xs1, ys1, xs2, ys2,
xs4, ys4, xs3, ys3,
seg['color']['road'])
if 1:#i % 2 == 1:
xpl1 = self.xc_to_xp(xcl1, self.d, zc1)
xsl1 = self.xp_to_xs(xpl1, self.w)
xpl2 = self.xc_to_xp(xcl2, self.d, zc1)
xsl2 = self.xp_to_xs(xpl2, self.w)
xpl3 = self.xc_to_xp(xcl3, self.d, zc2)
xsl3 = self.xp_to_xs(xpl3, self.w)
xpl4 = self.xc_to_xp(xcl4, self.d, zc2)
xsl4 = self.xp_to_xs(xpl4, self.w)
self.render_polygon(None,
xs1, ys1, xsl1, ys1,
xsl3, ys3, xs3, ys3,
seg['color']['rumble'])
self.render_polygon(None,
xs2, ys2, xsl2, ys2,
xsl4, ys4, xs4, ys4,
seg['color']['rumble'])
xpr1 = self.xc_to_xp(xcr1, self.d, zc1)
xsr1 = self.xp_to_xs(xpr1, self.w)
xpr2 = self.xc_to_xp(xcr2, self.d, zc1)
xsr2 = self.xp_to_xs(xpr2, self.w)
xpr3 = self.xc_to_xp(xcr3, self.d, zc2)
xsr3 = self.xp_to_xs(xpr3, self.w)
xpr4 = self.xc_to_xp(xcr4, self.d, zc2)
xsr4 = self.xp_to_xs(xpr4, self.w)
self.render_polygon(None,
xsr1, ys1, xsr2, ys2,
xsr4, ys4, xsr3, ys3,
seg['color']['rumble'])
# for test
#self.pygm.draw.circle(self.surf, consts.BLUE,
# (int(xsr1), 116 - int(ys1)),
# 3, 0)
# render road sprites
# TODO: check if this seg is looped
seg_scale = self.geo_prjc_scale(self.d, zc1)
x_rnd = random.randint(1, self.road_w / 2 - 10) * seg_scale
#x_sprt = (xs1 + xs2) / 2.0
#y_sprt = (ys1 + ys3) / 2.0
x_dt = x_rnd * seg_scale
x_pos = [xsr1, xsr2,
(xsr1 + xsl1) / 2.0,
(xsr2 + xsl2) / 2.0,
xsl1, xsl2]
#x_sprt = xsr1
x_sprt = (xsr1 + xsl1) / 2.0
#x_sprt = random.choice(x_pos)
x_i = random.randint(0, len(x_pos) - 1) # NOTE: not used now !!
##x_i = 2
y_sprt = ys1
scale_sprt = seg_scale * 8.0#10.0#2.0
obj = self.rd_sprts_render(seg, x_pos, x_i, y_sprt, scale_sprt)
if obj:
self.rd_sprt_cache.append(obj)
# render the sprites with right order
for obj in self.rd_sprt_cache[::-1]:
self.disp_add(obj)
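# --- Added note (not in the original) ---------------------------------------
# Curves in rd_seg_render use the classic pseudo-3D trick: no geometry is
# rotated. Each segment stores a per-segment 'curve' increment; while walking
# away from the camera, dx_curve grows by that increment and x_curve
# accumulates dx_curve, so segment x-coordinates drift sideways quadratically
# and the road appears to bend. b_percent shifts the starting offset by how
# far the player has travelled into the base segment, which keeps the bend
# stable between frames.
# -----------------------------------------------------------------------------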
def render_polygon(self, ctx, x1, y1, x2, y2, x3, y3, x4, y4, color):
#d = 200#100#240#50#
#a = 60
#pnts = [[x1, y1], [x2, y2], [x3, y3], [x4, y4], [x1, y1]]
#pnts = [[x1, y1-d], [x2, y2-d], [x3, y3-d], [x4, y4-d], [x1, y1-d]]
#pnts = [[x1, y1+a], [x2, y2+a], [x3, y3+a], [x4, y4+a], [x1, y1+a]]
# reflect the y-
d = 116
pnts = [[x1, d-y1], [x2, d-y2], [x3, d-y3], [x4, d-y4], [x1, d-y1]]
c = utils.clr_from_str(color)
try:
self.pygm.draw.polygon(self.surf, c, pnts)
except Exception as e:
#print '-' * 60
pass
def rd_sprts_render(self, seg, x_pos, x_i, y, scale):
sprts = seg.get('sprites')
if not sprts:
return None
for i, info in enumerate(sprts):
sprt = info['name']
obj_k = str(seg['index']) + '_' + str(i) + '_' + sprt
obj = info.get('obj')
'''
# TODO: <1>
if not obj:
obj = FPSptRdSprts.create_by_img(FP_ROAD_SPRTS[sprt][0])
info['obj'] = obj
self.disp_add(obj)
'''
# <2>
if obj:
self.disp_del(obj)
# NOTE: objs will be deleted at rd_sprts_del_all_objs()
##del self.rd_sprt_objs[obj_k]
img = FP_ROAD_SPRTS[sprt]['imgs'][0]
obj = FPSptRdSprts.create_by_img(img)
# avoid: pygame.error: Width or height is too large
if scale > 500:
#print 'scale <1>', scale
pass
else:
try:
obj.scale(scale)
except:
#print 'scale <2>', scale
pass
x_i_saved = info.get('x_i')
#if not x_i_saved:
# info['x_i'] = x_i
# x_i_saved = x_i
obj.rect.top = 116 - y + 240 - obj.rect.height
obj.rect.left = x_pos[x_i_saved] - obj.rect.width / 2
#obj.scale(scale)
info['obj'] = obj
##self.disp_add(obj) # NOTE: render out here
self.rd_sprt_objs[obj_k] = obj # for reset to delete all
# NOTE: only show one
break
return obj
def handle_event(self, events, *args, **kwargs):
#print '>>> ', events
if not self.flag_check_event:
return events
else:
return self.check_key(events)
def key_to_di(self, k):
if k == self.pglc.K_UP:
return 0
elif k == self.pglc.K_RIGHT:
return 1
elif k == self.pglc.K_DOWN:
return 2
elif k == self.pglc.K_LEFT:
return 3
else:
return None
def key_to_di_b(self, k):
if k == self.pglc.K_f or k == self.pglc.K_j:
return 0
elif k == self.pglc.K_k:
return 1
elif k == self.pglc.K_SPACE or k == self.pglc.K_v or k == self.pglc.K_n:
return 2
elif k == self.pglc.K_d:
return 3
else:
return None
def check_key(self, events):
#print id(events)
r_events = []
e_keys_up = []
e_keys_dn = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_up.append(di)
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
di = self.key_to_di(event.key)
if di is None:
di = self.key_to_di_b(event.key)
if di is not None:
e_keys_dn.append(di)
else:
r_events.append(event)
else:
r_events.append(event)
self.e_keys_up = e_keys_up
self.e_keys_dn = e_keys_dn
return r_events
def refresh__1(self, fps_clock, *args, **kwargs):
#print '>>> refresh'
#'''
if self.player_di == 3: # <
self.player_x -= 9
if self.player_x < -1000:
self.player_di = 1
elif self.player_di == 1:
self.player_x += 19
if self.player_x > 1000:
self.player_di = 3
#'''
#'''
self.position += 10.0#5.0#1.0
self.position += random.randint(2, 10)
if self.position > self.track_len:
self.position -= self.track_len
#'''
self.draw_on()
self.rd_seg_render()
def refresh(self, fps_clock, *args, **kwargs):
self.check_player_di(self.e_keys_dn, self.e_keys_up)
self.draw_on()
self.rd_seg_render()
self.update_world()
self.check_if_car_out_road()
self.check_score()
self.check_tm()
self.update_bg()
def check_player_di(self, e_keys_dn, e_keys_up):
if 0 in e_keys_dn:
self.player_go = 1
elif 2 in e_keys_dn:
self.player_go = 2
if 1 in e_keys_dn:
self.player_di = 1
elif 3 in e_keys_dn:
self.player_di = 3
if 0 in e_keys_up:
if self.player_go != 2:
self.player_go = 0
if 2 in e_keys_up:
if self.player_go != 1:
self.player_go = 0
if 1 in e_keys_up:
if self.player_di != 3:
self.player_di = 0
if 3 in e_keys_up:
if self.player_di != 1:
self.player_di = 0
def update_world(self):
if self.player_go == 1:
self.speed += self.speed_dt_up
elif self.player_go == 2:
self.speed -= self.speed_dt_dn
else:
self.speed -= self.speed_dt_na
# if on the grass, slow down
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
self.speed -= 10
if self.speed < 0.0:
self.speed = 0.0
elif self.speed > self.speed_max:
self.speed = self.speed_max
self.position += self.speed
if self.position > self.track_len:
self.position -= self.track_len
# for check score
self.last_seg_i = 0
self.game_over = True
self.game_score = 1.0
if self.player_di == 1:
#self.player_x += self.player_x_dt
self.player_x += self.speed / 5 + 20
elif self.player_di == 3:
#self.player_x -= self.player_x_dt
self.player_x -= self.speed / 5 + 20
else:
pass
p_curve = self.player_seg.get('curve', 0.0)
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#print p_dt
#self.player_x -= p_dt
self.player_x += p_dt
def check_if_car_out_road(self):
# decrease score when go out the road
if self.player_x < -self.road_w / 2 or \
self.player_x > self.road_w / 2:
if self.score > 0:
self.score -= 1
#self.score -= 1
#if self.score < 0:
# self.score = 0
self.game_over = True
self.game_score = -1.0
def check_score(self):
# make sure we check score once for a segment
seg_i = self.player_seg['index']
if seg_i > self.last_seg_i:
self.last_seg_i = seg_i
else:
return
# NOTE: here we should use the segment just under the car
#sprts = self.player_seg['sprites']
sprts = self.base_seg['sprites']
if not sprts:
return
# NOTE: we now only use the first sprite !
sprt = sprts[0]
x_i = sprt.get('x_i')
if x_i is None:
return
scr = sprt.get('score')
if not scr: # None or 0
return
obj = sprt.get('obj')
if not obj: # None or 0
return
#rd_w_half = self.road_w / 2
#x_pos = [rd_w_half + self.lane_w,
# rd_w_half - self.lane_w]
sprt_x = obj.rect.left
sprt_w = obj.rect.width
car_x = self.player_x
car_w = self.car.rect.width * 2
sprt_at = 10000
if x_i == 0:
sprt_at = 40
elif x_i == 1:
sprt_at = -40
elif x_i == 2:
sprt_at = 580
elif x_i == 3:
sprt_at = -580
elif x_i == 4:
sprt_at = 1100
elif x_i == 5:
sprt_at = -1100
#print 'sprt_x', sprt_x
#print 'car_x', car_x
#print 'car_w', car_w
#print 'sprt_at', (car_x - car_w / 2), sprt_at, (car_x + car_w / 2)
#print '-' * 40
w_half = car_w / 2 + sprt_w / 2
#if (car_x + car_w / 2) < sprt_x < (car_x + car_w / 2):
if (car_x - w_half) < sprt_at < (car_x + w_half):
self.score += scr
def check_tm(self):
if self.position > self.seg_len * 2:
if self.tm_start == 0.0:
self.tm_start = time.time()
self.tm_end = self.tm_start
else:
self.tm_end = time.time()
self.tm_last_once = self.tm_end - self.tm_start
else:
self.tm_start = 0.0
#self.tm_end = 0.0
def update_bg(self):
# always move the cloud
for sky in self.bg_sky:
sky.rect.left -= 1#self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
if self.speed <= 0.0:
return
p_curve = self.player_seg.get('curve', 0.0)
#p_curve = 3
#print 'p_curve', p_curve
p_dt = self.speed * p_curve * self.centrifugal
#p_dt = 40
#p_dt = -40
#p_dt = random.randint(-100, 100)
#print p_dt
for sky in self.bg_sky:
#print sky
sky.rect.left += int(self.sky_speed * p_dt)
# always move the cloud
#sky.rect.left -= self.sky_speed
if sky.rect.left + sky.rect.width < 0:
sky.rect.left += sky.rect.width * 2
if sky.rect.left - sky.rect.width > 0:
sky.rect.left -= sky.rect.width * 2
for hill in self.bg_hills:
hill.rect.left += int(self.hill_speed * p_dt)
if hill.rect.left + hill.rect.width < 0:
hill.rect.left += hill.rect.width * 2
if hill.rect.left - hill.rect.width > 0:
hill.rect.left -= hill.rect.width * 2
for trees in self.bg_trees:
trees.rect.left += int(self.tree_speed * p_dt)
if trees.rect.left + trees.rect.width < 0:
trees.rect.left += trees.rect.width * 2
if trees.rect.left - trees.rect.width > 0:
trees.rect.left -= trees.rect.width * 2
class FPSptRoadMap(sptdraw.SptDrawBase):
def __init__(self, size, segs, rad, *args, **kwargs):
super(FPSptRoadMap, self).__init__(size)
self.segs = segs
self.rad = rad
#self.fill(consts.WHITE)
self.draw_segs(self.segs, self.rad)
def xy_to_cntr(self, x, y):
return [self.size[0] / 2 + x, self.size[1] / 2 - y]
def cv_to_engl(self, curve, rad):
a = float(curve) / rad
#a *= 10.0
#print a
s = 1.0
if a < 0.0:
s = -1.0
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a)
return tht_d
def get_segs_pnts(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
cv_s = 0
cv_l = 0.0
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
if cv_s:
tht_d = self.cv_to_engl(cv_l, rad)
#tht += tht_d
tht -= tht_d
rad_m = 20.0#10.0#50.0#
cv_s = 0
cv_l = 0.0
else:
rad_m = 0.5#1.0#0.1#
else:
if cv_s:
cv_l += curve
else:
cv_s = 1
continue
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def get_segs_pnts_1(self, segs, rad):
pnts = []
x, y = 0.0, 0.0
tht = 0.0
rad_m = 4.0#2.0#1.0#
pnts.append([x, y])
for seg in segs:
curve = seg.get('curve', 0.0)
if curve == 0.0:
rad_m = 1.0#0.1#
else:
a = float(curve) / rad
a *= 10.0
#print a
if a < -1.0:
a = -1.0
elif a > 1.0:
a = 1.0
#tht_d = math.acos(a)
tht_d = math.asin(a) # TODO:
tht += tht_d
rad_m = 10.0#50.0#
x += rad_m * math.cos(tht)
y += rad_m * math.sin(tht)
pnts.append([x, y])
#print pnts
return pnts
def draw_segs(self, segs, rad):
pnts = self.get_segs_pnts(segs, rad)
#print pnts
if len(pnts) <= 1:
return
#if len(pnts) > 0:
# pnts.append(pnts[0])
cpnts = [self.xy_to_cntr(p[0], p[1]) for p in pnts]
c = utils.clr_from_str(FP_COLOR_BLUE)
#self.pygm.draw.polygon(self.surf, c, cpnts)
self.pygm.draw.lines(self.surf, c, False, cpnts, 3)
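# --- Added note (not in the original) ---------------------------------------
# The minimap integrates the road's curve values into a heading: each curved
# stretch accumulates its curve sum, cv_to_engl converts it to an angle via
# asin(curve / rad) (clamped to [-1, 1]), and get_segs_pnts walks a point
# forward by a per-segment step along that heading. The resulting polyline is
# drawn with pygame.draw.lines as a schematic top-down view of the track.
# -----------------------------------------------------------------------------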
class FPSptProgress(sptdraw.SptDrawBase):
def __init__(self, size, c_bg=consts.BLUE, c_prog=consts.GREEN):
super(FPSptProgress, self).__init__(size)
self.c_bg = c_bg
self.c_prog = c_prog
self.progress(0.0)
def progress(self, prog):
y = self.size[1] * prog
self.fill(self.c_bg)
#self.pygm.draw.rect(self.surf, consts.GREEN,
# [1, 0, self.size[0] - 2, y])
# from down to up
self.pygm.draw.rect(self.surf, self.c_prog,
[1, self.size[1] - y,
self.size[0] - 2, y])
class FPStraight(pygm.PyGMSprite):
def __init__(self, cfg, *args, **kwargs):
super(FPStraight, self).__init__()
self.cfg = cfg
self.bg_sky1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky1.rect.top = 0
self.bg_sky1.rect.left = 0
self.disp_add(self.bg_sky1)
self.bg_sky2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['SKY'])
self.bg_sky2.rect.top = 0
self.bg_sky2.rect.left = self.bg_sky1.rect.width
self.disp_add(self.bg_sky2)
self.bg_hills1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills1.rect.top = 0
self.bg_hills1.rect.left = 0
self.disp_add(self.bg_hills1)
self.bg_hills2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['HILLS'])
self.bg_hills2.rect.top = 0
self.bg_hills2.rect.left = self.bg_hills1.rect.width
self.disp_add(self.bg_hills2)
self.bg_trees1 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees1.rect.top = 0
self.bg_trees1.rect.left = 0
self.disp_add(self.bg_trees1)
self.bg_trees2 = FPSptBg('img_flatpath/images/background.png',
IMG_POS_BACKGROUND['TREES'])
self.bg_trees2.rect.top = 0
self.bg_trees2.rect.left = self.bg_trees1.rect.width
self.disp_add(self.bg_trees2)
self.car = FPSptSprts('img_flatpath/images/sprites.png',
IMG_POS_SPRITES['PLAYER_STRAIGHT'])
#print self.road.cameraDepth/self.road.playerZ
#self.car.scale(self.road.cameraDepth/self.road.playerZ)
self.car.scale(2)
self.car.rect.top = 400
self.car.rect.left = (640 - self.car.rect.width) / 2
##self.disp_add(self.car) # car disp add after road
#self.road = FPSptRoad((640, 240), self.cfg)
self.road = FPSptRoadB((640, 240), self.cfg,
car=self.car,
bg_sky=[self.bg_sky1, self.bg_sky2],
bg_hills=[self.bg_hills1, self.bg_hills2],
bg_trees=[self.bg_trees1, self.bg_trees2])
self.road.rect.top = 240
self.road.rect.left = 0
self.disp_add(self.road)
self.disp_add(self.car)
self.rdmap = FPSptRoadMap((480, 480),
self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rect.top = 0
self.rdmap.rect.left = 80
self.rdmap.rotate(90)
self.disp_add(self.rdmap)
self.rdpsd = pygm.SptLbl(str(int(self.road.speed)),
c=consts.GREEN, font_size=12)
self.rdpsd.rect.top = 456
self.rdpsd.rect.left = 312
self.disp_add(self.rdpsd)
self.scr = pygm.SptLbl(str(int(self.road.score)),
c=consts.RED, font_size=16)
        self.scr.rect.top = 40  # 454 in an earlier layout
self.scr.rect.left = 600
self.disp_add(self.scr)
self.tm_once = pygm.SptLbl(str(int(self.road.tm_last_once)),
c=consts.YELLOW, font_size=16)
        self.tm_once.rect.top = 20  # 454 in an earlier layout
self.tm_once.rect.left = 600
self.disp_add(self.tm_once)
self.prog = FPSptProgress((4, 100), c_prog=consts.YELLOW)
        self.prog.rect.top = 70  # 340 in an earlier layout
self.prog.rect.left = 610
#self.prog.rotate(180)
self.disp_add(self.prog)
self.spd = FPSptProgress((4, 100), c_prog=consts.GREEN)
        self.spd.rect.top = 70  # 340 in an earlier layout
self.spd.rect.left = 602
#self.spd.rotate(180)
self.disp_add(self.spd)
def rdmap_hide(self):
self.rdmap.hide()
def rdmap_reset(self):
self.rdmap.clear()
self.rdmap.draw_segs(self.road.rd_get_segs(whole=True),
self.road.seg_len)
self.rdmap.rotate(90)
def road_reset(self):
self.road.rd_reset()
self.rdmap_reset()
def road_reset_keep_segs(self):
self.road.rd_reset(init=False, keep_segs=True)
def road_reset_from_file(self, segs_file='sr_roads/sr_road.txt'):
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_reset(init=False, keep_segs=False,
segs_file=segs_file)
self.rdmap_reset()
def road_segs_to_file(self, segs_file=None):
if not segs_file:
segs_file = 'sr_roads/sr_road_' + str(int(time.time())) + '.txt'
segs_file = utils.dir_abs(segs_file, __file__)
self.road.rd_seg_json_save(segs_file)
def handle_event(self, events, *args, **kwargs):
#return events
r_events = []
for event in events:
#print event
if event.type == self.pglc.KEYUP:
k = event.key
if k == self.pglc.K_SPACE:
# hide / show road map
self.rdmap_hide()
elif k == self.pglc.K_RETURN:
self.road_reset()
elif k == self.pglc.K_TAB:
self.road_reset_keep_segs()
elif k == self.pglc.K_BACKSPACE:
self.road_reset_from_file()
elif k == self.pglc.K_SLASH:
self.road_segs_to_file()
else:
r_events.append(event)
elif event.type == self.pglc.KEYDOWN:
r_events.append(event)
else:
r_events.append(event)
return r_events
def refresh(self, fps_clock, *args, **kwargs):
self.rdpsd.lbl_set(str(int(self.road.speed)))
self.scr.lbl_set(str(int(self.road.score)))
self.tm_once.lbl_set(str(int(self.road.tm_last_once)))
prg = self.road.position / self.road.track_len
self.prog.progress(prg)
spdc = self.road.speed / self.road.speed_max
self.spd.progress(spdc)
class FPSceneA(pygm.PyGMScene):
def __init__(self, *args, **kwargs):
super(FPSceneA, self).__init__(*args, **kwargs)
self.straight = FPStraight({})
self.straight.rect.top = 0
self.straight.rect.left = 0
self.disp_add(self.straight)
        '''
self.sn1 = SptTmpx((200, 200))
self.sn1.rect.top = 100
self.sn1.rect.left = 100
self.disp_add(self.sn1)
'''
'''
self.lb1 = pygm.SptLbl('hello,', c=consts.GREEN, font_size=32)
self.lb1.rect.top = 200
self.lb1.rect.left = 100
self.disp_add(self.lb1)
'''
def handle_event(self, events, *args, **kwargs):
return events
def refresh(self, fps_clock, *args, **kwargs):
pass
class GMFlatpath(pygm.PyGMGame):
def __init__(self, title, winw, winh, *args, **kwargs):
super(GMFlatpath, self).__init__(title, winw, winh)
bk_im = utils.dir_abs('starfish/data/img_bk_1.jpg', __file__)
#self.bk = pygm.SptImg('data/img_bk_1.jpg')
self.bk = pygm.SptImg(bk_im)
self.bk.rect.top = -230
self.bk.rect.left = -230
#self.disp_add(self.bk)
self.scn1 = FPSceneA()
self.disp_add(self.scn1)
road_file = kwargs.get('road_file')
if road_file:
self.scn1.straight.road_reset_from_file(segs_file=road_file)
def main():
#sf = GMFlatpath('flatpath <:::>', 640, 480)
sf = GMFlatpath('flatpath <:::>', 640, 480, road_file='sr_road.txt')
sf.mainloop()
if __name__ == '__main__':
main()
| 28.824576 | 100 | 0.467263 | 9,147 | 68,026 | 3.285886 | 0.075107 | 0.032606 | 0.023789 | 0.014639 | 0.617647 | 0.553234 | 0.499667 | 0.445269 | 0.421613 | 0.401051 | 0 | 0.064184 | 0.397186 | 68,026 | 2,359 | 101 | 28.836795 | 0.668764 | 0.099668 | 0 | 0.460497 | 0 | 0 | 0.046124 | 0.008643 | 0 | 0 | 0 | 0.00212 | 0 | 0 | null | null | 0.003762 | 0.005267 | null | null | 0.007524 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c1d6fd7dc1976bcfc2727fbe10b4b7b22073b1a | 705 | py | Python | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | ["Apache-2.0"] | null | null | null | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | ["Apache-2.0"] | null | null | null | 2017/third.py | vla3089/adventofcode | 0aefb5509e9f816f89eeab703393be7222632e02 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
# Advent of Code 2017, day 3: Manhattan distance from a spiral memory cell
# to the access point at the center.
target = 368078
# find the smallest odd-sized ring whose bottom-right corner covers the target
size = 1
s_size = size * size  # squared size
while (s_size < target):
    size += 2
    s_size = size * size
# corner values of this ring, walking backwards from the bottom-right corner
bottom_right = s_size
bottom_left = s_size - size + 1
top_left = s_size - 2 * size + 2
top_right = s_size - 3 * size + 3
target_x = -1
target_y = -1
if (target > bottom_left):
    # segment from bottom_left to bottom_right (bottom horizontal line)
    target_x = size - 1
    target_y = target - bottom_left
elif (target > top_left):
    # segment from top_left to bottom_left
    target_y = target - top_left
    target_x = 0
elif (target > top_right):
    # segment from top_right to top_left
    target_x = 0
    target_y = size - target + top_right - 1
else:
    # segment ending at top_right (right edge of the ring)
    target_x = top_right - target
    target_y = size - 1
ap_x = size / 2  # the access point sits at the center of the ring
ap_y = ap_x
print abs(ap_x - target_x) + abs(ap_y - target_y)
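# A minimal brute-force cross-check (an illustrative sketch, not part of the
# original solution): walk the spiral cell by cell and confirm the ring
# arithmetic above; the name spiral_distance and the walk itself are assumed.
def spiral_distance(n):
    x = y = 0
    dx, dy = 1, 0
    step = 1                # current leg length
    value = 1
    while value < n:
        for _ in range(2):  # each leg length is used for two legs
            for _ in range(step):
                if value == n:
                    break
                x += dx
                y += dy
                value += 1
            dx, dy = -dy, dx  # turn counter-clockwise
        step += 1
    return abs(x) + abs(y)

# examples from the puzzle statement
assert spiral_distance(1) == 0
assert spiral_distance(12) == 3
assert spiral_distance(23) == 2
assert spiral_distance(1024) == 31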
| 19.054054 | 47 | 0.631206 | 125 | 705 | 3.28 | 0.224 | 0.085366 | 0.065854 | 0.063415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040462 | 0.26383 | 705 | 36 | 48 | 19.583333 | 0.749518 | 0.079433 | 0 | 0.148148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c2c549754955b919f978ac6624f7aa2371b569a | 19,500 | py | Python | PS12/api2.py | AbhinavSingh-21f1002369/AFKZenCoders | 344475e7d5d60c09637b0bec28c5dab1befe2b65 | ["MIT"] | null | null | null | PS12/api2.py | AbhinavSingh-21f1002369/AFKZenCoders | 344475e7d5d60c09637b0bec28c5dab1befe2b65 | ["MIT"] | null | null | null | PS12/api2.py | AbhinavSingh-21f1002369/AFKZenCoders | 344475e7d5d60c09637b0bec28c5dab1befe2b65 | ["MIT"] | 2 | 2021-10-11T09:28:00.000Z | 2021-10-14T10:30:11.000Z |
from flask import Flask, render_template, request, jsonify, send_file, redirect, session, url_for
from werkzeug.utils import secure_filename
import os
import utilities, queries
import logger
from flask_cors import CORS, cross_origin
from datetime import timedelta
app = Flask(__name__)
CORS(app)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
UPLOAD_FOLDER = '/home/pi/Desktop/AFKZenCoders/PS12/uploads/'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['CORS_HEADERS'] = 'Content-Type'
app.secret_key = "AFKZenCodersAAS"
app.permanent_session_lifetime = timedelta(minutes=60)
@app.route('/')
def hello():
if "username" in session:
logger.logit("Rendered upload.html - test wali")
return render_template('upload.html')
else:
logger.logit("Session does not exist")
logger.logit("Rendered root '/'")
return render_template('index.html')
@app.route('/restart')
def restart():
    logger.logit("---GOOGLE RESTART---")
    os.system("sudo reboot -h now")
    return "Rebooting..."  # reached only if the reboot command fails
@app.route('/userauth', methods = ['POST','GET'])
def userauth():
username = request.form.get('username')
password = request.form.get('password')
if username=="root" and password=="toor":
logger.logit(f"Success LOGIN Request Username:{username} Password:{password}")
session["username"] = username
session.permanent = True
return redirect(url_for("page_upload"))
else:
logger.logit(f"Failure LOGIN Request Username:{username} Password:{password}")
return redirect("http://www.themedallionschool.com/abhinav/PS12/incorrect.html", code=302)
@app.route('/page_upload')
def page_upload():
if "username" in session:
logger.logit("Rendered upload.html")
return render_template('upload.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/page_cdr')
def page_cdr():
if "username" in session:
logger.logit("Rendered cdr.html")
return render_template('cdr.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/page_fir')
def page_fir():
if "username" in session:
logger.logit("Rendered fir.html")
return render_template('fir.html')
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/logout')
def logout():
if "username" in session:
session.pop("username", None)
logger.logit("Successfull logout")
return redirect("/")
else:
logger.logit("Session does not exist")
return redirect("/")
@app.route('/upload')
def upload_file():
logger.logit("Rendered upload.html - test wali")
return render_template('upload.html')
@app.route('/uploader',methods=['GET','POST'])
def uploader():
uploaded_files = request.files.getlist("file")
logger.logit(f"/° Multiple Files Upload Start")
    for file in uploaded_files:
        filename = secure_filename(file.filename)
        if filename in ("917982345234.csv", "918367448476.csv", "916100080762.csv"):
            # the per-number CDR dumps share one load path
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            number = filename[2:11]
            logger.logit(f"| CDRData Saved {number}")
            utilities.addCDRData(path, number)
        elif filename == "CGI_Dataset.csv":
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            logger.logit("| CGIData Saved")
            utilities.addCGIData(path)
        elif filename == "Bank_Details.csv":
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            logger.logit("| Bank_Details Saved")
            utilities.addBankData(path)
        elif filename == "FIR_Dataset.csv":
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            logger.logit("| FIR_Dataset Saved")
            utilities.addFIRData(path)
        elif filename == "Thana.csv":
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            logger.logit("| Thana Saved")
            utilities.addThanaData(path)
        elif filename == "Thana_list_UP.csv":
            path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
            file.save(path)
            logger.logit("| Thana_list_UP Saved")
            utilities.addthanaListData(path)
        else:
            logger.logit(f"File Upload error - {filename}")
logger.logit(f"\. Multiple Files Uploaded - {len(uploaded_files)}")
return render_template('cdr.html')
@app.route('/uploader/cdr', methods = ['GET', 'POST'])
def upload_cdr_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
number = request.files['number']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
logger.logit("CDRData Saved")
print("CDR File Saved successfully")
# Loading File To Database
utilities.addCDRData(path_of_csv,number)
return "CDR File Saved and Loaded to Database Successfully"
@app.route('/uploader/thana', methods = ['GET', 'POST'])
def upload_thana_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
logger.logit("ThanaData Saved")
print("Thana File Saved successfully")
# Loading File To Database
utilities.addThanaData(path_of_csv)
return "Thana File Saved and Loaded to Database Successfully"
@app.route('/uploader/bankacc', methods = ['GET', 'POST'])
def upload_bankacc_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("BankAcc File Saved successfully")
logger.logit("BankData Saved")
# Loading File To Database
utilities.addBankData(path_of_csv)
return "BankAcc File Saved and Loaded to Database Successfully"
@app.route('/uploader/cgi', methods = ['GET', 'POST'])
def upload_cgi_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("CGI File Saved successfully")
logger.logit("CGIData Saved")
# Loading File To Database
utilities.addCGIData(path_of_csv)
return "CGI File Saved and Loaded to Database Successfully"
@app.route('/uploader/fir', methods = ['GET', 'POST'])
def upload_fir_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("FIR File Saved successfully")
logger.logit("FIRData Saved")
# Loading File To Database
utilities.addFIRData(path_of_csv)
return "FIR File Saved and Loaded to Database Successfully"
@app.route('/uploader/thanalist', methods = ['GET', 'POST'])
def upload_thanalist_fxn():
if request.method == 'POST':
# Getting the File
file = request.files['file']
filename = secure_filename(file.filename)
# Path for file
path_of_csv = os.path.join(app.config['UPLOAD_FOLDER'], filename)
# Saving File
file.save(path_of_csv)
print("Thana List File Saved successfully")
logger.logit("ThanaListDATA Saved")
# Loading File To Database
utilities.addthanaListData(path_of_csv)
return "Thana File Saved and Loaded to Database Successfully"
# ############################### Queries ##################################
@app.route('/query/1/', methods = ['GET'])
def query_1():
headers = ["Calling Number","Called Number","Start Time","Duration(sec)","Call Type"]
query = "SELECT calling_number, called_number, start_time, duration, cell_type FROM CallData ORDER BY duration DESC"
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 1 Call")
return jsonify(response)
@app.route('/query/2/', methods = ['GET'])
def query_2():
# Parsing the Headers
since = str(request.args.get('since')) + " 00:00:00"
till = str(request.args.get('till')) + " 23:59:59"
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
query = f'SELECT * FROM CallData WHERE start_time < "{till}" AND start_time > "{since}";'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 2 Call since:{since}, till:{till}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/3/', methods = ['GET'])
def query_3():
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
query = f"SELECT * FROM CallData ORDER BY duration DESC LIMIT 10"
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 3 Call")
return jsonify(response)
@app.route('/query/4/', methods = ['GET'])
def query_4():
headers = ["Dialled Number","Total Dialled Calls","Total Duration"]
query = f'''SELECT called_number, count(*) as 'Frequency', sum(duration) as 'Total Duration' from CallData where cell_type="OUT" GROUP by called_number ORDER by Frequency DESC'''
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 4 Call")
return jsonify(response)
@app.route('/query/5/', methods = ['GET'])
def query_5():
headers = ["Caller","Total Recieved Calls","Total Duration"]
query = f'''SELECT calling_number, count(*) as 'Frequency', sum(duration) as 'Total Duration' from CallData where cell_type="IN" GROUP by calling_number ORDER by Frequency DESC'''
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 5 Call")
return jsonify(response)
@app.route('/query/6/', methods = ['GET'])
def query_6():
headers = ["Called Number","Total Duration(sec)"]
query = f"SELECT DISTINCT called_number, sum(duration) as totalDuration FROM CallData WHERE called_number NOT in (7982345234) GROUP BY called_number ORDER BY totalDuration DESC "
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 6 Call")
return jsonify(response)
@app.route('/query/7/', methods = ['GET'])
def query_7():
headers = ["Called Number","Duration","Call Type"]
query = f'SELECT called_number, duration, cell_type FROM CallData WHERE cell_type="OUT" ORDER by duration DESC'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 7 Call")
return jsonify(response)
@app.route('/query/8/', methods = ['GET'])
def query_8():
headers = ["Calling Number","Duration","Call Type"]
query = f'SELECT calling_number, duration, cell_type FROM CallData WHERE cell_type="IN" ORDER by duration DESC'
result = queries.runQuery(query)
headers = ["Phone NO","Duration","Call Type"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 8 Call")
return jsonify(response)
@app.route('/query/9/', methods = ['GET'])
def query_9():
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
# Parsing the Headers
date = request.args.get('date')
query = f'SELECT * from CallData where start_time like "{date}%" or end_time like "{date}%"'
result = queries.runQuery(query)
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 10 Call date:{date}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/10/', methods = ['GET'])
def query_10():
headers = ["Start Time","End Time","Tower 1","Tower 2"]
# Parsing the Headers
date = request.args.get('date')
    query = f'''SELECT start_time, end_time, cell1, cell2 from CallData where (start_time like "{date}%" or end_time like "{date}%")'''
    result = queries.runQuery(query)
    fString = f">>> Query 10 Call date:{date}"
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(fString)
return jsonify(response)
@app.route('/query/11/', methods = ['GET'])
def query_11():
query = f'''SELECT DISTINCT called_number FROM CallData WHERE cell_type="OUT" UNION SELECT DISTINCT calling_number FROM CallData WHERE cell_type="IN"'''
result = queries.runQuery(query)
headers = ["Mobile Number"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
logger.logit(">>> Query 11 Call")
return jsonify(response)
@app.route('/query/12/', methods = ['GET'])
def query_12():
# Parsing the Headers
number = request.args.get('number')
query = f'''SELECT * FROM CallData WHERE called_number="{number}" or calling_number="{number}"'''
result = queries.runQuery(query)
headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 12 Call number:{number}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/20/', methods = ['GET'])
def query_20():
# Parsing the Headers
fir = request.args.get('fir')
query = f'SELECT * from FIR WHERE FIR_No={int(fir)}'
result = queries.runQuery(query)
headers = ["FIR No","District","PS ID","Time of FIR","Complainant","Act","Section","Complainant Mobile Number"]
if len(result) != 0:
response = {'headers':headers,'rows':result}
else:
response = {'headers':["No Data Available"],'rows':[]}
fString = f">>> Query 20 Call for:{fir}"
logger.logit(fString)
return jsonify(response)
@app.route('/query/100/', methods = ['GET'])
def query_100():
    # Parsing the Headers
    imei = request.args.get('imei')
    query = f'SELECT * from CallData WHERE imei="{imei}"'
    result = queries.runQuery(query)
    headers = ["Calling Number","Called Number","Start Time","End Time","Duration(sec)","Start Tower","End Tower","Call Type","IMEI","IMSI","SMSC","Service Provider"]
    if len(result) != 0:
        response = {'headers':headers,'rows':result}
    else:
        response = {'headers':["No Data Available"],'rows':[]}
    fString = f">>> Query 100 Call IMEI:{imei}"
    logger.logit(fString)
    return jsonify(response)
@app.route('/query/101/', methods = ['GET'])
def query_101():
    # list the unique IMEIs seen in the call records
    unique_imeis_query = 'SELECT DISTINCT imei FROM CallData'
    resultset = queries.runQuery(unique_imeis_query)
    for results in resultset:
        print(results)
    return "OK", 200
@app.route('/loadedfiles', methods = ['GET'])
def loadedfiles():
csv_files = []
for filename in os.listdir("/home/pi/Desktop/AFKZenCoders/PS12/uploads/"):
if filename.endswith(".csv"):
csv_files.append(filename)
logger.logit("Rendered uploaded files")
return jsonify({'CSV files':csv_files})
@app.route('/deleteloaded', methods = ['GET'])
def deleteloaded():
csv_files = []
for filename in os.listdir("/home/pi/Desktop/AFKZenCoders/PS12/uploads/"):
if filename.endswith(".csv"):
fstring = f"/home/pi/Desktop/AFKZenCoders/PS12/uploads/{filename}"
os.remove(fstring)
os.remove("/home/pi/Desktop/AFKZenCoders/PS12/CDRdata.db")
logger.logit("### Files Deleted ###")
return jsonify({'CSV files':csv_files})
# Download API
@app.route("/downloadfile/<filename>", methods = ['GET'])
def download_file(filename):
logger.logit("Rendered download.html")
return render_template('download.html',value=filename)
@app.route('/return-files/<filename>')
def return_files_tut(filename):
file_path = "/home/pi/Desktop/AFKZenCoders/PS12/CDRdata.db"
logger.logit("Database Downloaded")
return send_file(file_path, as_attachment=True, attachment_filename='')
@app.route('/logs')
def logs():
with open("/home/pi/Desktop/AFKZenCoders/PS12/Logs.txt","r") as f:
lines = f.readlines()
f.close()
formated_lines = []
for i in range(len(lines)-1,0,-1):
formated_lines.append(lines[i])
return jsonify({'logs':formated_lines})
@app.route('/graph')
def graph():
    query = 'SELECT date,in_count,out_count,sms_count,total from "798234523"'
    result = queries.runQuery(query)
    headers = ["Date","Incoming Calls","Outgoing Calls","SMS","Total Interactions"]
    if len(result) != 0:
        response = {'headers':headers,'rows':result}
    else:
        response = {'headers':["No Data Available"],'rows':[]}
    fString = ">>> GRAPH Call"
logger.logit(fString)
return jsonify(response)
if __name__ == "__main__":
app.run(host='0.0.0.0',port = 1313,debug = True)
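# A minimal client sketch for the API above (assumed usage, not part of the
# service itself): it calls the /query/3/ endpoint on the host/port that
# app.run() binds, using the third-party `requests` package.
import requests

def fetch_longest_calls(base_url="http://127.0.0.1:1313"):
    # /query/3/ answers with {'headers': [...], 'rows': [...]}
    resp = requests.get(base_url + "/query/3/")
    resp.raise_for_status()
    for row in resp.json()["rows"]:
        print(row)

# usage: fetch_longest_calls() while the Flask app is running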
| 38.16047 | 182 | 0.655385 | 2,475 | 19,500 | 5.080808 | 0.117172 | 0.044612 | 0.012883 | 0.02505 | 0.673797 | 0.629026 | 0.557694 | 0.508072 | 0.493201 | 0.448668 | 0 | 0.014299 | 0.182308 | 19,500 | 511 | 183 | 38.16047 | 0.774287 | 0.051795 | 0 | 0.48227 | 0 | 0.01182 | 0.345304 | 0.025683 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.009456 | 0.016548 | null | null | 0.016548 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c3d1d7e925f2c1752e9865895938aea4dee29d9 | 6,830 | py | Python | guardian/decorators.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | ["BSD-2-Clause"] | null | null | null | guardian/decorators.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | ["BSD-2-Clause"] | null | null | null | guardian/decorators.py | peopledoc/django-guardian | 459827c2329975113cbf0d11f4fd476b5689a055 | ["BSD-2-Clause"] | null | null | null |
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseForbidden, HttpResponseRedirect
from django.utils.functional import wraps
from django.utils.http import urlquote
from django.db.models import Model, get_model
from django.db.models.base import ModelBase
from django.db.models.query import QuerySet
from django.shortcuts import get_object_or_404, render_to_response
from django.template import RequestContext, TemplateDoesNotExist
from guardian.conf import settings as guardian_settings
from guardian.exceptions import GuardianError
def permission_required(perm, lookup_variables=None, **kwargs):
"""
Decorator for views that checks whether a user has a particular permission
enabled.
Optionally, instances for which check should be made may be passed as an
second argument or as a tuple parameters same as those passed to
``get_object_or_404`` but must be provided as pairs of strings.
:param login_url: if denied, user would be redirected to location set by
this parameter. Defaults to ``django.conf.settings.LOGIN_URL``.
:param redirect_field_name: name of the parameter passed if redirected.
Defaults to ``django.contrib.auth.REDIRECT_FIELD_NAME``.
:param return_403: if set to ``True`` then instead of redirecting to the
login page, response with status code 403 is returned (
``django.http.HttpResponseForbidden`` instance or rendered template -
see :setting:`GUARDIAN_RENDER_403`). Defaults to ``False``.
:param accept_global_perms: if set to ``True``, then *object level
permission* would be required **only if user does NOT have global
permission** for target *model*. If turned on, makes this decorator
like an extension over standard
``django.contrib.admin.decorators.permission_required`` as it would
check for global permissions first. Defaults to ``False``.
Examples::
@permission_required('auth.change_user', return_403=True)
def my_view(request):
return HttpResponse('Hello')
@permission_required('auth.change_user', (User, 'username', 'username'))
def my_view(request, username):
user = get_object_or_404(User, username=username)
return user.get_absolute_url()
@permission_required('auth.change_user',
(User, 'username', 'username', 'groups__name', 'group_name'))
def my_view(request, username, group_name):
user = get_object_or_404(User, username=username,
group__name=group_name)
return user.get_absolute_url()
"""
login_url = kwargs.pop('login_url', settings.LOGIN_URL)
redirect_field_name = kwargs.pop('redirect_field_name', REDIRECT_FIELD_NAME)
return_403 = kwargs.pop('return_403', False)
accept_global_perms = kwargs.pop('accept_global_perms', False)
# Check if perm is given as string in order not to decorate
# view function itself which makes debugging harder
if not isinstance(perm, basestring):
raise GuardianError("First argument must be in format: "
"'app_label.codename or a callable which return similar string'")
def decorator(view_func):
def _wrapped_view(request, *args, **kwargs):
# if more than one parameter is passed to the decorator we try to
# fetch object for which check would be made
obj = None
if lookup_variables:
model, lookups = lookup_variables[0], lookup_variables[1:]
# Parse model
if isinstance(model, basestring):
splitted = model.split('.')
if len(splitted) != 2:
raise GuardianError("If model should be looked up from "
"string it needs format: 'app_label.ModelClass'")
model = get_model(*splitted)
elif type(model) in (Model, ModelBase, QuerySet):
pass
else:
raise GuardianError("First lookup argument must always be "
"a model, string pointing at app/model or queryset. "
"Given: %s (type: %s)" % (model, type(model)))
# Parse lookups
if len(lookups) % 2 != 0:
raise GuardianError("Lookup variables must be provided "
"as pairs of lookup_string and view_arg")
lookup_dict = {}
for lookup, view_arg in zip(lookups[::2], lookups[1::2]):
if view_arg not in kwargs:
raise GuardianError("Argument %s was not passed "
"into view function" % view_arg)
lookup_dict[lookup] = kwargs[view_arg]
obj = get_object_or_404(model, **lookup_dict)
# Handles both original and with object provided permission check
# as ``obj`` defaults to None
has_perm = accept_global_perms and request.user.has_perm(perm)
if not has_perm and not request.user.has_perm(perm, obj):
if return_403:
if guardian_settings.RENDER_403:
try:
response = render_to_response(
guardian_settings.TEMPLATE_403, {},
RequestContext(request))
response.status_code = 403
return response
except TemplateDoesNotExist, e:
if settings.DEBUG:
raise e
elif guardian_settings.RAISE_403:
raise PermissionDenied
return HttpResponseForbidden()
else:
path = urlquote(request.get_full_path())
tup = login_url, redirect_field_name, path
return HttpResponseRedirect("%s?%s=%s" % tup)
return view_func(request, *args, **kwargs)
return wraps(view_func)(_wrapped_view)
return decorator
def permission_required_or_403(perm, *args, **kwargs):
"""
Simple wrapper for permission_required decorator.
    Standard Django's ``permission_required`` decorator redirects the user to the
    login page when the permission check fails. This decorator may be used to
    return ``HttpResponseForbidden`` (status 403) instead of redirecting.
    The only difference from the ``permission_required`` decorator is that this
    one always sets the ``return_403`` parameter to ``True``.
"""
kwargs['return_403'] = True
return permission_required(perm, *args, **kwargs)
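# A short usage sketch, mirroring the docstring examples above (the view and
# URL argument names are assumptions): guard a view with an object-level check
# that answers 403 instead of redirecting.
from django.contrib.auth.models import User
from django.http import HttpResponse

@permission_required_or_403('auth.change_user', (User, 'username', 'username'))
def edit_user(request, username):
    # reached only when request.user has auth.change_user on this User object
    return HttpResponse('Editing %s' % username)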
| 47.762238 | 80 | 0.630893 | 796 | 6,830 | 5.258794 | 0.273869 | 0.026278 | 0.028428 | 0.016722 | 0.11419 | 0.053989 | 0.043 | 0.043 | 0 | 0 | 0 | 0.014131 | 0.295461 | 6,830 | 142 | 81 | 48.098592 | 0.855777 | 0.048609 | 0 | 0.027027 | 0 | 0 | 0.116797 | 0.005387 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.027027 | 0.175676 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c46086ba91c653227726b101b253bd36be2a7f4 | 5,963 | py | Python | boolean2/tokenizer.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | ["MIT"] | null | null | null | boolean2/tokenizer.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | ["MIT"] | null | null | null | boolean2/tokenizer.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | ["MIT"] | null | null | null |
"""
Main tokenizer.
"""
from itertools import *
import sys, random
import util
import ply.lex as lex
class Lexer:
"""
Lexer for boolean rules
"""
literals = '=*,'
tokens = (
'LABEL', 'ID','STATE', 'ASSIGN', 'EQUAL',
'AND', 'OR', 'NOT',
'NUMBER', 'LPAREN','RPAREN', 'COMMA',
)
reserved = {
'and' : 'AND',
'or' : 'OR',
'not' : 'NOT',
'True' : 'STATE',
'False' : 'STATE',
'Random' : 'STATE',
}
def __init__(self, **kwargs):
# nothing here yet
self.lexer = lex.lex(object=self, **kwargs)
def t_ID( self, t):
"[a-zA-Z_\+\-][a-zA-Z_0-9\+\-]*"
# check for reserved words
t.type = self.reserved.get( t.value, 'ID')
return t
def t_LABEL (self, t):
"[0-9][0-9]*:"
t.value = int(t.value[:-1])
return t
def t_NUMBER(self, t):
"[\+-]*\d+\.?\d*"
try:
t.value = float(t.value)
except ValueError:
util.error( "value too large", t.value )
return t
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_ASSIGN = r'\*'
t_EQUAL = r'='
t_COMMA = r','
t_ignore = ' \t'
t_ignore_COMMENT = r'\#.*'
    def t_newline(self, t):
        r'\n+'
        # ply takes the token regex from the docstring, so it must be the pattern
        t.lexer.lineno += t.value.count("\n")
def t_error(self, t):
"Error message"
msg = "lexer error in '%s' at '%s'" % (self.last, t.value)
util.error( msg )
def tokenize_line(self, line ):
"Runs the lexer a single line retutns a list of tokens"
tokens = []
self.last = line
self.lexer.input( line )
while 1:
t = self.lexer.token()
if t:
tokens.append(t)
else:
break
return tokens
def tokenize_text(self, text):
"Runs the lexer on text and returns a list of lists of tokens"
return map( self.tokenize_line, util.split(text) )
def init_tokens( tokenlist ):
"""
Returns elments of the list that are initializers
"""
def cond( elem ):
return elem[1].type == 'EQUAL'
return filter( cond, tokenlist)
def label_tokens( tokenlist ):
"""
Returns elements where the first token is a LABEL
(updating rules with labels)
"""
def cond( elem ):
return elem[0].type == 'LABEL'
return filter( cond, tokenlist)
def async_tokens( tokenlist ):
"""
Returns elements where the second token is ASSIGN
(updating rules with no LABELs)
"""
def cond( elem ):
return elem[1].type == 'ASSIGN'
return filter( cond, tokenlist)
def update_tokens( tokenlist ):
"""
Returns tokens that perform updates
"""
def cond( elem ):
return elem[1].type == 'ASSIGN' or elem[2].type == 'ASSIGN'
return filter( cond, tokenlist)
def get_nodes( tokenlist ):
"""
Flattens the list of tokenlist and returns the value of all ID tokens
"""
def cond ( token ):
return token.type == 'ID'
def get( token):
return token.value
nodes = map(get, filter( cond, chain( *tokenlist )))
nodes = set(nodes)
util.check_case( nodes )
return nodes
def tok2line( tokens ):
"""
Turns a list of tokens into a line that can be parsed again
"""
elems = [ str(t.value) for t in tokens ]
if tokens[0].type == 'LABEL':
elems[0] = elems[0] + ':'
return ' '.join( elems )
def test():
"""
Main testrunnner
>>> import util
>>>
>>> text = '''
... A = B = True
... 1: A* = B
... 2: B* = A and B
... C* = not C
... E = False
... F = (1, 2, 3)
... '''
>>>
>>> lexer = Lexer()
>>> tokens = lexer.tokenize_text( text )
>>> tokens[0]
[LexToken(ID,'A',1,0), LexToken(EQUAL,'=',1,2), LexToken(ID,'B',1,4), LexToken(EQUAL,'=',1,6), LexToken(STATE,'True',1,8)]
>>> tokens[1]
[LexToken(LABEL,1,1,0), LexToken(ID,'A',1,3), LexToken(ASSIGN,'*',1,4), LexToken(EQUAL,'=',1,6), LexToken(ID,'B',1,8)]
>>> tokens[2]
[LexToken(LABEL,2,1,0), LexToken(ID,'B',1,3), LexToken(ASSIGN,'*',1,4), LexToken(EQUAL,'=',1,6), LexToken(ID,'A',1,8), LexToken(AND,'and',1,10), LexToken(ID,'B',1,14)]
>>> tokens[3]
[LexToken(ID,'C',1,0), LexToken(ASSIGN,'*',1,1), LexToken(EQUAL,'=',1,3), LexToken(NOT,'not',1,5), LexToken(ID,'C',1,9)]
>>>
>>> get_nodes( tokens )
set(['A', 'C', 'B', 'E', 'F'])
"""
# runs the local suite
import doctest
doctest.testmod( optionflags=doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE )
def tokenize( text ):
"A one step tokenizer"
lexer = Lexer()
return lexer.tokenize_text( text )
def modify_states( text, turnon=[], turnoff=[] ):
"""
Turns nodes on and off and comments out lines
that contain assignment to any of the nodes
Will use the main lexer.
"""
turnon = util.as_set( turnon )
turnoff = util.as_set( turnoff )
tokens = tokenize( text )
init = init_tokens( tokens )
init_lines = map(tok2line, init)
# override the initial values
init_lines.extend( [ '%s=True' % node for node in turnon ] )
init_lines.extend( [ '%s=False' % node for node in turnoff ] )
alter = turnon | turnoff
update = update_tokens ( tokens )
update_lines = []
for token in update:
line = tok2line( token)
if token[0].value in alter or token[1].value in alter:
line = '#' + line
update_lines.append( line )
all = init_lines + update_lines
return '\n'.join( all )
if __name__ == '__main__':
test()
lexer = Lexer()
text = """
A = B = C = False
D = True
1: A* = B
2: B* = A and B
C* = not C
D* = A
"""
print modify_states( text, turnon=['A', 'B'], turnoff=['C'] )
| 25.374468 | 171 | 0.528928 | 777 | 5,963 | 3.990991 | 0.234234 | 0.017414 | 0.022573 | 0.021928 | 0.159626 | 0.133505 | 0.098355 | 0.060626 | 0.039987 | 0.039987 | 0 | 0.019417 | 0.309073 | 5,963 | 235 | 172 | 25.374468 | 0.733252 | 0.015093 | 0 | 0.097744 | 0 | 0 | 0.139391 | 0.007136 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.037594 | null | null | 0.007519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c478777d84107b3217342ab649b11b3244e8389 | 7,606 | py | Python | pyec/distribution/bayes/structure/basic.py | hypernicon/pyec | 7072835c97d476fc45ffc3b34f5c3ec607988e6d | ["MIT"] | 2 | 2015-03-16T21:18:27.000Z | 2017-10-09T19:59:24.000Z | pyec/distribution/bayes/structure/basic.py | hypernicon/pyec | 7072835c97d476fc45ffc3b34f5c3ec607988e6d | ["MIT"] | null | null | null | pyec/distribution/bayes/structure/basic.py | hypernicon/pyec | 7072835c97d476fc45ffc3b34f5c3ec607988e6d | ["MIT"] | null | null | null |
"""
Copyright (C) 2012 Alan J Lockett
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from numpy import *
import sys
import weakref
class CyclicException(Exception):
pass
class DuplicateEdgeException(Exception):
pass
class IrreversibleEdgeException(Exception):
pass
class StructureSearch(object):
def __init__(self, scorer, autocommit=False):
self.scorer = scorer
self.autocommit = autocommit
self.network = None
def canReverse(self, newChild, newParent):
"""
check to ensure reverse link is not already present
(In a DAG, it should not be)
"""
if newChild.parents.has_key(newParent.index):
return False
return True
def admissibleEdge(self, var1, var2):
"""Is edge admissible in a DAG?"""
if var1.index == var2.index:
return False
if var1.parents.has_key(var2.index):
return False
if var2.parents.has_key(var1.index):
return False
return True
def merge(self, net, other, data, allowCyclic=False):
"""add the edges from other to self, preventing cycles if asked"""
self.network = net
net.computeEdgeStatistics()
other.computeEdgeStatistics()
indexMap = dict([(v.index, v) for v in net.variables])
undoList = []
def undo(update=True):
for undo2 in reversed(undoList):
undo2(False)
for frm, to in other.edges:
try:
frm2 = indexMap[frm.index]
to2 = indexMap[to.index]
undo2 = self.addEdge(to2, frm2, data, allowCyclic)
frm2.children = None
undoList.append(undo2)
except Exception, msg:
pass
self.network = None
return undo
def cross(self, net, other, data, allowCyclic=False):
self.network = net
net.computeEdgeStatistics()
other.computeEdgeStatistics()
indexMap = dict([(v.index, v) for v in net.variables])
indexMap2 = dict([(v.index, v) for v in other.variables])
undoList = []
if len(net.edges) == 0: return other
if len(other.edges) == 0: return net
if len(net.edges) < net.numVariables / 2 and len(other.edges) < other.numVariables / 2:
return net
def undo(update=True):
for undo2 in reversed(undoList):
undo2(False)
        for variable in net.variables:
            # with probability 1/2, take this variable's parent set from `other`
            if random.random_sample() < 0.5:
                # replace the current parents with those from other, avoiding cycles
                v2 = indexMap2[variable.index]
                for idx, parent in variable.parents.items():  # items() copies; parents mutates below
                    undoList.append(self.removeEdge(idx, variable, data))
                    parent.children = None
                for idx, parent2 in v2.parents.iteritems():
                    try:
                        parent = indexMap[parent2.index]
                        undoList.append(self.addEdge(variable, parent, data, allowCyclic))
                        parent.children = None
                    except Exception, msg:
                        pass
net.computeEdgeStatistics()
self.network = None
return undo
def removeEdge(self, i, variable, data=None):
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
toRemove = variable.parents[i]
variable.removeParent(toRemove)
toRemove.children = None
self.network.dirty = True
netref = weakref.ref(self.network)
varref = weakref.ref(variable)
remref = weakref.ref(toRemove)
def undo(update=True):
var = varref()
rem = remref()
net = netref()
if var is not None and rem is not None and net is not None:
var.addParent(rem)
rem.children = None
net.restoreComputedState(oldstate)
try:
self.network.updateVar(variable, data)
except:
undo()
raise
return undo
def addEdge(self, child, parent, data = None, allowCyclic = False):
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
if child.parents.has_key(parent.index):
raise DuplicateEdgeException, "Edge already exists"
child.addParent(parent)
parent.children = None
self.network.dirty = True
parentref = weakref.ref(parent)
childref = weakref.ref(child)
netref = weakref.ref(self.network)
def undo(update=True):
parent = parentref()
child = childref()
network = netref()
if parent is not None and child is not None and network is not None:
parent.children = None
child.removeParent(parent)
network.restoreComputedState(oldstate)
if (not allowCyclic) and not self.network.isAcyclic():
undo()
raise CyclicException, "Adding an edge makes network cyclic"
try:
self.network.updateVar(child, data)
except:
undo()
raise
return undo
def reverseEdge(self, i, variable, data=None, allowCyclic = False):
"""toReverse is new child, variable is new parent"""
self.network.computeEdgeStatistics()
oldstate = self.network.getComputedState()
toReverse = variable.parents[i]
if not self.canReverse(toReverse, variable):
raise IrreversibleEdgeException, "Edge reversal disallowed"
variable.removeParent(toReverse)
toReverse.addParent(variable)
variable.children = None
toReverse.children = None
self.network.dirty = True
varref = weakref.ref(variable)
revref = weakref.ref(toReverse)
netref = weakref.ref(self.network)
def undo(update=True):
variable = varref()
toReverse = revref()
network = netref()
if (variable is not None and
toReverse is not None and
network is not None):
variable.addParent(toReverse)
toReverse.removeParent(variable)
network.restoreComputedState(oldstate)
if (not allowCyclic) and not self.network.isAcyclic():
undo()
raise CyclicException, "Reversing an edge makes nework cyclic"
try:
self.network.updateVar(variable, data)
self.network.updateVar(toReverse, data)
except:
undo()
raise
return undo
def attempt(self, fn, exc):
try:
return fn()
except:
exc()
raise
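# The undo closures above hold weak references so a saved undo list does not
# keep dead networks or variables alive. A self-contained sketch of the same
# pattern (illustrative names, not part of the module):
import weakref

class Node(object):
    def __init__(self, value):
        self.value = value

def set_value(node, value):
    old = node.value
    node.value = value
    ref = weakref.ref(node)   # weak: the undo dies quietly with the node
    def undo():
        target = ref()
        if target is not None:
            target.value = old
    return undo

node = Node(1)
undo = set_value(node, 2)
assert node.value == 2
undo()
assert node.value == 1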
| 35.050691 | 460 | 0.620431 | 858 | 7,606 | 5.48951 | 0.258741 | 0.053716 | 0.017197 | 0.015287 | 0.310403 | 0.278981 | 0.206794 | 0.140127 | 0.128238 | 0.109554 | 0 | 0.006008 | 0.299763 | 7,606 | 217 | 461 | 35.050691 | 0.878333 | 0.007757 | 0 | 0.511905 | 0 | 0 | 0.018501 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.029762 | 0.017857 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c486bb219145330aa050e526b0e111823623d51 | 620 | py | Python | projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | ["MIT"] | null | null | null | projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | ["MIT"] | null | null | null | projects/code_combat/8_Cloudrip_Mountain/471-Distracting_Dungeon/distracting_dungeon.py | only-romano/junkyard | b60a25b2643f429cdafee438d20f9966178d6f36 | ["MIT"] | null | null | null |
def moveBothTo(point):
while hero.distanceTo(point) > 1:
hero.move(point)
hero.command(peasant, "move", point)
peasant = hero.findNearest(hero.findFriends())
while True:
hero.command(peasant, "buildXY", "decoy", peasant.pos.x + 2, peasant.pos.y)
    nextPoint = {"x": hero.pos.x, "y": hero.pos.y + 28}
moveBothTo(nextPoint)
nextPoint = {"x": hero.pos.x + 28, "y": hero.pos.y}
    enemy = hero.findNearestEnemy()
while enemy:
while enemy.health > 0:
hero.attack(enemy)
enemy = hero.findNearestEnemy()
moveBothTo(nextPoint)
| 31 | 80 | 0.606452 | 76 | 620 | 4.947368 | 0.355263 | 0.074468 | 0.095745 | 0.090426 | 0.095745 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014989 | 0.246774 | 620 | 19 | 81 | 32.631579 | 0.79015 | 0 | 0 | 0.125 | 0 | 0 | 0.033278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c4a4e05ac30862172f332ac22daa59c8c1ecce1 | 2,764 | py | Python | com/binghe/hacker/tools/script/ak/check_virus.py | ffffff0x/python-hacker | a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79 | ["Apache-2.0"] | 52 | 2019-02-11T13:02:20.000Z | 2022-02-06T07:43:55.000Z | com/binghe/hacker/tools/script/ak/check_virus.py | sunshinelyz/python-hacker | a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79 | ["Apache-2.0"] | null | null | null | com/binghe/hacker/tools/script/ak/check_virus.py | sunshinelyz/python-hacker | a2dc7f9031669a86bd2c87892c0a8c1e54bb2a79 | ["Apache-2.0"] | 15 | 2019-02-25T03:04:50.000Z | 2021-10-19T02:13:52.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- coding: gbk -*-
# Date: 2019/2/22
# Created by 冰河
# Description 将生成的bindshell.exe提交到vscan.novirusthanks.org检测
# 用法 python check_virus.py -f bindshell.exe
# 博客 https://blog.csdn.net/l1028386804
import re
import httplib
import time
import os
import optparse
from urlparse import urlparse
def printResults(url):
status = 200
host = urlparse(url)[1]
path = urlparse(url)[2]
if 'analysis' not in path:
while status != 302:
conn = httplib.HTTPConnection(host)
conn.request('GET', path)
resp = conn.getresponse()
status = resp.status
print '[+] Scanning file...'
conn.close()
time.sleep(15)
print '[+] Scan Complete.'
path = path.replace('file', 'analysis')
conn = httplib.HTTPConnection(host)
conn.request('GET', path)
resp = conn.getresponse()
data = resp.read()
conn.close()
reResults = re.findall(r'Detection rate:.*\)', data)
htmlStripRes = reResults[1]. \
replace('<font color=\'red\'>', ''). \
replace('</font>', '')
print '[+] ' + str(htmlStripRes)
def uploadFile(fileName):
print "[+] Uploading file to NoVirusThanks..."
fileContents = open(fileName, 'rb').read()
header = {'Content-Type': 'multipart/form-data; \
boundary=----WebKitFormBoundaryF17rwCZdGuPNPT9U'}
params = "------WebKitFormBoundaryF17rwCZdGuPNPT9U"
params += "\r\nContent-Disposition: form-data; " + \
"name=\"upfile\"; filename=\"" + str(fileName) + "\""
params += "\r\nContent-Type: " + \
"application/octet stream\r\n\r\n"
params += fileContents
params += "\r\n------WebKitFormBoundaryF17rwCZdGuPNPT9U"
params += "\r\nContent-Disposition: form-data; " + \
"name=\"submitfile\"\r\n"
params += "\r\nSubmit File\r\n"
params += "------WebKitFormBoundaryF17rwCZdGuPNPT9U--\r\n"
conn = httplib.HTTPConnection('vscan.novirusthanks.org')
conn.request("POST", "/", params, header)
response = conn.getresponse()
location = response.getheader('location')
conn.close()
return location
def main():
parser = optparse.OptionParser('usage %prog -f <filename>')
parser.add_option('-f', dest='fileName', type='string', \
help='specify filename')
(options, args) = parser.parse_args()
fileName = options.fileName
if fileName == None:
print parser.usage
exit(0)
elif os.path.isfile(fileName) == False:
print '[+] ' + fileName + ' does not exist.'
exit(0)
else:
loc = uploadFile(fileName)
printResults(loc)
if __name__ == '__main__':
main()
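# For reference, the same upload expressed with the third-party `requests`
# package, which assembles the multipart body and boundary itself (an
# alternative sketch, not the script's own method; field names are kept from
# the form built above):
import requests

def upload_with_requests(file_name):
    with open(file_name, 'rb') as f:
        files = {'upfile': (file_name, f)}
        data = {'submitfile': 'Submit File'}
        resp = requests.post('http://vscan.novirusthanks.org/', files=files,
                             data=data, allow_redirects=False)
    return resp.headers.get('location')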
| 29.72043 | 67 | 0.599132 | 291 | 2,764 | 5.652921 | 0.4811 | 0.007295 | 0.045593 | 0.035258 | 0.167781 | 0.167781 | 0.167781 | 0.167781 | 0.080243 | 0.080243 | 0 | 0.020457 | 0.239508 | 2,764 | 93 | 68 | 29.72043 | 0.762131 | 0.087192 | 0 | 0.185714 | 0 | 0 | 0.242846 | 0.079889 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.085714 | null | null | 0.114286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c606dd98dcd0e38522a604061eae8d10c8862e6 | 1,844 | py | Python | manuscript/link_checker.py | wuyang1002431655/tango_with_django_19 | 42d5878e4a12037daf04d785826357cd4351a16d | ["Apache-2.0"] | 244 | 2016-04-12T15:39:47.000Z | 2021-09-10T07:43:55.000Z | manuscript/link_checker.py | wuyang1002431655/tango_with_django_19 | 42d5878e4a12037daf04d785826357cd4351a16d | ["Apache-2.0"] | 57 | 2016-03-29T22:12:09.000Z | 2019-08-26T07:50:11.000Z | manuscript/link_checker.py | wuyang1002431655/tango_with_django_19 | 42d5878e4a12037daf04d785826357cd4351a16d | ["Apache-2.0"] | 311 | 2016-04-27T04:41:02.000Z | 2021-09-19T14:03:35.000Z |
# Checks for broken links in the book chapters, printing the status of each link found to stdout.
# The Python package 'requests' must be installed and available for this simple module to work.
# Author: David Maxwell
# Date: 2017-02-14
import re
import requests
def main(chapters_list_filename, hide_success=True):
"""
hide_success = a boolean switch that determines whether to show URLs that return a HTTP 200.
If set to true, only URLs that fail will be printed.
"""
chapters_f = open(chapters_list_filename, 'r')
pattern = re.compile(r'\[([^]]+)]\(\s*(http[s]?://[^)]+)\s*\)') # http://stackoverflow.com/a/23395483
print 'filename\tline_no\ttitle\turl\tstatus_code'
for filename in chapters_f:
filename = filename.strip()
if not filename or filename.startswith('{'): # Skip non-filename lines
continue
chapter_f = open(filename, 'r')
line_no = 1
for line in chapter_f:
line = line.strip()
for match in re.findall(pattern, line):
title = match[0]
url = match[1]
if '127.0.0.1' in url or 'localhost' in url: # Don't check localhost URLs
continue
request = None
status_code = -1
try:
request = requests.get(url)
status_code = request.status_code
except requests.exceptions.ConnectionError:
request = None
status_code = 'FAILED_TO_CONNECT'
if hide_success and status_code == 200:
continue
title = title.replace('\t', ' ')
print '{filename}\t{line_no}\t{title}\t{url}\t{status_code}'.format(filename=filename,
line_no=line_no,
title=title,
url=url,
status_code=status_code)
line_no = line_no + 1
chapter_f.close()
chapters_f.close()
if __name__ == '__main__':
main('Book.txt', hide_success=False)
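# A quick illustration of what the link pattern above matches (the sample
# text is illustrative): each hit yields a (title, url) pair from a
# Markdown-style link.
import re

pattern = re.compile(r'\[([^]]+)]\(\s*(http[s]?://[^)]+)\s*\)')
sample = 'See [Django](https://www.djangoproject.com/) and [Tango](http://www.tangowithdjango.com/).'
print(re.findall(pattern, sample))
# [('Django', 'https://www.djangoproject.com/'), ('Tango', 'http://www.tangowithdjango.com/')]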
| 28.369231 | 103 | 0.645879 | 256 | 1,844 | 4.496094 | 0.441406 | 0.069505 | 0.034752 | 0.03649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023571 | 0.240781 | 1,844 | 65 | 104 | 28.369231 | 0.798571 | 0.170824 | 0 | 0.125 | 0 | 0 | 0.13836 | 0.096633 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.05 | null | null | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c67194eb5ab82333266efd8ffcbf64d199afeff | 637 | py | Python | Luke 02/02.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | ["MIT"] | null | null | null | Luke 02/02.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | ["MIT"] | null | null | null | Luke 02/02.py | Nilzone-/Knowit-Julekalender-2017 | 66ef8a651277e0fef7d9278f3f129410b5b98ee0 | ["MIT"] | null | null | null |
import numpy as np
size = 1000
def create_wall(x, y):
return "{0:b}".format(x**3 + 12*x*y + 5*x*y**2).count("1") & 1
def build_grid():
return np.array([create_wall(j+1, i+1) for i in range(size) for j in range(size)]).reshape(size, size)
def visit(grid, x=0, y=0):
if grid[x][y]:
return
grid[x][y] = 1
if x > 0: visit(grid, x-1, y)
if x < size-1: visit(grid, x+1, y)
if y > 0: visit(grid, x, y-1)
if y < size-1: visit(grid, x, y+1)
grid = build_grid()
print "Original grid\n"
print grid
visit(grid)
print "\n\nAfter search\n"
print grid
print "\n%d unvisited points in grid" % (size**2 - np.count_nonzero(grid))
| 20.548387 | 104 | 0.620094 | 133 | 637 | 2.932331 | 0.308271 | 0.035897 | 0.128205 | 0.053846 | 0.187179 | 0.071795 | 0 | 0 | 0 | 0 | 0 | 0.050193 | 0.186813 | 637 | 31 | 105 | 20.548387 | 0.702703 | 0 | 0 | 0.095238 | 0 | 0 | 0.106583 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.047619 | null | null | 0.238095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
7c6b5cb13f50ba4f535dc82987b58898ad693a5f | 5,966 | py | Python | data/external/repositories/42139/KDDCup13Track2-master/blocking.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | null | null | null | data/external/repositories/42139/KDDCup13Track2-master/blocking.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | null | null | null | data/external/repositories/42139/KDDCup13Track2-master/blocking.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | ["MIT"] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z |
#!/usr/bin/env python
from common import *
import csv
import argparse
from unidecode import unidecode
from nameparser import constants as npc
from collections import defaultdict
import cPickle as pickle
import re
stopwords_custom = set(['document', 'preparation', 'system', 'consortium', 'committee', 'international', 'artificial', 'network', 'distributed', 'based', 'research', 'language', 'technology', 'project', 'design', 'computer', 'control', 'object', 'internet', 'propulsion', 'corp', 'workshop', 'xml', 'world', 'work', 'thesis', 'test', 'tool', 'structure', 'statistical', 'laboratory', 'ltd', 'objects', 'process', 'scheduling', 'september', 'special', 'student', 'programs', 'capacitated', 'balancing', 'assembly', 'aspect', 'model', 'inc', 'psychological', 'psychology', 'mohammed', 'computing', 'software', 'programming', 'new', 'applications', 'jet', 'propulsion', 'classification', 'recommendation'])
stopwords = stopwords_custom | npc.TITLES | npc.PREFIXES | npc.SUFFIXES | npc.CONJUNCTIONS
def bin_exactsamename(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_samename(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname_joined']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fFfL(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fFfL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fF3L(authors, max_bin_size=20):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fFiL'] and len(a['name_last']) >= 3 and len(a['fFiL']) > 2:
bins[a['fFiL'] + a['name_last'][1:3]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_fFiL(authors, max_bin_size=20):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if len(a['fFiL']) > 2:
bins[a['fFiL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_iFfL(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['iFfL']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_fullparsedname(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
bins[a['fullname_parsed']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_iFoffbyoneL(authors, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname'] and a['name_first'] and a['name_last']:
bins[a['name_first'][0] + a['name_last']].add(id)
if len(a['name_last']) > 1:
bins[a['name_first'][0] + a['name_last'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_2FoffbyoneL(authors, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname'] and len(a['name_first']) >= 2 and a['name_last']:
bins[a['name_first'][0:2] + a['name_last']].add(id)
if len(a['name_last']) > 1:
bins[a['name_first'][0:2] + a['name_last'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_metaphone(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if a['metaphone_fullname']:
bins[a['metaphone_fullname']].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
# bk = bins.keys()
# for b in bk:
# if len(bins[b]) > max_bin_size:
# del bins[b]
return bins
def bin_offbylastone(authors):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname_joined']:
bins[a['fullname_joined']].add(id)
if len(a['fullname_joined']) > 1:
bins[a['fullname_joined'][:-1]].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
return bins
def bin_token(authors, nw=2, max_bin_size=100):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['name']:
tokens = re.sub("[^\w]", " ", a['name']).split()
tokens = [v for v in tokens if len(v) > 2 and v not in stopwords]
ngrams = zip(*[tokens[j:] for j in range(nw)])
for p in ngrams:
pg = ' '.join(p)
if len(pg) > len(p)*2-1:
bins[pg].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def bin_ngrams(authors, n=15, max_bin_size=30):
bins = defaultdict(set)
for i, (id, a) in enumerate(authors.iteritems()):
if ':' not in a['fullname']:
lname = a['fullname']
ngrams = zip(*[lname[j:] for j in range(n)])
for p in ngrams:
if not any(((s in p) for s in stopwords_custom)):
bins[''.join(p)].add(id)
if (i+1) % 10000 == 0:
print_err(i+1)
bk = bins.keys()
for b in bk:
if len(bins[b]) > max_bin_size:
del bins[b]
return bins
def main():
parser = argparse.ArgumentParser()
parser.add_argument('authorprefeat', nargs='?', default='generated/Author_prefeat.pickle')
parser.add_argument('type', nargs='?', default='iFfL')
args = parser.parse_args()
print_err("Loading pickled author pre-features")
authors = pickle.load(open(args.authorprefeat, 'rb'))
bins = globals()["bin_"+args.type](authors)
bins = sorted([(len(bv), blabel, bv) for blabel, bv in bins.iteritems()], reverse=True)
for _, binlabel, binv in bins:
print binlabel + ';' + ','.join(map(str, sorted(binv)))
if __name__ == "__main__":
main()
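An editorial aside (hedged sketch, not part of the original file): every bin_* function above is an instance of one blocking pattern for author disambiguation, namely derive a key from an author's name and collect the ids of authors sharing that key, optionally discarding oversized bins. The helper below is illustrative only:
from collections import defaultdict
def bin_by(authors, key_func, max_bin_size=None):
    bins = defaultdict(set)
    for author_id, author in authors.items():
        key = key_func(author)
        if key:
            bins[key].add(author_id)
    if max_bin_size is not None:
        # drop bins too large to be useful for pairwise comparison
        bins = {k: v for k, v in bins.items() if len(v) <= max_bin_size}
    return bins
# e.g. bin_iFfL(authors) is roughly bin_by(authors, lambda a: a['iFfL'])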
| 29.979899
| 703
| 0.632417
| 936
| 5,966
| 3.930556
| 0.192308
| 0.014134
| 0.030443
| 0.074205
| 0.563468
| 0.555858
| 0.555858
| 0.537374
| 0.537374
| 0.522153
| 0
| 0.029124
| 0.177003
| 5,966
| 199
| 704
| 29.9799
| 0.720163
| 0.016929
| 0
| 0.58642
| 0
| 0
| 0.15714
| 0.005289
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.049383
| null | null | 0.092593
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c7633cae0980db6c9c40b9c34972bdb7f5c0282
| 7,139
|
py
|
Python
|
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | 2
|
2017-10-17T10:08:25.000Z
|
2017-10-17T11:17:39.000Z
|
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | null | null | null |
Detect.py
|
SymenYang/Vanish-Point-Detect
|
0e83e2b2a86e9523ed4a86f592f3a8dee594d691
|
[
"MIT"
] | null | null | null |
import cv2 as cv
import numpy as np
import copy
import math
import Edges
import INTPoint
eps = 1e-7
votes = {}
Groups = []
VPoints = []
Centers = []
Cluster = []
voters = {}
def getEdges(image):
#moved to Edges.py
return Edges.getEdges(image)
def getLines(edges):
#moved to Edges.py
return Edges.getLines(edges)
def checkRound(pos,edges):
#moved to Edges.py
return Edges.checkRound(pos,edges)
def outOfSize(pos,edges):
#moved to Edges.py
return Edges.outOfSize(pos,edges)
def extenLine(line,edges):
#moved to Edges.py
return Edges.extenLine(line,edges)
def extenLines(lines,edges):
#moved to Edges.py
return Edges.extenLines(lines,edges)
def shouldMerge(line1,line2):
#moved to Edges.py
return Edges.shouldMerge(line1,line2)
def mergeLines(lines):
#moved to Edges.py
return Edges.mergeLines(lines)
def getLineABC(line):
#moved to Edges.py
return Edges.getLineABC(line)
def getCirAnch(a,b):
#moved to Edges.py
return Edges.getCirAnch(a,b)
def getCrossPoint(linea,lineb):
#moved to INTPoint.py
return INTPoint.getIntersectPoint(linea,lineb)
def sortLines(lines):
#moved to Edges.py
return Edges.sortLines(lines)
def getVPoints2(lines,arange = 0.2617):
#moved to INTPoint.py
global VPoints
VPoints = INTPoint.getVPoints2(lines,arange)
return VPoints
def getVPoints(num = 16):
# deprecated: no longer used because it was too slow
for i in range(0,num + 1,1):
lens = len(Groups[i])
for j in range(0,lens,1):
for k in range(j+1,lens,1):
VPoints.append(getCrossPoint(Groups[i][j],Groups[i][k]))
def removeSame(list):
#moved to INTPoint.py
return INTPoint.removeSame(list)
def getLinesLength(line):
#moved to INTPoint.py
return INTPoint.getLinesLength(line)
def getMidPoint(line):
#moved to INTPoint.py
return INTPoint.getMidPoint(line)
def getArch(line,point):
#moved to INTPoint.py
return INTPoint.getArch(line,point)
def voteForPoint(lines):
#moved to INTPoint.py
global votes
global voters
votes,voters = INTPoint.voteForPoint(lines,VPoints)
return
def getGraPoint(points):
count = 1.0
sumx = 0.0
sumy = 0.0
for point in points:
w = votes[point]
count += w
sumx += w * point[0]
sumy += w * point[1]
return (sumx/count,sumy/count)
def devideIntoPoints(Points):
global Cluster
lens = len(Cluster)
for i in range(0,lens,1):
Cluster[i] = []
for point in Points:
if point[0] == 'p' or point[0] == 'h' or point[0] == 'v':
continue
if votes[point] == 0:
continue
minlens = 1e15
minpos = 0
now = -1
for cen in Centers:
now += 1
lens = getLinesLength((point[0],point[1],cen[0],cen[1]))
if lens < minlens:
minlens = lens
minpos = now
Cluster[minpos].append(point)
def KMean(points,K = 3,step = 50):
global Cluster
global Centers
Cluster = []
Centers = []
if K == 1:
step = 1
for i in range(0,K,1):
Cluster.append([])
Centers.append([0,0])
count = 0
for point in points:
if point[0] != 'p' and point[0] != 'v' and point[0] != 'h' and votes[point] != 0:
Centers[count][0] = point[0]
Centers[count][1] = point[1]
count += 1
if count == K:
break
for i in range(0,step,1):
devideIntoPoints(points)
for i in range(0,K,1):
Centers[i] = getGraPoint(Cluster[i])
def getFinal(points):
count = 0.0
num = 0
p1 = 0.0
ret1 = []
p2 = 0.0
ret2 = []
for item in votes:
if item[0] == 'p' or item[0] == 'h' or item[0] == 'v':
if votes[item] > p1:
p2 = p1
ret2 = ret1
p1 = votes[item]
ret1 = item
else:
if votes[item] > p2:
p2 = votes[item]
ret2 = item
else:
count += votes[item]
num += 1
K = 3
ret = []
count = count / num * 0.1
if p1 > count:
K -= 1
ret.append(ret1)
if p2 > count:
K -= 1
ret.append(ret2)
KMean(points,K)
for i in range(0,K,1):
ret.append(Centers[i])
return ret
def deal(inputname,outputname):
global votes
global Groups
global VPoints
global Centers
global Cluster
global voters
votes = {}
Groups = []
VPoints = []
Centers = []
Cluster = []
voters = {}
image = cv.imread(inputname)
edges = getEdges(image)
cv.imwrite(outputname + 'edges.jpg',edges)
lines = getLines(edges)
lines2 = copy.deepcopy(lines)
lines2 = extenLines(lines2,edges)
lines2 = mergeLines(lines2)
#devideIntoGroups(lines2,3)
lines2 = sortLines(lines2)
getVPoints2(lines2)
VPoints = removeSame(VPoints)
voteForPoint(lines2)
votes2 = sorted(votes.iteritems(),key=lambda votes:votes[1],reverse=True)
lenofvotes = min(len(votes2),max(5,int(len(votes2) * 0.2)))
votesFinal = {}
VPoints = []
for i in range(0,lenofvotes,1):
votesFinal[votes2[i][0]] = votes2[i][1]
VPoints.append(votes2[i][0])
for i in range(lenofvotes,len(votes2),1):
if votes2[i][0][0] == 'h' or votes2[i][0][0] == 'v' or votes2[i][0][0] == 'p':
votesFinal[votes2[i][0]] = votes2[i][1]
VPoints.append(votes2[i][0])
votes = votesFinal
ans = getFinal(VPoints)
print ans
edges = cv.cvtColor(edges,cv.COLOR_GRAY2BGR)
edges2 = copy.deepcopy(edges)
for item in lines:
if item[0] == 'N':
continue
cv.line(edges,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)
for item in lines2:
cv.line(edges2,(item[0],item[1]),(item[2],item[3]),(0,0,255),2)
color = [255,0,0,0]
for clu in Cluster:
for i in range(0,4,1):
if color[i] == 255:
color[i+1] = 255
color[i] = 0
break
for point in clu:
if point[0] > 0 and point[1] > 0:
if point[0] < edges.shape[1] and point[1] < edges.shape[0]:
if votes[point] == 0:
continue
cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(color[1],color[2],color[3]),10)
for point in ans:
if point[0] > 0 and point[1] > 0:
if point[0] < edges.shape[1] and point[1] < edges.shape[0]:
cv.line(edges2,(int(point[0]),int(point[1])),(int(point[0]),int(point[1])),(255,255,255),10)
cv.imwrite(outputname + 'linedetect.jpg',edges)
cv.imwrite(outputname + 'answer.jpg',edges2)
fd = open(outputname + 'answer.txt','w')
fd.write('(' + str(ans[0][0]) + ',' + str(ans[0][1]) + ')(' + str(ans[1][0]) + ',' + str(ans[1][1]) + ')(' + str(ans[2][0]) + ',' + str(ans[2][1]) + ')')
fd.close()
deal("data/1.jpg",'1')
| 26.838346
| 157
| 0.559462
| 979
| 7,139
| 4.078652
| 0.162411
| 0.030053
| 0.033058
| 0.038567
| 0.314801
| 0.261207
| 0.184323
| 0.118708
| 0.089657
| 0.089657
| 0
| 0.049771
| 0.2964
| 7,139
| 266
| 158
| 26.838346
| 0.745172
| 0.057851
| 0
| 0.237209
| 0
| 0
| 0.011481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.027907
| null | null | 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c85f2097ce6518402e3aa24b38cc365cc5ffeaa
| 4,981
|
py
|
Python
|
Whats Cooking/KaggleCookingComparison.py
|
rupakc/Kaggle-Compendium
|
61634ba742f9a0239f2d1e45973c4bb477ac6306
|
[
"MIT"
] | 17
|
2018-01-11T05:49:06.000Z
|
2021-08-22T16:50:10.000Z
|
Whats Cooking/KaggleCookingComparison.py
|
Tuanlase02874/Machine-Learning-Kaggle
|
c31651acd8f2407d8b60774e843a2527ce19b013
|
[
"MIT"
] | null | null | null |
Whats Cooking/KaggleCookingComparison.py
|
Tuanlase02874/Machine-Learning-Kaggle
|
c31651acd8f2407d8b60774e843a2527ce19b013
|
[
"MIT"
] | 8
|
2017-11-27T06:58:50.000Z
|
2021-08-22T16:50:13.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 26 13:20:45 2015
Code for Kaggle What's Cooking Competition
It compares the following classifiers using tf-idf, hashing-vector and bag-of-words features
1. Adaboost
2. Extratrees
3. Bagging
4. Random Forests
@author: Rupak Chakraborty
"""
import numpy as np
import time
import json
import ClassificationUtils
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn import metrics
# Create the feature extractors
bag_of_words = CountVectorizer(stop_words='english')
tfidf = TfidfVectorizer(stop_words='english')
hashvec = HashingVectorizer(stop_words='english')
# Create the Classifier objects
adaboost = AdaBoostClassifier()
randomforest = RandomForestClassifier()
extratrees = ExtraTreesClassifier()
bagging = BaggingClassifier()
filepath = "train.json"
f = open(filepath,"r")
content = f.read()
jsonData = json.loads(content)
cuisine_set = set([])
ingredient_set = set([])
cuisine_map = {}
cuisine_numerical_map = {}
ingredient_numerical_map = {}
ingredient_map = {}
ingredient_list = list([])
c = 0
print "Size of the data set : ", len(jsonData)
print "Starting Loading of Data Set...."
start = time.time()
for recipe in jsonData:
if "cuisine" in recipe:
s = ""
if recipe["cuisine"] in cuisine_set:
cuisine_map[recipe["cuisine"]] = cuisine_map[recipe["cuisine"]] + 1
else:
cuisine_map[recipe["cuisine"]] = 1
cuisine_set.add(recipe["cuisine"])
for ingredient in recipe["ingredients"]:
if ingredient in ingredient_set:
ingredient_map[ingredient] = ingredient_map[ingredient] + 1
else:
ingredient_map[ingredient] = 1
ingredient_set.add(ingredient)
s = s + " " + ingredient
ingredient_list.append(s)
end = time.time()
print "Time Taken to Load the Dataset : ",end-start
for cuisine in cuisine_set:
cuisine_numerical_map[cuisine] = c
c = c+1
c = 0
for ingredient in ingredient_set:
ingredient_numerical_map[ingredient] = c
c = c+1
print "Starting Feature Extracting ......"
start = time.time()
train_labels = np.zeros(len(ingredient_list))
train_data_tfidf = tfidf.fit_transform(ingredient_list)
train_data_hash = hashvec.fit_transform(ingredient_list)
train_data_bag = bag_of_words.fit_transform(ingredient_list)
c = 0
for recipe in jsonData:
if "cuisine" in recipe:
train_labels[c] = cuisine_numerical_map[recipe["cuisine"]]
c = c+1
end = time.time()
print "Time Taken to Train Extract Different Features : ", end-start
test_labels = train_labels[1:30000]
test_data_tfidf = tfidf.transform(ingredient_list[1:30000])
test_data_hash = hashvec.transform(ingredient_list[1:30000])
test_data_bag = bag_of_words.transform(ingredient_list[1:30000])
print "Starting Training of Models for Hash Vectorizer Feature....."
start = time.time()
adaboost.fit(train_data_bag,train_labels)
randomforest.fit(train_data_bag,train_labels)
extratrees.fit(train_data_bag,train_labels)
bagging.fit(train_data_bag,train_labels)
end=time.time()
print "Time Taken to train all Ensemble Models : ", end-start
print "Starting Prediction of Test Labels ...."
start = time.time()
ada_predict = adaboost.predict(test_data_bag)
rf_predict = randomforest.predict(test_data_bag)
extree_predict = extratrees.predict(test_data_bag)
bagging_predict = bagging.predict(test_data_bag)
end = time.time()
print "Time Taken to Test the models : ", end-start
print "Accuracy of AdaBoost Algorithm : ", metrics.accuracy_score(test_labels,ada_predict)
print "Accuracy of Random Forests : ", metrics.accuracy_score(test_labels,rf_predict)
print "Accuracy of Extra Trees : ", metrics.accuracy_score(test_labels,extree_predict)
print "Accuracy of Bagging : ", metrics.accuracy_score(test_labels,bagging_predict)
# Saving the tf-idf model and classifiers
ClassificationUtils.save_classifier("ada_bag_cook.pickle",adaboost)
ClassificationUtils.save_classifier("rf_bag_cook.pickle",randomforest)
ClassificationUtils.save_classifier("extree_bag_cook.pickle",extratrees)
ClassificationUtils.save_classifier("bagging_bag_cook.pickle",bagging)
ClassificationUtils.save_classifier("bag_of_words.pickle",tfidf)
def printIngredientDistribution():
print "----------- Distribution of the Recipe Ingredients ------------------"
for key in ingredient_map.keys():
print key, " : " ,ingredient_map[key]
def printCuisineDistribution():
print "----------- Distribution of the Cuisines ------------------"
for key in cuisine_map.keys():
print key, " : " ,cuisine_map[key]
| 32.344156
| 90
| 0.739611
| 635
| 4,981
| 5.607874
| 0.233071
| 0.019657
| 0.038753
| 0.028082
| 0.251896
| 0.155013
| 0.074136
| 0.038192
| 0
| 0
| 0
| 0.012103
| 0.153985
| 4,981
| 153
| 91
| 32.555556
| 0.832938
| 0.024493
| 0
| 0.183486
| 0
| 0
| 0.171447
| 0.009778
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.110092
| null | null | 0.174312
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7c98495a22a6d3d8755497c989624d8a5c427192
| 60,943
|
py
|
Python
|
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
elastalert/alerts.py
|
dekhrekh/elastalert
|
0c1ce30302c575bd0be404582cd452f38c01c774
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import subprocess
import sys
import warnings
from email.mime.text import MIMEText
from email.utils import formatdate
from smtplib import SMTP
from smtplib import SMTP_SSL
from smtplib import SMTPAuthenticationError
from smtplib import SMTPException
from socket import error
import boto3
import requests
import stomp
from exotel import Exotel
from jira.client import JIRA
from jira.exceptions import JIRAError
from requests.exceptions import RequestException
from staticconf.loader import yaml_loader
from texttable import Texttable
from twilio.base.exceptions import TwilioRestException
from twilio.rest import Client as TwilioClient
from util import EAException
from util import elastalert_logger
from util import lookup_es_key
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
class DateTimeEncoder(json.JSONEncoder):
def default(self, obj):
if hasattr(obj, 'isoformat'):
return obj.isoformat()
else:
return json.JSONEncoder.default(self, obj)
class BasicMatchString(object):
""" Creates a string containing fields in match for the given rule. """
def __init__(self, rule, match):
self.rule = rule
self.match = match
def _ensure_new_line(self):
while self.text[-2:] != '\n\n':
self.text += '\n'
def _add_custom_alert_text(self):
missing = '<MISSING VALUE>'
alert_text = unicode(self.rule.get('alert_text', ''))
if 'alert_text_args' in self.rule:
alert_text_args = self.rule.get('alert_text_args')
alert_text_values = [lookup_es_key(self.match, arg) for arg in alert_text_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_text_values)):
if alert_text_values[i] is None:
alert_value = self.rule.get(alert_text_args[i])
if alert_value:
alert_text_values[i] = alert_value
alert_text_values = [missing if val is None else val for val in alert_text_values]
alert_text = alert_text.format(*alert_text_values)
elif 'alert_text_kw' in self.rule:
kw = {}
for name, kw_name in self.rule.get('alert_text_kw').items():
val = lookup_es_key(self.match, name)
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
if val is None:
val = self.rule.get(name)
kw[kw_name] = missing if val is None else val
alert_text = alert_text.format(**kw)
self.text += alert_text
def _add_rule_text(self):
self.text += self.rule['type'].get_match_str(self.match)
def _add_top_counts(self):
for key, counts in self.match.items():
if key.startswith('top_events_'):
self.text += '%s:\n' % (key[11:])
top_events = counts.items()
if not top_events:
self.text += 'No events found.\n'
else:
top_events.sort(key=lambda x: x[1], reverse=True)
for term, count in top_events:
self.text += '%s: %s\n' % (term, count)
self.text += '\n'
def _add_match_items(self):
match_items = self.match.items()
match_items.sort(key=lambda x: x[0])
for key, value in match_items:
if key.startswith('top_events_'):
continue
value_str = unicode(value)
value_str = value_str.replace('\\n', '\n')
if type(value) in [list, dict]:
try:
value_str = self._pretty_print_as_json(value)
except TypeError:
# Non serializable object, fallback to str
pass
self.text += '%s: %s\n' % (key, value_str)
def _pretty_print_as_json(self, blob):
try:
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, ensure_ascii=False)
except UnicodeDecodeError:
# This blob contains non-unicode, so let's pretend it's Latin-1 to show something
return json.dumps(blob, cls=DateTimeEncoder, sort_keys=True, indent=4, encoding='Latin-1', ensure_ascii=False)
def __str__(self):
self.text = ''
if 'alert_text' not in self.rule:
self.text += self.rule['name'] + '\n\n'
self._add_custom_alert_text()
self._ensure_new_line()
if self.rule.get('alert_text_type') != 'alert_text_only':
self._add_rule_text()
self._ensure_new_line()
if self.rule.get('top_count_keys'):
self._add_top_counts()
if self.rule.get('alert_text_type') != 'exclude_fields':
self._add_match_items()
return self.text
class JiraFormattedMatchString(BasicMatchString):
def _add_match_items(self):
match_items = dict([(x, y) for x, y in self.match.items() if not x.startswith('top_events_')])
json_blob = self._pretty_print_as_json(match_items)
preformatted_text = u'{{code:json}}{0}{{code}}'.format(json_blob)
self.text += preformatted_text
class Alerter(object):
""" Base class for types of alerts.
:param rule: The rule configuration.
"""
required_options = frozenset([])
def __init__(self, rule):
self.rule = rule
# pipeline object is created by ElastAlerter.send_alert()
# and attached to each alerter used by a rule before calling alert()
self.pipeline = None
self.resolve_rule_references(self.rule)
def resolve_rule_references(self, root):
# Support referencing other top-level rule properties to avoid redundant copy/paste
if type(root) == list:
# Make a copy since we may be modifying the contents of the structure we're walking
for i, item in enumerate(copy.copy(root)):
if type(item) == dict or type(item) == list:
self.resolve_rule_references(root[i])
else:
root[i] = self.resolve_rule_reference(item)
elif type(root) == dict:
# Make a copy since we may be modifying the contents of the structure we're walking
for key, value in root.copy().iteritems():
if type(value) == dict or type(value) == list:
self.resolve_rule_references(root[key])
else:
root[key] = self.resolve_rule_reference(value)
def resolve_rule_reference(self, value):
strValue = unicode(value)
if strValue.startswith('$') and strValue.endswith('$') and strValue[1:-1] in self.rule:
if type(value) == int:
return int(self.rule[strValue[1:-1]])
else:
return self.rule[strValue[1:-1]]
else:
return value
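# Hedged editorial illustration (hypothetical rule, not upstream code):
# resolve_rule_reference turns '$name$' into self.rule['name'], so with
#     rule = {'name': 'cpu_spike', 'note': '$name$'}
# the constructor's resolve_rule_references pass leaves
#     rule['note'] == 'cpu_spike'
# while non-reference values pass through unchanged.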
def alert(self, match):
""" Send an alert. Match is a dictionary of information about the alert.
:param match: A dictionary of relevant information to the alert.
"""
raise NotImplementedError()
def get_info(self):
""" Returns a dictionary of data related to this alert. At minimum, this should contain
a field type corresponding to the type of Alerter. """
return {'type': 'Unknown'}
def create_title(self, matches):
""" Creates custom alert title to be used, e.g. as an e-mail subject or JIRA issue summary.
:param matches: A list of dictionaries of relevant information to the alert.
"""
if 'alert_subject' in self.rule:
return self.create_custom_title(matches)
return self.create_default_title(matches)
def create_custom_title(self, matches):
alert_subject = unicode(self.rule['alert_subject'])
if 'alert_subject_args' in self.rule:
alert_subject_args = self.rule['alert_subject_args']
alert_subject_values = [lookup_es_key(matches[0], arg) for arg in alert_subject_args]
# Support referencing other top-level rule properties
# This technically may not work if there is a top-level rule property with the same name
# as an es result key, since it would have been matched in the lookup_es_key call above
for i in xrange(len(alert_subject_values)):
if alert_subject_values[i] is None:
alert_value = self.rule.get(alert_subject_args[i])
if alert_value:
alert_subject_values[i] = alert_value
alert_subject_values = ['<MISSING VALUE>' if val is None else val for val in alert_subject_values]
return alert_subject.format(*alert_subject_values)
return alert_subject
def create_alert_body(self, matches):
body = self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = ''
if 'aggregation' in self.rule and 'summary_table_fields' in self.rule:
summary_table_fields = self.rule['summary_table_fields']
if not isinstance(summary_table_fields, list):
summary_table_fields = [summary_table_fields]
# Include a count aggregation so that we can see at a glance how many of each aggregation_key were encountered
summary_table_fields_with_count = summary_table_fields + ['count']
text += "Aggregation resulted in the following data for summary_table_fields ==> {0}:\n\n".format(
summary_table_fields_with_count
)
text_table = Texttable()
text_table.header(summary_table_fields_with_count)
match_aggregation = {}
# Maintain an aggregate count for each unique key encountered in the aggregation period
for match in matches:
key_tuple = tuple([unicode(lookup_es_key(match, key)) for key in summary_table_fields])
if key_tuple not in match_aggregation:
match_aggregation[key_tuple] = 1
else:
match_aggregation[key_tuple] = match_aggregation[key_tuple] + 1
for keys, count in match_aggregation.iteritems():
text_table.add_row([key for key in keys] + [count])
text += text_table.draw() + '\n\n'
return unicode(text)
def create_default_title(self, matches):
return self.rule['name']
def get_account(self, account_file):
""" Gets the username and password from an account file.
:param account_file: Name of the file which contains user and password information.
"""
account_conf = yaml_loader(account_file)
if 'user' not in account_conf or 'password' not in account_conf:
raise EAException('Account file must have user and password fields')
self.user = account_conf['user']
self.password = account_conf['password']
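# Hedged editorial sketch (not part of the upstream module): the Alerter
# contract above needs only alert() and get_info(); a minimal subclass:
class ExampleLogAlerter(Alerter):
    """ Illustrative only: writes each match to the elastalert log. """
    def alert(self, matches):
        for match in matches:
            elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
    def get_info(self):
        return {'type': 'example_log'}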
class StompAlerter(Alerter):
""" The stomp alerter publishes alerts via stomp to a broker. """
required_options = frozenset(['stomp_hostname', 'stomp_hostport', 'stomp_login', 'stomp_password'])
def alert(self, matches):
alerts = []
qk = self.rule.get('query_key', None)
fullmessage = {}
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'1)Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = match[qk]
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
alerts.append(
'2)Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field']))
)
fullmessage['match'] = lookup_es_key(match, self.rule['timestamp_field'])
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
fullmessage['alerts'] = alerts
fullmessage['rule'] = self.rule['name']
fullmessage['matching'] = unicode(BasicMatchString(self.rule, match))
fullmessage['alertDate'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
fullmessage['body'] = self.create_alert_body(matches)
self.stomp_hostname = self.rule.get('stomp_hostname', 'localhost')
self.stomp_hostport = self.rule.get('stomp_hostport', '61613')
self.stomp_login = self.rule.get('stomp_login', 'admin')
self.stomp_password = self.rule.get('stomp_password', 'admin')
self.stomp_destination = self.rule.get('stomp_destination', '/queue/ALERT')
conn = stomp.Connection([(self.stomp_hostname, self.stomp_hostport)])
conn.start()
conn.connect(self.stomp_login, self.stomp_password)
conn.send(self.stomp_destination, json.dumps(fullmessage))
conn.disconnect()
def get_info(self):
return {'type': 'stomp'}
class DebugAlerter(Alerter):
""" The debug alerter uses a Python logger (by default, alerting to terminal). """
def alert(self, matches):
qk = self.rule.get('query_key', None)
for match in matches:
if qk in match:
elastalert_logger.info(
'Alert for %s, %s at %s:' % (self.rule['name'], match[qk], lookup_es_key(match, self.rule['timestamp_field'])))
else:
elastalert_logger.info('Alert for %s at %s:' % (self.rule['name'], lookup_es_key(match, self.rule['timestamp_field'])))
elastalert_logger.info(unicode(BasicMatchString(self.rule, match)))
def get_info(self):
return {'type': 'debug'}
class EmailAlerter(Alerter):
""" Sends an email alert """
required_options = frozenset(['email'])
def __init__(self, *args):
super(EmailAlerter, self).__init__(*args)
self.smtp_host = self.rule.get('smtp_host', 'localhost')
self.smtp_ssl = self.rule.get('smtp_ssl', False)
self.from_addr = self.rule.get('from_addr', 'ElastAlert')
self.smtp_port = self.rule.get('smtp_port')
if self.rule.get('smtp_auth_file'):
self.get_account(self.rule['smtp_auth_file'])
self.smtp_key_file = self.rule.get('smtp_key_file')
self.smtp_cert_file = self.rule.get('smtp_cert_file')
# Convert email to a list if it isn't already
if isinstance(self.rule['email'], basestring):
self.rule['email'] = [self.rule['email']]
# If there is a cc then also convert it a list if it isn't
cc = self.rule.get('cc')
if cc and isinstance(cc, basestring):
self.rule['cc'] = [self.rule['cc']]
# If there is a bcc then also convert it to a list if it isn't
bcc = self.rule.get('bcc')
if bcc and isinstance(bcc, basestring):
self.rule['bcc'] = [self.rule['bcc']]
add_suffix = self.rule.get('email_add_domain')
if add_suffix and not add_suffix.startswith('@'):
self.rule['email_add_domain'] = '@' + add_suffix
def alert(self, matches):
body = self.create_alert_body(matches)
# Add JIRA ticket if it exists
if self.pipeline is not None and 'jira_ticket' in self.pipeline:
url = '%s/browse/%s' % (self.pipeline['jira_server'], self.pipeline['jira_ticket'])
body += '\nJIRA ticket: %s' % (url)
to_addr = self.rule['email']
if 'email_from_field' in self.rule:
recipient = lookup_es_key(matches[0], self.rule['email_from_field'])
if isinstance(recipient, basestring):
if '@' in recipient:
to_addr = [recipient]
elif 'email_add_domain' in self.rule:
to_addr = [recipient + self.rule['email_add_domain']]
elif isinstance(recipient, list):
to_addr = recipient
if 'email_add_domain' in self.rule:
to_addr = [name + self.rule['email_add_domain'] for name in to_addr]
email_msg = MIMEText(body.encode('UTF-8'), _charset='UTF-8')
email_msg['Subject'] = self.create_title(matches)
email_msg['To'] = ', '.join(to_addr)
email_msg['From'] = self.from_addr
email_msg['Reply-To'] = self.rule.get('email_reply_to', email_msg['To'])
email_msg['Date'] = formatdate()
if self.rule.get('cc'):
email_msg['CC'] = ','.join(self.rule['cc'])
to_addr = to_addr + self.rule['cc']
if self.rule.get('bcc'):
to_addr = to_addr + self.rule['bcc']
try:
if self.smtp_ssl:
if self.smtp_port:
self.smtp = SMTP_SSL(self.smtp_host, self.smtp_port, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
self.smtp = SMTP_SSL(self.smtp_host, keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
else:
if self.smtp_port:
self.smtp = SMTP(self.smtp_host, self.smtp_port)
else:
self.smtp = SMTP(self.smtp_host)
self.smtp.ehlo()
if self.smtp.has_extn('STARTTLS'):
self.smtp.starttls(keyfile=self.smtp_key_file, certfile=self.smtp_cert_file)
if 'smtp_auth_file' in self.rule:
self.smtp.login(self.user, self.password)
except (SMTPException, error) as e:
raise EAException("Error connecting to SMTP host: %s" % (e))
except SMTPAuthenticationError as e:
raise EAException("SMTP username/password rejected: %s" % (e))
self.smtp.sendmail(self.from_addr, to_addr, email_msg.as_string())
self.smtp.close()
elastalert_logger.info("Sent email to %s" % (to_addr))
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
# If the rule has a query_key, add that value plus timestamp to subject
if 'query_key' in self.rule:
qk = matches[0].get(self.rule['query_key'])
if qk:
subject += ' - %s' % (qk)
return subject
def get_info(self):
return {'type': 'email',
'recipients': self.rule['email']}
class JiraAlerter(Alerter):
""" Creates a Jira ticket for each alert """
required_options = frozenset(['jira_server', 'jira_account_file', 'jira_project', 'jira_issuetype'])
# Maintain a static set of built-in fields that we explicitly know how to set
# For anything else, we will do best-effort and try to set a string value
known_field_list = [
'jira_account_file',
'jira_assignee',
'jira_bump_after_inactivity',
'jira_bump_in_statuses',
'jira_bump_not_in_statuses',
'jira_bump_tickets',
'jira_component',
'jira_components',
'jira_description',
'jira_ignore_in_title',
'jira_issuetype',
'jira_label',
'jira_labels',
'jira_max_age',
'jira_priority',
'jira_project',
'jira_server',
'jira_watchers',
]
# Some built-in jira types that can be used as custom fields require special handling
# Here is a sample of one of them:
# {"id":"customfield_12807","name":"My Custom Field","custom":true,"orderable":true,"navigable":true,"searchable":true,
# "clauseNames":["cf[12807]","My Custom Field"],"schema":{"type":"array","items":"string",
# "custom":"com.atlassian.jira.plugin.system.customfieldtypes:multiselect","customId":12807}}
# There are likely others that will need to be updated on a case-by-case basis
custom_string_types_with_special_handling = [
'com.atlassian.jira.plugin.system.customfieldtypes:multicheckboxes',
'com.atlassian.jira.plugin.system.customfieldtypes:multiselect',
'com.atlassian.jira.plugin.system.customfieldtypes:radiobuttons',
]
def __init__(self, rule):
super(JiraAlerter, self).__init__(rule)
self.server = self.rule['jira_server']
self.get_account(self.rule['jira_account_file'])
self.project = self.rule['jira_project']
self.issue_type = self.rule['jira_issuetype']
# We used to support only a single component. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.components = self.rule.get('jira_components', self.rule.get('jira_component'))
# We used to support only a single label. This allows us to maintain backwards compatibility
# while also giving the user-facing API a more representative name
self.labels = self.rule.get('jira_labels', self.rule.get('jira_label'))
self.description = self.rule.get('jira_description', '')
self.assignee = self.rule.get('jira_assignee')
self.max_age = self.rule.get('jira_max_age', 30)
self.priority = self.rule.get('jira_priority')
self.bump_tickets = self.rule.get('jira_bump_tickets', False)
self.bump_not_in_statuses = self.rule.get('jira_bump_not_in_statuses')
self.bump_in_statuses = self.rule.get('jira_bump_in_statuses')
self.bump_after_inactivity = self.rule.get('jira_bump_after_inactivity', self.max_age)
self.watchers = self.rule.get('jira_watchers')
if self.bump_in_statuses and self.bump_not_in_statuses:
msg = 'Both jira_bump_in_statuses (%s) and jira_bump_not_in_statuses (%s) are set.' % \
(','.join(self.bump_in_statuses), ','.join(self.bump_not_in_statuses))
intersection = list(set(self.bump_in_statuses) & set(self.bump_not_in_statuses))
if intersection:
msg = '%s Both have common statuses of (%s). As such, no tickets will ever be found.' % (
msg, ','.join(intersection))
msg += ' This should be simplified to use only one or the other.'
logging.warning(msg)
self.jira_args = {'project': {'key': self.project},
'issuetype': {'name': self.issue_type}}
if self.components:
# Support single component or list
if type(self.components) != list:
self.jira_args['components'] = [{'name': self.components}]
else:
self.jira_args['components'] = [{'name': component} for component in self.components]
if self.labels:
# Support single label or list
if type(self.labels) != list:
self.labels = [self.labels]
self.jira_args['labels'] = self.labels
if self.watchers:
# Support single watcher or list
if type(self.watchers) != list:
self.watchers = [self.watchers]
if self.assignee:
self.jira_args['assignee'] = {'name': self.assignee}
try:
self.client = JIRA(self.server, basic_auth=(self.user, self.password))
self.get_priorities()
self.get_arbitrary_fields()
except JIRAError as e:
# JIRAError may contain HTML, pass along only first 1024 chars
raise EAException("Error connecting to JIRA: %s" % (str(e)[:1024]))
try:
if self.priority is not None:
self.jira_args['priority'] = {'id': self.priority_ids[self.priority]}
except KeyError:
logging.error("Priority %s not found. Valid priorities are %s" % (self.priority, self.priority_ids.keys()))
def get_arbitrary_fields(self):
# This API returns metadata about all the fields defined on the jira server (built-ins and custom ones)
fields = self.client.fields()
for jira_field, value in self.rule.iteritems():
# If we find a field that is not covered by the set that we are aware of, it means it is either:
# 1. A built-in supported field in JIRA that we don't have on our radar
# 2. A custom field that a JIRA admin has configured
if jira_field.startswith('jira_') and jira_field not in self.known_field_list:
# Remove the jira_ part. Convert underscores to spaces
normalized_jira_field = jira_field[5:].replace('_', ' ').lower()
# All jira fields should be found in the 'id' or the 'name' field. Therefore, try both just in case
for identifier in ['name', 'id']:
field = next((f for f in fields if normalized_jira_field == f[identifier].replace('_', ' ').lower()), None)
if field:
break
if not field:
# Log a warning to ElastAlert saying that we couldn't find that type?
# OR raise and fail to load the alert entirely? Probably the latter...
raise Exception("Could not find a definition for the jira field '{0}'".format(normalized_jira_field))
arg_name = field['id']
# Check the schema information to decide how to set the value correctly
# If the schema information is not available, raise an exception since we don't know how to set it
# Note this is only the case for two built-in types, id: issuekey and id: thumbnail
if 'schema' not in field or 'type' not in field['schema']:
raise Exception("Could not determine schema information for the jira field '{0}'".format(normalized_jira_field))
arg_type = field['schema']['type']
# Handle arrays of simple types like strings or numbers
if arg_type == 'array':
# As a convenience, support the scenario wherein the user only provides
# a single value for a multi-value field e.g. jira_labels: Only_One_Label
if type(value) != list:
value = [value]
array_items = field['schema']['items']
# Simple string types
if array_items in ['string', 'date', 'datetime']:
# Special case for multi-select custom types (the JIRA metadata says these
# are strings, but in reality they must be provided as objects).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
self.jira_args[arg_name] = value
elif array_items == 'number':
self.jira_args[arg_name] = [int(v) for v in value]
# Also attempt to handle arrays of complex types that have to be passed as objects with an identifier 'key'
elif array_items == 'option':
self.jira_args[arg_name] = [{'value': v} for v in value]
else:
# Try setting it as an object, using 'name' as the key
# This may not work, as the key might actually be 'key', 'id', 'value', or something else
# If it works, great! If not, it will manifest itself as an API error that will bubble up
self.jira_args[arg_name] = [{'name': v} for v in value]
# Handle non-array types
else:
# Simple string types
if arg_type in ['string', 'date', 'datetime']:
# Special case for custom types (the JIRA metadata says these are strings,
# but in reality they must be provided as objects).
if 'custom' in field['schema'] and field['schema']['custom'] in self.custom_string_types_with_special_handling:
self.jira_args[arg_name] = {'value': value}
else:
self.jira_args[arg_name] = value
# Number type
elif arg_type == 'number':
self.jira_args[arg_name] = int(value)
elif arg_type == 'option':
self.jira_args[arg_name] = {'value': value}
# Complex type
else:
self.jira_args[arg_name] = {'name': value}
def get_priorities(self):
""" Creates a mapping of priority index to id. """
priorities = self.client.priorities()
self.priority_ids = {}
for x in range(len(priorities)):
self.priority_ids[x] = priorities[x].id
def set_assignee(self, assignee):
self.assignee = assignee
if assignee:
self.jira_args['assignee'] = {'name': assignee}
elif 'assignee' in self.jira_args:
self.jira_args.pop('assignee')
def find_existing_ticket(self, matches):
# Default title, get stripped search version
if 'alert_subject' not in self.rule:
title = self.create_default_title(matches, True)
else:
title = self.create_title(matches)
if 'jira_ignore_in_title' in self.rule:
title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')
# This is necessary for search to work. Other special characters and dashes
# directly adjacent to words appear to be ok
title = title.replace(' - ', ' ')
title = title.replace('\\', '\\\\')
date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
if self.bump_in_statuses:
jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
if self.bump_not_in_statuses:
jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
try:
issues = self.client.search_issues(jql)
except JIRAError as e:
logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
return None
if len(issues):
return issues[0]
def comment_on_ticket(self, ticket, match):
text = unicode(JiraFormattedMatchString(self.rule, match))
timestamp = pretty_ts(lookup_es_key(match, self.rule['timestamp_field']))
comment = "This alert was triggered again at %s\n%s" % (timestamp, text)
self.client.add_comment(ticket, comment)
def alert(self, matches):
title = self.create_title(matches)
if self.bump_tickets:
ticket = self.find_existing_ticket(matches)
if ticket:
inactivity_datetime = ts_now() - datetime.timedelta(days=self.bump_after_inactivity)
if ts_to_dt(ticket.fields.updated) >= inactivity_datetime:
if self.pipeline is not None:
self.pipeline['jira_ticket'] = None
self.pipeline['jira_server'] = self.server
return None
elastalert_logger.info('Commenting on existing ticket %s' % (ticket.key))
for match in matches:
try:
self.comment_on_ticket(ticket, match)
except JIRAError as e:
logging.exception("Error while commenting on ticket %s: %s" % (ticket, e))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = ticket
self.pipeline['jira_server'] = self.server
return None
self.jira_args['summary'] = title
self.jira_args['description'] = self.create_alert_body(matches)
try:
self.issue = self.client.create_issue(**self.jira_args)
# You cannot add watchers on initial creation, only as a follow-up action
if self.watchers:
for watcher in self.watchers:
try:
self.client.add_watcher(self.issue.key, watcher)
except Exception as ex:
# Re-raise the exception, preserve the stack-trace, and give some
# context as to which watcher failed to be added
raise Exception(
"Exception encountered when trying to add '{0}' as a watcher. Does the user exist?\n{1}" .format(
watcher,
ex
)), None, sys.exc_info()[2]
except JIRAError as e:
raise EAException("Error creating JIRA ticket using jira_args (%s): %s" % (self.jira_args, e))
elastalert_logger.info("Opened Jira ticket: %s" % (self.issue))
if self.pipeline is not None:
self.pipeline['jira_ticket'] = self.issue
self.pipeline['jira_server'] = self.server
def create_alert_body(self, matches):
body = self.description + '\n'
body += self.get_aggregation_summary_text(matches)
for match in matches:
body += unicode(JiraFormattedMatchString(self.rule, match))
if len(matches) > 1:
body += '\n----------------------------------------\n'
return body
def get_aggregation_summary_text(self, matches):
text = super(JiraAlerter, self).get_aggregation_summary_text(matches)
if text:
text = u'{{noformat}}{0}{{noformat}}'.format(text)
return text
def create_default_title(self, matches, for_search=False):
# If there is a query_key, use that in the title
if 'query_key' in self.rule and lookup_es_key(matches[0], self.rule['query_key']):
title = 'ElastAlert: %s matched %s' % (lookup_es_key(matches[0], self.rule['query_key']), self.rule['name'])
else:
title = 'ElastAlert: %s' % (self.rule['name'])
if for_search:
return title
title += ' - %s' % (pretty_ts(matches[0][self.rule['timestamp_field']], self.rule.get('use_local_time')))
# Add count for spikes
count = matches[0].get('spike_count')
if count:
title += ' - %s+ events' % (count)
return title
def get_info(self):
return {'type': 'jira'}
class CommandAlerter(Alerter):
required_options = set(['command'])
def __init__(self, *args):
super(CommandAlerter, self).__init__(*args)
self.last_command = []
self.shell = False
if isinstance(self.rule['command'], basestring):
self.shell = True
if '%' in self.rule['command']:
logging.warning('Warning! You could be vulnerable to shell injection!')
self.rule['command'] = [self.rule['command']]
self.new_style_string_format = False
if 'new_style_string_format' in self.rule and self.rule['new_style_string_format']:
self.new_style_string_format = True
def alert(self, matches):
# Format the command and arguments
try:
if self.new_style_string_format:
command = [command_arg.format(match=matches[0]) for command_arg in self.rule['command']]
else:
command = [command_arg % matches[0] for command_arg in self.rule['command']]
self.last_command = command
except KeyError as e:
raise EAException("Error formatting command: %s" % (e))
# Run command and pipe data
try:
subp = subprocess.Popen(command, stdin=subprocess.PIPE, shell=self.shell)
if self.rule.get('pipe_match_json'):
match_json = json.dumps(matches, cls=DateTimeEncoder) + '\n'
stdout, stderr = subp.communicate(input=match_json)
if self.rule.get("fail_on_non_zero_exit", False) and subp.wait():
raise EAException("Non-zero exit code while running command %s" % (' '.join(command)))
except OSError as e:
raise EAException("Error while running command %s: %s" % (' '.join(command), e))
def get_info(self):
return {'type': 'command',
'command': ' '.join(self.last_command)}
class SnsAlerter(Alerter):
""" Send alert using AWS SNS service """
required_options = frozenset(['sns_topic_arn'])
def __init__(self, *args):
super(SnsAlerter, self).__init__(*args)
self.sns_topic_arn = self.rule.get('sns_topic_arn', '')
self.aws_access_key_id = self.rule.get('aws_access_key_id')
self.aws_secret_access_key = self.rule.get('aws_secret_access_key')
self.aws_region = self.rule.get('aws_region', 'us-east-1')
self.profile = self.rule.get('boto_profile', None) # Deprecated
self.profile = self.rule.get('aws_profile', None)
def create_default_title(self, matches):
subject = 'ElastAlert: %s' % (self.rule['name'])
return subject
def alert(self, matches):
body = self.create_alert_body(matches)
session = boto3.Session(
aws_access_key_id=self.aws_access_key_id,
aws_secret_access_key=self.aws_secret_access_key,
region_name=self.aws_region,
profile_name=self.profile
)
sns_client = session.client('sns')
sns_client.publish(
TopicArn=self.sns_topic_arn,
Message=body,
Subject=self.create_title(matches)
)
elastalert_logger.info("Sent sns notification to %s" % (self.sns_topic_arn))
class HipChatAlerter(Alerter):
""" Creates a HipChat room notification for each alert """
required_options = frozenset(['hipchat_auth_token', 'hipchat_room_id'])
def __init__(self, rule):
super(HipChatAlerter, self).__init__(rule)
self.hipchat_msg_color = self.rule.get('hipchat_msg_color', 'red')
self.hipchat_message_format = self.rule.get('hipchat_message_format', 'html')
self.hipchat_auth_token = self.rule['hipchat_auth_token']
self.hipchat_room_id = self.rule['hipchat_room_id']
self.hipchat_domain = self.rule.get('hipchat_domain', 'api.hipchat.com')
self.hipchat_ignore_ssl_errors = self.rule.get('hipchat_ignore_ssl_errors', False)
self.hipchat_notify = self.rule.get('hipchat_notify', True)
self.hipchat_from = self.rule.get('hipchat_from', '')
self.url = 'https://%s/v2/room/%s/notification?auth_token=%s' % (
self.hipchat_domain, self.hipchat_room_id, self.hipchat_auth_token)
self.hipchat_proxy = self.rule.get('hipchat_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# HipChat sends 400 bad request on messages longer than 10000 characters
if (len(body) > 9999):
body = body[:9980] + '..(truncated)'
# Use appropriate line ending for text/html
if self.hipchat_message_format == 'html':
body = body.replace('\n', '<br />')
# Post to HipChat
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.hipchat_proxy} if self.hipchat_proxy else None
payload = {
'color': self.hipchat_msg_color,
'message': body,
'message_format': self.hipchat_message_format,
'notify': self.hipchat_notify,
'from': self.hipchat_from
}
try:
if self.hipchat_ignore_ssl_errors:
requests.packages.urllib3.disable_warnings()
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers,
verify=not self.hipchat_ignore_ssl_errors,
proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to HipChat: %s" % e)
elastalert_logger.info("Alert sent to HipChat room %s" % self.hipchat_room_id)
def get_info(self):
return {'type': 'hipchat',
'hipchat_room_id': self.hipchat_room_id}
class MsTeamsAlerter(Alerter):
""" Creates a Microsoft Teams Conversation Message for each alert """
required_options = frozenset(['ms_teams_webhook_url', 'ms_teams_alert_summary'])
def __init__(self, rule):
super(MsTeamsAlerter, self).__init__(rule)
self.ms_teams_webhook_url = self.rule['ms_teams_webhook_url']
if isinstance(self.ms_teams_webhook_url, basestring):
self.ms_teams_webhook_url = [self.ms_teams_webhook_url]
self.ms_teams_proxy = self.rule.get('ms_teams_proxy', None)
self.ms_teams_alert_summary = self.rule.get('ms_teams_alert_summary', 'ElastAlert Message')
self.ms_teams_alert_fixed_width = self.rule.get('ms_teams_alert_fixed_width', False)
self.ms_teams_theme_color = self.rule.get('ms_teams_theme_color', '')
def format_body(self, body):
body = body.encode('UTF-8')
if self.ms_teams_alert_fixed_width:
body = body.replace('`', "'")
body = "```{0}```".format('```\n\n```'.join(x for x in body.split('\n'))).replace('\n``````', '')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to Teams
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.ms_teams_proxy} if self.ms_teams_proxy else None
payload = {
'@type': 'MessageCard',
'@context': 'http://schema.org/extensions',
'summary': self.ms_teams_alert_summary,
'title': self.create_title(matches),
'text': body
}
if self.ms_teams_theme_color != '':
payload['themeColor'] = self.ms_teams_theme_color
for url in self.ms_teams_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ms teams: %s" % e)
elastalert_logger.info("Alert sent to MS Teams")
def get_info(self):
return {'type': 'ms_teams',
'ms_teams_webhook_url': self.ms_teams_webhook_url}
class SlackAlerter(Alerter):
""" Creates a Slack room message for each alert """
required_options = frozenset(['slack_webhook_url'])
def __init__(self, rule):
super(SlackAlerter, self).__init__(rule)
self.slack_webhook_url = self.rule['slack_webhook_url']
if isinstance(self.slack_webhook_url, basestring):
self.slack_webhook_url = [self.slack_webhook_url]
self.slack_proxy = self.rule.get('slack_proxy', None)
self.slack_username_override = self.rule.get('slack_username_override', 'elastalert')
self.slack_channel_override = self.rule.get('slack_channel_override', '')
self.slack_emoji_override = self.rule.get('slack_emoji_override', ':ghost:')
self.slack_icon_url_override = self.rule.get('slack_icon_url_override', '')
self.slack_msg_color = self.rule.get('slack_msg_color', 'danger')
self.slack_parse_override = self.rule.get('slack_parse_override', 'none')
self.slack_text_string = self.rule.get('slack_text_string', '')
def format_body(self, body):
# https://api.slack.com/docs/formatting
body = body.encode('UTF-8')
body = body.replace('&', '&')
body = body.replace('<', '<')
body = body.replace('>', '>')
return body
def alert(self, matches):
body = self.create_alert_body(matches)
body = self.format_body(body)
# post to slack
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.slack_proxy} if self.slack_proxy else None
payload = {
'username': self.slack_username_override,
'channel': self.slack_channel_override,
'parse': self.slack_parse_override,
'text': self.slack_text_string,
'attachments': [
{
'color': self.slack_msg_color,
'title': self.create_title(matches),
'text': body,
'mrkdwn_in': ['text', 'pretext'],
'fields': []
}
]
}
if self.slack_icon_url_override != '':
payload['icon_url'] = self.slack_icon_url_override
else:
payload['icon_emoji'] = self.slack_emoji_override
for url in self.slack_webhook_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to slack: %s" % e)
elastalert_logger.info("Alert sent to Slack")
def get_info(self):
return {'type': 'slack',
'slack_username_override': self.slack_username_override,
'slack_webhook_url': self.slack_webhook_url}
class PagerDutyAlerter(Alerter):
""" Create an incident on PagerDuty for each alert """
required_options = frozenset(['pagerduty_service_key', 'pagerduty_client_name'])
def __init__(self, rule):
super(PagerDutyAlerter, self).__init__(rule)
self.pagerduty_service_key = self.rule['pagerduty_service_key']
self.pagerduty_client_name = self.rule['pagerduty_client_name']
self.pagerduty_incident_key = self.rule.get('pagerduty_incident_key', '')
self.pagerduty_incident_key_args = self.rule.get('pagerduty_incident_key_args', None)
self.pagerduty_proxy = self.rule.get('pagerduty_proxy', None)
self.url = 'https://events.pagerduty.com/generic/2010-04-15/create_event.json'
def alert(self, matches):
body = self.create_alert_body(matches)
# post to pagerduty
headers = {'content-type': 'application/json'}
payload = {
'service_key': self.pagerduty_service_key,
'description': self.create_title(matches),
'event_type': 'trigger',
'incident_key': self.get_incident_key(matches),
'client': self.pagerduty_client_name,
'details': {
"information": body.encode('UTF-8'),
},
}
# set https proxy, if it was provided
proxies = {'https': self.pagerduty_proxy} if self.pagerduty_proxy else None
try:
response = requests.post(
self.url,
data=json.dumps(payload, cls=DateTimeEncoder, ensure_ascii=False),
headers=headers,
proxies=proxies
)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to pagerduty: %s" % e)
elastalert_logger.info("Trigger sent to PagerDuty")
def get_incident_key(self, matches):
if self.pagerduty_incident_key_args:
incident_key_values = [lookup_es_key(matches[0], arg) for arg in self.pagerduty_incident_key_args]
# Populate values with rule level properties too
for i in range(len(incident_key_values)):
if incident_key_values[i] is None:
key_value = self.rule.get(self.pagerduty_incident_key_args[i])
if key_value:
incident_key_values[i] = key_value
incident_key_values = ['<MISSING VALUE>' if val is None else val for val in incident_key_values]
return self.pagerduty_incident_key.format(*incident_key_values)
else:
return self.pagerduty_incident_key
def get_info(self):
return {'type': 'pagerduty',
'pagerduty_client_name': self.pagerduty_client_name}
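# A minimal sketch (not part of the original module) of how get_incident_key()
# resolves its template; all field names and values are hypothetical:
#
#   rule = {'pagerduty_service_key': 'KEY', 'pagerduty_client_name': 'ea',
#           'pagerduty_incident_key': 'outage-{0}-{1}',
#           'pagerduty_incident_key_args': ['host', 'service']}
#   alerter = PagerDutyAlerter(rule)
#   alerter.get_incident_key([{'host': 'web01', 'service': 'nginx'}])
#   # -> 'outage-web01-nginx'; a missing arg falls back to the rule value,
#   # then to '<MISSING VALUE>'.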
class ExotelAlerter(Alerter):
required_options = frozenset(['exotel_account_sid', 'exotel_auth_token', 'exotel_to_number', 'exotel_from_number'])
def __init__(self, rule):
super(ExotelAlerter, self).__init__(rule)
self.exotel_account_sid = self.rule['exotel_account_sid']
self.exotel_auth_token = self.rule['exotel_auth_token']
self.exotel_to_number = self.rule['exotel_to_number']
self.exotel_from_number = self.rule['exotel_from_number']
self.sms_body = self.rule.get('exotel_message_body', '')
def alert(self, matches):
client = Exotel(self.exotel_account_sid, self.exotel_auth_token)
try:
message_body = self.rule['name'] + self.sms_body
response = client.sms(self.rule['exotel_from_number'], self.rule['exotel_to_number'], message_body)
if response != 200:
raise EAException("Error posting to Exotel, response code is %s" % response)
except:
raise EAException("Error posting to Exotel"), None, sys.exc_info()[2]
elastalert_logger.info("Trigger sent to Exotel")
def get_info(self):
return {'type': 'exotel', 'exotel_account': self.exotel_account_sid}
class TwilioAlerter(Alerter):
required_options = frozenset(['twilio_account_sid', 'twilio_auth_token', 'twilio_to_number', 'twilio_from_number'])
def __init__(self, rule):
super(TwilioAlerter, self).__init__(rule)
self.twilio_account_sid = self.rule['twilio_account_sid']
self.twilio_auth_token = self.rule['twilio_auth_token']
self.twilio_to_number = self.rule['twilio_to_number']
self.twilio_from_number = self.rule['twilio_from_number']
def alert(self, matches):
client = TwilioClient(self.twilio_account_sid, self.twilio_auth_token)
try:
client.messages.create(body=self.rule['name'],
to=self.twilio_to_number,
from_=self.twilio_from_number)
except TwilioRestException as e:
raise EAException("Error posting to twilio: %s" % e)
elastalert_logger.info("Trigger sent to Twilio")
def get_info(self):
return {'type': 'twilio',
'twilio_client_name': self.twilio_from_number}
class VictorOpsAlerter(Alerter):
""" Creates a VictorOps Incident for each alert """
required_options = frozenset(['victorops_api_key', 'victorops_routing_key', 'victorops_message_type'])
def __init__(self, rule):
super(VictorOpsAlerter, self).__init__(rule)
self.victorops_api_key = self.rule['victorops_api_key']
self.victorops_routing_key = self.rule['victorops_routing_key']
self.victorops_message_type = self.rule['victorops_message_type']
self.victorops_entity_display_name = self.rule.get('victorops_entity_display_name', 'no entity display name')
self.url = 'https://alert.victorops.com/integrations/generic/20131114/alert/%s/%s' % (
self.victorops_api_key, self.victorops_routing_key)
self.victorops_proxy = self.rule.get('victorops_proxy', None)
def alert(self, matches):
body = self.create_alert_body(matches)
# post to victorops
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.victorops_proxy} if self.victorops_proxy else None
payload = {
"message_type": self.victorops_message_type,
"entity_display_name": self.victorops_entity_display_name,
"monitoring_tool": "ElastAlert",
"state_message": body
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to VictorOps: %s" % e)
elastalert_logger.info("Trigger sent to VictorOps")
def get_info(self):
return {'type': 'victorops',
'victorops_routing_key': self.victorops_routing_key}
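# A sketch (not part of the original module) of the JSON document alert()
# POSTs to the VictorOps endpoint above; the message type and body are
# hypothetical, the entity display name is the default from __init__:
#
#   {"message_type": "CRITICAL",
#    "entity_display_name": "no entity display name",
#    "monitoring_tool": "ElastAlert",
#    "state_message": "<alert body>"}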
class TelegramAlerter(Alerter):
""" Send a Telegram message via bot api for each alert """
required_options = frozenset(['telegram_bot_token', 'telegram_room_id'])
def __init__(self, rule):
super(TelegramAlerter, self).__init__(rule)
self.telegram_bot_token = self.rule['telegram_bot_token']
self.telegram_room_id = self.rule['telegram_room_id']
self.telegram_api_url = self.rule.get('telegram_api_url', 'api.telegram.org')
self.url = 'https://%s/bot%s/%s' % (self.telegram_api_url, self.telegram_bot_token, "sendMessage")
self.telegram_proxy = self.rule.get('telegram_proxy', None)
def alert(self, matches):
body = u'⚠ *%s* ⚠ ```\n' % (self.create_title(matches))
for match in matches:
body += unicode(BasicMatchString(self.rule, match))
# Separate text of aggregated alerts with dashes
if len(matches) > 1:
body += '\n----------------------------------------\n'
body += u' ```'
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.telegram_proxy} if self.telegram_proxy else None
payload = {
'chat_id': self.telegram_room_id,
'text': body,
'parse_mode': 'markdown',
'disable_web_page_preview': True
}
try:
response = requests.post(self.url, data=json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
warnings.resetwarnings()
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Telegram: %s" % e)
elastalert_logger.info(
"Alert sent to Telegram room %s" % self.telegram_room_id)
def get_info(self):
return {'type': 'telegram',
'telegram_room_id': self.telegram_room_id}
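# A sketch (not part of the original module) of the Markdown body alert()
# assembles for two aggregated matches; the rule name is hypothetical:
#
#   ⚠ *demo-rule* ⚠ ```
#   <match 1 text>
#   ----------------------------------------
#   <match 2 text>
#   ----------------------------------------
#    ```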
class GitterAlerter(Alerter):
""" Creates a Gitter activity message for each alert """
required_options = frozenset(['gitter_webhook_url'])
def __init__(self, rule):
super(GitterAlerter, self).__init__(rule)
self.gitter_webhook_url = self.rule['gitter_webhook_url']
self.gitter_proxy = self.rule.get('gitter_proxy', None)
self.gitter_msg_level = self.rule.get('gitter_msg_level', 'error')
def alert(self, matches):
body = self.create_alert_body(matches)
# post to Gitter
headers = {'content-type': 'application/json'}
# set https proxy, if it was provided
proxies = {'https': self.gitter_proxy} if self.gitter_proxy else None
payload = {
'message': body,
'level': self.gitter_msg_level
}
try:
response = requests.post(self.gitter_webhook_url, json.dumps(payload, cls=DateTimeEncoder), headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to Gitter: %s" % e)
elastalert_logger.info("Alert sent to Gitter")
def get_info(self):
return {'type': 'gitter',
'gitter_webhook_url': self.gitter_webhook_url}
class ServiceNowAlerter(Alerter):
""" Creates a ServiceNow alert """
required_options = set([
'username',
'password',
'servicenow_rest_url',
'short_description',
'comments',
'assignment_group',
'category',
'subcategory',
'cmdb_ci',
'caller_id'
])
def __init__(self, rule):
super(ServiceNowAlerter, self).__init__(rule)
self.servicenow_rest_url = self.rule['servicenow_rest_url']
self.servicenow_proxy = self.rule.get('servicenow_proxy', None)
def alert(self, matches):
for match in matches:
# Parse everything into description.
description = str(BasicMatchString(self.rule, match))
# Set proper headers
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.servicenow_proxy} if self.servicenow_proxy else None
payload = {
"description": description,
"short_description": self.rule['short_description'],
"comments": self.rule['comments'],
"assignment_group": self.rule['assignment_group'],
"category": self.rule['category'],
"subcategory": self.rule['subcategory'],
"cmdb_ci": self.rule['cmdb_ci'],
"caller_id": self.rule["caller_id"]
}
try:
response = requests.post(
self.servicenow_rest_url,
auth=(self.rule['username'], self.rule['password']),
headers=headers,
data=json.dumps(payload, cls=DateTimeEncoder),
proxies=proxies
)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting to ServiceNow: %s" % e)
elastalert_logger.info("Alert sent to ServiceNow")
def get_info(self):
return {'type': 'ServiceNow',
                'servicenow_rest_url': self.servicenow_rest_url}
class HTTPPostAlerter(Alerter):
""" Requested elasticsearch indices are sent by HTTP POST. Encoded with JSON. """
def __init__(self, rule):
super(HTTPPostAlerter, self).__init__(rule)
post_url = self.rule.get('http_post_url')
if isinstance(post_url, basestring):
post_url = [post_url]
self.post_url = post_url
self.post_proxy = self.rule.get('http_post_proxy')
self.post_payload = self.rule.get('http_post_payload', {})
self.post_static_payload = self.rule.get('http_post_static_payload', {})
self.post_all_values = self.rule.get('http_post_all_values', not self.post_payload)
def alert(self, matches):
""" Each match will trigger a POST to the specified endpoint(s). """
for match in matches:
payload = match if self.post_all_values else {}
payload.update(self.post_static_payload)
for post_key, es_key in self.post_payload.items():
payload[post_key] = lookup_es_key(match, es_key)
headers = {
"Content-Type": "application/json",
"Accept": "application/json;charset=utf-8"
}
proxies = {'https': self.post_proxy} if self.post_proxy else None
for url in self.post_url:
try:
response = requests.post(url, data=json.dumps(payload, cls=DateTimeEncoder),
headers=headers, proxies=proxies)
response.raise_for_status()
except RequestException as e:
raise EAException("Error posting HTTP Post alert: %s" % e)
elastalert_logger.info("HTTP Post alert sent.")
def get_info(self):
return {'type': 'http_post',
'http_post_webhook_url': self.post_url}
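# A sketch (not part of the original module) of the payload alert() builds for
# one match, with a hypothetical rule: http_post_payload maps POST keys to ES
# fields and http_post_static_payload is merged in verbatim.
#
#   rule = {'http_post_url': 'http://example.com/hook',
#           'http_post_payload': {'ip': 'source_ip'},
#           'http_post_static_payload': {'team': 'ops'}}
#   match = {'source_ip': '10.0.0.1', '@timestamp': '2015-01-01T00:00:00Z'}
#   # resulting POST body: {"ip": "10.0.0.1", "team": "ops"}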
| 44.289971
| 137
| 0.607814
| 7,440
| 60,943
| 4.771371
| 0.104301
| 0.05048
| 0.026959
| 0.006704
| 0.427646
| 0.314544
| 0.252373
| 0.207583
| 0.181695
| 0.173526
| 0
| 0.002861
| 0.283101
| 60,943
| 1,375
| 138
| 44.322182
| 0.809613
| 0.093514
| 0
| 0.245411
| 0
| 0.000966
| 0.15868
| 0.024326
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.011594
| 0.029952
| null | null | 0.002899
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cb439e7ed9a5e950d6cf894c40e5a62043d06e9
| 5,183
|
py
|
Python
|
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | 2
|
2019-08-19T17:08:47.000Z
|
2019-10-05T11:37:02.000Z
|
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
vendor/packages/translate-toolkit/translate/convert/test_po2tmx.py
|
jgmize/kitsune
|
8f23727a9c7fcdd05afc86886f0134fb08d9a2f0
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.convert import po2tmx
from translate.convert import test_convert
from translate.misc import wStringIO
from translate.storage import tmx
from translate.storage import lisa
class TestPO2TMX:
def po2tmx(self, posource, sourcelanguage='en', targetlanguage='af'):
"""helper that converts po source to tmx source without requiring files"""
inputfile = wStringIO.StringIO(posource)
outputfile = wStringIO.StringIO()
outputfile.tmxfile = tmx.tmxfile(inputfile=None, sourcelanguage=sourcelanguage)
po2tmx.convertpo(inputfile, outputfile, templatefile=None, sourcelanguage=sourcelanguage, targetlanguage=targetlanguage)
return outputfile.tmxfile
def test_basic(self):
minipo = r"""# Afrikaans translation of program ABC
#
msgid ""
msgstr ""
"Project-Id-Version: program 2.1-branch\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2006-01-09 07:15+0100\n"
"PO-Revision-Date: 2004-03-30 17:02+0200\n"
"Last-Translator: Zuza Software Foundation <xxx@translate.org.za>\n"
"Language-Team: Afrikaans <translate-discuss-xxx@lists.sourceforge.net>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
# Please remember to do something
#: ../dir/file.xml.in.h:1 ../dir/file2.xml.in.h:4
msgid "Applications"
msgstr "Toepassings"
"""
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("Applications") == "Toepassings"
assert tmx.translate("bla") is None
xmltext = str(tmx)
assert xmltext.index('creationtool="Translate Toolkit - po2tmx"')
assert xmltext.index('adminlang')
assert xmltext.index('creationtoolversion')
assert xmltext.index('datatype')
assert xmltext.index('o-tmf')
assert xmltext.index('segtype')
assert xmltext.index('srclang')
def test_sourcelanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, sourcelanguage="xh")
print "The generated xml:"
print str(tmx)
header = tmx.document.find("header")
assert header.get("srclang") == "xh"
def test_targetlanguage(self):
minipo = 'msgid "String"\nmsgstr "String"\n'
tmx = self.po2tmx(minipo, targetlanguage="xh")
print "The generated xml:"
print str(tmx)
tuv = tmx.document.findall(".//%s" % tmx.namespaced("tuv"))[1]
#tag[0] will be the source, we want the target tuv
assert tuv.get("{%s}lang" % lisa.XML_NS) == "xh"
def test_multiline(self):
"""Test multiline po entry"""
minipo = r'''msgid "First part "
"and extra"
msgstr "Eerste deel "
"en ekstra"'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('First part and extra') == 'Eerste deel en ekstra'
def test_escapednewlines(self):
"""Test the escaping of newlines"""
minipo = r'''msgid "First line\nSecond line"
msgstr "Eerste lyn\nTweede lyn"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First line\nSecond line") == "Eerste lyn\nTweede lyn"
def test_escapedtabs(self):
"""Test the escaping of tabs"""
minipo = r'''msgid "First column\tSecond column"
msgstr "Eerste kolom\tTweede kolom"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate("First column\tSecond column") == "Eerste kolom\tTweede kolom"
def test_escapedquotes(self):
"""Test the escaping of quotes (and slash)"""
minipo = r'''msgid "Hello \"Everyone\""
msgstr "Good day \"All\""
msgid "Use \\\"."
msgstr "Gebruik \\\"."
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert tmx.translate('Hello "Everyone"') == 'Good day "All"'
assert tmx.translate(r'Use \".') == r'Gebruik \".'
def test_exclusions(self):
"""Test that empty and fuzzy messages are excluded"""
minipo = r'''#, fuzzy
msgid "One"
msgstr "Een"
msgid "Two"
msgstr ""
msgid ""
msgstr "Drie"
'''
tmx = self.po2tmx(minipo)
print "The generated xml:"
print str(tmx)
assert "<tu" not in str(tmx)
assert len(tmx.units) == 0
def test_nonascii(self):
"""Tests that non-ascii conversion works."""
minipo = r'''msgid "Bézier curve"
msgstr "Bézier-kurwe"
'''
tmx = self.po2tmx(minipo)
print str(tmx)
assert tmx.translate(u"Bézier curve") == u"Bézier-kurwe"
class TestPO2TMXCommand(test_convert.TestConvertCommand, TestPO2TMX):
"""Tests running actual po2tmx commands on files"""
convertmodule = po2tmx
def test_help(self):
"""tests getting help"""
options = test_convert.TestConvertCommand.test_help(self)
options = self.help_check(options, "-l LANG, --language=LANG")
options = self.help_check(options, "--source-language=LANG", last=True)
| 33.43871
| 128
| 0.641134
| 636
| 5,183
| 5.198113
| 0.349057
| 0.019964
| 0.03539
| 0.051724
| 0.22686
| 0.185723
| 0.176951
| 0.176951
| 0.156987
| 0.156987
| 0
| 0.015672
| 0.224387
| 5,183
| 154
| 129
| 33.655844
| 0.806716
| 0.017557
| 0
| 0.289256
| 0
| 0.016529
| 0.349851
| 0.045087
| 0
| 0
| 0
| 0
| 0.157025
| 0
| null | null | 0.016529
| 0.041322
| null | null | 0.140496
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cbe3198f6071ec0d541441f81f18f624a937b6f
| 5,044
|
py
|
Python
|
t2k/bin/cmttags.py
|
tianluyuan/pyutils
|
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
|
[
"MIT"
] | 1
|
2019-02-22T10:57:13.000Z
|
2019-02-22T10:57:13.000Z
|
t2k/bin/cmttags.py
|
tianluyuan/pyutils
|
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
|
[
"MIT"
] | null | null | null |
t2k/bin/cmttags.py
|
tianluyuan/pyutils
|
2cd3a90dbbd3d0eec3054fb9493ca0f6e0272e50
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
A script to create tags for CMT managed packages.
Call from within cmt/ directory
"""
import subprocess
import sys
import os
from optparse import OptionParser
__author__ = 'Tianlu Yuan'
__email__ = 'tianlu.yuan [at] colorado.edu'
# Ignore large external packages for now
IGNORES = ['CMT', 'EXTERN', 'GSL', 'MYSQL', 'GEANT', 'CLHEP']
# Extensions for finding src files, must satisfy unix wildcard rules
EXTENSIONS = {'cpp': ('*.[hc]', '*.[hc]xx', '*.[hc]pp', '*.cc', '*.hh'),
              'python': ('*.py',),
              'java': ('*.java',)}
# Ignore these files and dirs, key specifies argument to find
# (e.g. '-iname')
PRUNE = {'iname':['*_Dict.[hc]*', '*linkdef.h']}
def check_dir():
""" Are we inside cmt/
"""
if os.path.basename(os.getcwd()) != 'cmt':
sys.exit('Not inside cmt directory!')
def check_requirements():
""" Ensure that requirements file exists in cmt dir
"""
if not os.path.isfile('requirements'):
sys.exit('No requirements file!')
def init_use_dict():
"""Returns the initial use_dict which contains the current (cwd)
package and its path. 'cmt show uses' does not include the
package itself.
"""
# Must call os.path.dirname because the cwd should be inside a cmt
# directory
return {'this':os.path.dirname(os.getcwd())}
def parse_uses():
""" Returns a dict of used packages and their root dir paths.
e.g. {ROOT:/path/to/cmt/installed/ROOT/vXrY}
"""
check_dir()
check_requirements()
proc = subprocess.Popen(['cmt', 'show', 'uses'],
stdout=subprocess.PIPE)
use_dict = init_use_dict()
for line in iter(proc.stdout.readline, ''):
tokens = line.split()
# ignore lines that start with '#'
if line[0] != '#' and tokens[1] not in IGNORES:
basepath = tokens[-1].strip('()')
# highland and psyche do not strictly follow CMT path
# organization. They have subpackages within a master, so
# we need to take that into account
relpath_list = [master for master in tokens[3:-1]]
relpath_list.extend([tokens[1], tokens[2]])
use_dict[tokens[1]] = os.path.join(basepath, *relpath_list)
return use_dict
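# A sketch (not part of the original script) of the dict parse_uses returns;
# the package names and paths are hypothetical:
#
#   {'this': '/home/me/MyPkg',
#    'ROOT': '/opt/cmt/ROOT/vXrY'}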
def get_exts(opts):
if opts.python:
return EXTENSIONS['python']
elif opts.java:
return EXTENSIONS['java']
else:
return EXTENSIONS['cpp']
def build_find_args(exts):
""" ext is a list of file extensions corresponding to the files we want
to search. This will return a list of arguments that can be passed to `find`
"""
find_args = []
for a_ext in exts:
# -o for "or"
find_args.extend(['-o', '-iname'])
find_args.append('{0}'.format(a_ext))
# replace first '-o' with '( for grouping matches
find_args[0] = '('
# append parens for grouping negation
find_args.extend([')', '('])
# Add prune files
for match_type in PRUNE:
for aprune in PRUNE[match_type]:
find_args.append('-not')
find_args.append('-'+match_type)
find_args.append('{0}'.format(aprune))
find_args.append(')')
return find_args
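# A worked sketch (not part of the original script) of what build_find_args
# produces for two hypothetical extensions; this argument list is what `find`
# ultimately receives:
#
#   build_find_args(('*.cc', '*.hh'))
#   # -> ['(', '-iname', '*.cc', '-o', '-iname', '*.hh', ')', '(',
#   #     '-not', '-iname', '*_Dict.[hc]*', '-not', '-iname', '*linkdef.h',
#   #     ')']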
def build_find_cmd(opts, paths):
""" Builds teh cmd file using ctags. Returns cmd based on the following
template: 'find {0} -type f {1} | etags -'
"""
find_args = build_find_args(get_exts(opts))
return ['find']+paths+['-type', 'f']+find_args
def build_tags_cmd():
return ['etags', '-']
def main():
""" Uses ctags to generate TAGS file in cmt directory based on cmt show uses
"""
parser = OptionParser()
parser.add_option('--cpp',
dest='cpp',
action='store_true',
default=False,
help='tag only c/cpp files (default)')
parser.add_option('--python',
dest='python',
action='store_true',
default=False,
help='tag only python files')
parser.add_option('--java',
dest='java',
action='store_true',
default=False,
help='tag only java files')
parser.add_option('-n',
dest='dry_run',
action='store_true',
default=False,
help='dry run')
(opts, args) = parser.parse_args()
# get the cmt show uses dictionary of programs and paths
use_dict = parse_uses()
# build the commands
find_cmd = build_find_cmd(opts, list(use_dict.itervalues()))
tags_cmd = build_tags_cmd()
print 'Creating TAGS file based on dependencies:'
print use_dict
if not opts.dry_run:
find_proc = subprocess.Popen(find_cmd, stdout=subprocess.PIPE)
tags_proc = subprocess.Popen(tags_cmd, stdin=find_proc.stdout)
tags_proc.communicate()
if __name__ == '__main__':
main()
| 28.822857
| 80
| 0.585052
| 639
| 5,044
| 4.482003
| 0.353678
| 0.039106
| 0.024441
| 0.030726
| 0.076466
| 0.050628
| 0.039804
| 0.039804
| 0
| 0
| 0
| 0.003606
| 0.285289
| 5,044
| 174
| 81
| 28.988506
| 0.790846
| 0.126289
| 0
| 0.086022
| 0
| 0
| 0.136083
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.043011
| null | null | 0.021505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cbea5a7d278dcb466c16a1d3e035b7e14f3c77c
| 63,630
|
py
|
Python
|
salt/daemons/masterapi.py
|
rickh563/salt
|
02822d6466c47d0daafd6e98b4e767a396b0ed48
|
[
"Apache-2.0"
] | null | null | null |
salt/daemons/masterapi.py
|
rickh563/salt
|
02822d6466c47d0daafd6e98b4e767a396b0ed48
|
[
"Apache-2.0"
] | null | null | null |
salt/daemons/masterapi.py
|
rickh563/salt
|
02822d6466c47d0daafd6e98b4e767a396b0ed48
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
This module contains all of the routines needed to set up a master server; this
involves preparing the three listeners and the workers needed by the master.
'''
from __future__ import absolute_import
# Import python libs
import fnmatch
import logging
import os
import re
import time
import stat
import tempfile
# Import salt libs
import salt.crypt
import salt.utils
import salt.client
import salt.payload
import salt.pillar
import salt.state
import salt.runner
import salt.auth
import salt.wheel
import salt.minion
import salt.search
import salt.key
import salt.fileserver
import salt.utils.atomicfile
import salt.utils.event
import salt.utils.verify
import salt.utils.minions
import salt.utils.gzip_util
import salt.utils.jid
from salt.pillar import git_pillar
from salt.utils.event import tagify
from salt.exceptions import SaltMasterError
# Import 3rd-party libs
import salt.ext.six as six
try:
import pwd
HAS_PWD = True
except ImportError:
# pwd is not available on windows
HAS_PWD = False
log = logging.getLogger(__name__)
# Things to do in lower layers:
# only accept valid minion ids
def init_git_pillar(opts):
'''
Clear out the ext pillar caches, used when the master starts
'''
pillargitfs = []
for opts_dict in [x for x in opts.get('ext_pillar', [])]:
if 'git' in opts_dict:
try:
import git
except ImportError:
return pillargitfs
parts = opts_dict['git'].strip().split()
try:
br = parts[0]
loc = parts[1]
except IndexError:
log.critical(
'Unable to extract external pillar data: {0}'
.format(opts_dict['git'])
)
else:
pillargitfs.append(
git_pillar.GitPillar(
br,
loc,
opts
)
)
return pillargitfs
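# A sketch (not part of the original module) of the opts shape this function
# consumes: each 'git' entry is '<branch> <repo-url>' and yields one GitPillar.
# The repo URL is hypothetical, and [] is returned if GitPython is unavailable.
#
#   opts = {'ext_pillar': [{'git': 'master git://example.com/pillar.git'}]}
#   pillargitfs = init_git_pillar(opts)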
def clean_fsbackend(opts):
'''
Clean out the old fileserver backends
'''
# Clear remote fileserver backend caches so they get recreated
for backend in ('git', 'hg', 'svn'):
if backend in opts['fileserver_backend']:
env_cache = os.path.join(
opts['cachedir'],
'{0}fs'.format(backend),
'envs.p'
)
if os.path.isfile(env_cache):
log.debug('Clearing {0}fs env cache'.format(backend))
try:
os.remove(env_cache)
except OSError as exc:
log.critical(
'Unable to clear env cache file {0}: {1}'
.format(env_cache, exc)
)
file_lists_dir = os.path.join(
opts['cachedir'],
'file_lists',
'{0}fs'.format(backend)
)
try:
file_lists_caches = os.listdir(file_lists_dir)
except OSError:
continue
for file_lists_cache in fnmatch.filter(file_lists_caches, '*.p'):
cache_file = os.path.join(file_lists_dir, file_lists_cache)
try:
os.remove(cache_file)
except OSError as exc:
log.critical(
                    'Unable to remove file_lists cache file {0}: {1}'
.format(cache_file, exc)
)
def clean_expired_tokens(opts):
'''
Clean expired tokens from the master
'''
serializer = salt.payload.Serial(opts)
for (dirpath, dirnames, filenames) in os.walk(opts['token_dir']):
for token in filenames:
token_path = os.path.join(dirpath, token)
with salt.utils.fopen(token_path) as token_file:
token_data = serializer.loads(token_file.read())
if 'expire' not in token_data or token_data.get('expire', 0) < time.time():
try:
os.remove(token_path)
except (IOError, OSError):
pass
def clean_pub_auth(opts):
try:
auth_cache = os.path.join(opts['cachedir'], 'publish_auth')
if not os.path.exists(auth_cache):
return
else:
for (dirpath, dirnames, filenames) in os.walk(auth_cache):
for auth_file in filenames:
auth_file_path = os.path.join(dirpath, auth_file)
if not os.path.isfile(auth_file_path):
continue
if os.path.getmtime(auth_file_path) - time.time() > opts['keep_jobs']:
os.remove(auth_file_path)
except (IOError, OSError):
log.error('Unable to delete pub auth file')
def clean_old_jobs(opts):
'''
Clean out the old jobs from the job cache
'''
# TODO: better way to not require creating the masterminion every time?
mminion = salt.minion.MasterMinion(
opts,
states=False,
rend=False,
)
# If the master job cache has a clean_old_jobs, call it
fstr = '{0}.clean_old_jobs'.format(opts['master_job_cache'])
if fstr in mminion.returners:
mminion.returners[fstr]()
def access_keys(opts):
'''
A key needs to be placed in the filesystem with permissions 0400 so
clients are required to run as root.
'''
users = []
keys = {}
acl_users = set(opts['client_acl'].keys())
if opts.get('user'):
acl_users.add(opts['user'])
acl_users.add(salt.utils.get_user())
if HAS_PWD:
for user in pwd.getpwall():
users.append(user.pw_name)
for user in acl_users:
log.info(
'Preparing the {0} key for local communication'.format(
user
)
)
if HAS_PWD:
if user not in users:
try:
user = pwd.getpwnam(user).pw_name
except KeyError:
log.error('ACL user {0} is not available'.format(user))
continue
keyfile = os.path.join(
opts['cachedir'], '.{0}_key'.format(user)
)
if os.path.exists(keyfile):
log.debug('Removing stale keyfile: {0}'.format(keyfile))
os.unlink(keyfile)
key = salt.crypt.Crypticle.generate_key_string()
cumask = os.umask(191)
with salt.utils.fopen(keyfile, 'w+') as fp_:
fp_.write(key)
os.umask(cumask)
# 600 octal: Read and write access to the owner only.
# Write access is necessary since on subsequent runs, if the file
# exists, it needs to be written to again. Windows enforces this.
os.chmod(keyfile, 0o600)
if HAS_PWD:
try:
os.chown(keyfile, pwd.getpwnam(user).pw_uid, -1)
except OSError:
# The master is not being run as root and can therefore not
# chown the key file
pass
keys[user] = key
return keys
def fileserver_update(fileserver):
'''
Update the fileserver backends, requires that a built fileserver object
be passed in
'''
try:
if not fileserver.servers:
log.error(
'No fileservers loaded, the master will not be able to '
'serve files to minions'
)
raise SaltMasterError('No fileserver backends available')
fileserver.update()
except Exception as exc:
log.error(
'Exception {0} occurred in file server update'.format(exc),
exc_info_on_loglevel=logging.DEBUG
)
class AutoKey(object):
'''
Implement the methods to run auto key acceptance and rejection
'''
def __init__(self, opts):
self.opts = opts
def check_permissions(self, filename):
'''
Check if the specified filename has correct permissions
'''
if salt.utils.is_windows():
return True
# After we've ascertained we're not on windows
try:
user = self.opts['user']
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
groups = salt.utils.get_gid_list(user, include_default=False)
except KeyError:
log.error(
'Failed to determine groups for user {0}. The user is not '
'available.\n'.format(
user
)
)
return False
fmode = os.stat(filename)
if os.getuid() == 0:
if fmode.st_uid == uid or fmode.st_gid != gid:
return True
elif self.opts.get('permissive_pki_access', False) \
and fmode.st_gid in groups:
return True
else:
if stat.S_IWOTH & fmode.st_mode:
# don't allow others to write to the file
return False
# check group flags
if self.opts.get('permissive_pki_access', False) and stat.S_IWGRP & fmode.st_mode:
return True
elif stat.S_IWGRP & fmode.st_mode:
return False
# check if writable by group or other
if not (stat.S_IWGRP & fmode.st_mode or
stat.S_IWOTH & fmode.st_mode):
return True
return False
def check_signing_file(self, keyid, signing_file):
'''
Check a keyid for membership in a signing file
'''
if not signing_file or not os.path.exists(signing_file):
return False
if not self.check_permissions(signing_file):
message = 'Wrong permissions for {0}, ignoring content'
log.warn(message.format(signing_file))
return False
with salt.utils.fopen(signing_file, 'r') as fp_:
for line in fp_:
line = line.strip()
if line.startswith('#'):
continue
else:
if salt.utils.expr_match(keyid, line):
return True
return False
def check_autosign_dir(self, keyid):
'''
Check a keyid for membership in a autosign directory.
'''
autosign_dir = os.path.join(self.opts['pki_dir'], 'minions_autosign')
# cleanup expired files
expire_minutes = self.opts.get('autosign_expire_minutes', 10)
if expire_minutes > 0:
min_time = time.time() - (60 * int(expire_minutes))
for root, dirs, filenames in os.walk(autosign_dir):
for f in filenames:
stub_file = os.path.join(autosign_dir, f)
mtime = os.path.getmtime(stub_file)
if mtime < min_time:
log.warn('Autosign keyid expired {0}'.format(stub_file))
os.remove(stub_file)
stub_file = os.path.join(autosign_dir, keyid)
if not os.path.exists(stub_file):
return False
os.remove(stub_file)
return True
def check_autoreject(self, keyid):
'''
Checks if the specified keyid should automatically be rejected.
'''
return self.check_signing_file(
keyid,
self.opts.get('autoreject_file', None)
)
def check_autosign(self, keyid):
'''
Checks if the specified keyid should automatically be signed.
'''
if self.opts['auto_accept']:
return True
if self.check_signing_file(keyid, self.opts.get('autosign_file', None)):
return True
if self.check_autosign_dir(keyid):
return True
return False
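# A minimal sketch (not part of the original module) of driving AutoKey with
# hypothetical opts; check_autoreject and check_autosign read the signing
# files and the minions_autosign directory configured below:
#
#   opts = {'auto_accept': False, 'user': 'salt',
#           'pki_dir': '/etc/salt/pki/master',
#           'autosign_file': '/etc/salt/autosign.conf',
#           'autoreject_file': None}
#   ak = AutoKey(opts)
#   ak.check_autosign('minion01')  # True only if a file/dir check passes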
class RemoteFuncs(object):
'''
    Functions made available to minions; this class includes the raw routines
post validation that make up the minion access to the master
'''
def __init__(self, opts):
self.opts = opts
self.event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
self.serial = salt.payload.Serial(opts)
self.ckminions = salt.utils.minions.CkMinions(opts)
# Create the tops dict for loading external top data
self.tops = salt.loader.tops(self.opts)
# Make a client
self.local = salt.client.get_local_client(mopts=self.opts)
# Create the master minion to access the external job cache
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
self.__setup_fileserver()
def __setup_fileserver(self):
'''
Set the local file objects from the file server interface
'''
fs_ = salt.fileserver.Fileserver(self.opts)
self._serve_file = fs_.serve_file
self._file_hash = fs_.file_hash
self._file_list = fs_.file_list
self._file_list_emptydirs = fs_.file_list_emptydirs
self._dir_list = fs_.dir_list
self._symlink_list = fs_.symlink_list
self._file_envs = fs_.envs
def __verify_minion_publish(self, load):
'''
Verify that the passed information authorized a minion to execute
'''
# Verify that the load is valid
if 'peer' not in self.opts:
return False
if not isinstance(self.opts['peer'], dict):
return False
if any(key not in load for key in ('fun', 'arg', 'tgt', 'ret', 'id')):
return False
# If the command will make a recursive publish don't run
if re.match('publish.*', load['fun']):
return False
# Check the permissions for this minion
perms = []
for match in self.opts['peer']:
if re.match(match, load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer'][match], list):
perms.extend(self.opts['peer'][match])
if ',' in load['fun']:
# 'arg': [['cat', '/proc/cpuinfo'], [], ['foo']]
load['fun'] = load['fun'].split(',')
arg_ = []
for arg in load['arg']:
arg_.append(arg.split())
load['arg'] = arg_
good = self.ckminions.auth_check(
perms,
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'),
publish_validate=True)
if not good:
return False
return True
def _master_opts(self, load):
'''
Return the master options to the minion
'''
mopts = {}
file_roots = {}
envs = self._file_envs()
for saltenv in envs:
if saltenv not in file_roots:
file_roots[saltenv] = []
mopts['file_roots'] = file_roots
if load.get('env_only'):
return mopts
mopts['renderer'] = self.opts['renderer']
mopts['failhard'] = self.opts['failhard']
mopts['state_top'] = self.opts['state_top']
mopts['nodegroups'] = self.opts['nodegroups']
mopts['state_auto_order'] = self.opts['state_auto_order']
mopts['state_events'] = self.opts['state_events']
mopts['state_aggregate'] = self.opts['state_aggregate']
mopts['jinja_lstrip_blocks'] = self.opts['jinja_lstrip_blocks']
mopts['jinja_trim_blocks'] = self.opts['jinja_trim_blocks']
return mopts
def _ext_nodes(self, load, skip_verify=False):
'''
Return the results from an external node classifier if one is
specified
'''
if not skip_verify:
if 'id' not in load:
log.error('Received call for external nodes without an id')
return {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return {}
# Evaluate all configured master_tops interfaces
opts = {}
grains = {}
ret = {}
if 'opts' in load:
opts = load['opts']
if 'grains' in load['opts']:
grains = load['opts']['grains']
for fun in self.tops:
if fun not in self.opts.get('master_tops', {}):
continue
try:
ret.update(self.tops[fun](opts=opts, grains=grains))
except Exception as exc:
# If anything happens in the top generation, log it and move on
log.error(
'Top function {0} failed with error {1} for minion '
'{2}'.format(
fun, exc, load['id']
)
)
return ret
def _mine_get(self, load, skip_verify=False):
'''
Gathers the data from the specified minions' mine
'''
if not skip_verify:
if any(key not in load for key in ('id', 'tgt', 'fun')):
return {}
if 'mine_get' in self.opts:
# If master side acl defined.
if not isinstance(self.opts['mine_get'], dict):
return {}
perms = set()
for match in self.opts['mine_get']:
if re.match(match, load['id']):
if isinstance(self.opts['mine_get'][match], list):
perms.update(self.opts['mine_get'][match])
if not any(re.match(perm, load['fun']) for perm in perms):
return {}
ret = {}
if not salt.utils.verify.valid_id(self.opts, load['id']):
return ret
match_type = load.get('expr_form', 'glob')
if match_type.lower() == 'pillar':
match_type = 'pillar_exact'
if match_type.lower() == 'compound':
match_type = 'compound_pillar_exact'
checker = salt.utils.minions.CkMinions(self.opts)
minions = checker.check_minions(
load['tgt'],
match_type,
greedy=False
)
for minion in minions:
mine = os.path.join(
self.opts['cachedir'],
'minions',
minion,
'mine.p')
try:
with salt.utils.fopen(mine, 'rb') as fp_:
fdata = self.serial.load(fp_).get(load['fun'])
if fdata:
ret[minion] = fdata
except Exception:
continue
return ret
def _mine(self, load, skip_verify=False):
'''
Return the mine data
'''
if not skip_verify:
if 'id' not in load or 'data' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'mine.p')
if not load.get('clear', False):
if os.path.isfile(datap):
with salt.utils.fopen(datap, 'rb') as fp_:
new = self.serial.load(fp_)
if isinstance(new, dict):
new.update(load['data'])
load['data'] = new
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(load['data']))
return True
def _mine_delete(self, load):
'''
Allow the minion to delete a specific function from its own mine
'''
if 'id' not in load or 'fun' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return False
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
with salt.utils.fopen(datap, 'rb') as fp_:
mine_data = self.serial.load(fp_)
if isinstance(mine_data, dict):
if mine_data.pop(load['fun'], False):
with salt.utils.fopen(datap, 'w+b') as fp_:
fp_.write(self.serial.dumps(mine_data))
except OSError:
return False
return True
def _mine_flush(self, load, skip_verify=False):
'''
Allow the minion to delete all of its own mine contents
'''
if not skip_verify and 'id' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
return False
datap = os.path.join(cdir, 'mine.p')
if os.path.isfile(datap):
try:
os.remove(datap)
except OSError:
return False
return True
def _file_recv(self, load):
'''
Allows minions to send files to the master, files are sent to the
master file cache
'''
if any(key not in load for key in ('id', 'path', 'loc')):
return False
if not self.opts['file_recv'] or os.path.isabs(load['path']):
return False
if os.path.isabs(load['path']) or '../' in load['path']:
# Can overwrite master files!!
return False
if not salt.utils.verify.valid_id(self.opts, load['id']):
return False
file_recv_max_size = 1024*1024 * self.opts['file_recv_max_size']
if 'loc' in load and load['loc'] < 0:
log.error('Invalid file pointer: load[loc] < 0')
return False
if len(load['data']) + load.get('loc', 0) > file_recv_max_size:
log.error(
'Exceeding file_recv_max_size limit: {0}'.format(
file_recv_max_size
)
)
return False
# Normalize Windows paths
normpath = load['path']
if ':' in normpath:
# make sure double backslashes are normalized
normpath = normpath.replace('\\', '/')
normpath = os.path.normpath(normpath)
cpath = os.path.join(
self.opts['cachedir'],
'minions',
load['id'],
'files',
normpath)
cdir = os.path.dirname(cpath)
if not os.path.isdir(cdir):
try:
os.makedirs(cdir)
except os.error:
pass
if os.path.isfile(cpath) and load['loc'] != 0:
mode = 'ab'
else:
mode = 'wb'
with salt.utils.fopen(cpath, mode) as fp_:
if load['loc']:
fp_.seek(load['loc'])
fp_.write(load['data'])
return True
def _pillar(self, load):
'''
Return the pillar data for the minion
'''
if any(key not in load for key in ('id', 'grains')):
return False
pillar = salt.pillar.Pillar(
self.opts,
load['grains'],
load['id'],
load.get('saltenv', load.get('env')),
load.get('ext'),
self.mminion.functions,
pillar=load.get('pillar_override', {}))
pillar_dirs = {}
data = pillar.compile_pillar(pillar_dirs=pillar_dirs)
if self.opts.get('minion_data_cache', False):
cdir = os.path.join(self.opts['cachedir'], 'minions', load['id'])
if not os.path.isdir(cdir):
os.makedirs(cdir)
datap = os.path.join(cdir, 'data.p')
tmpfh, tmpfname = tempfile.mkstemp(dir=cdir)
os.close(tmpfh)
with salt.utils.fopen(tmpfname, 'w+b') as fp_:
fp_.write(
self.serial.dumps(
{'grains': load['grains'],
'pillar': data})
)
# On Windows, os.rename will fail if the destination file exists.
salt.utils.atomicfile.atomic_rename(tmpfname, datap)
return data
def _minion_event(self, load):
'''
Receive an event from the minion and fire it on the master event
interface
'''
if 'id' not in load:
return False
if 'events' not in load and ('tag' not in load or 'data' not in load):
return False
if 'events' in load:
for event in load['events']:
self.event.fire_event(event, event['tag']) # old dup event
if load.get('pretag') is not None:
if 'data' in event:
self.event.fire_event(event['data'], tagify(event['tag'], base=load['pretag']))
else:
self.event.fire_event(event, tagify(event['tag'], base=load['pretag']))
else:
tag = load['tag']
self.event.fire_event(load, tag)
return True
def _return(self, load):
'''
Handle the return data sent from the minions
'''
# Generate EndTime
endtime = salt.utils.jid.jid_to_time(salt.utils.jid.gen_jid())
# If the return data is invalid, just ignore it
if any(key not in load for key in ('return', 'jid', 'id')):
return False
if load['jid'] == 'req':
# The minion is returning a standalone job, request a jobid
prep_fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[prep_fstr](nocache=load.get('nocache', False))
# save the load, since we don't have it
saveload_fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[saveload_fstr](load['jid'], load)
log.info('Got return from {id} for job {jid}'.format(**load))
self.event.fire_event(load, load['jid']) # old dup event
self.event.fire_event(load, tagify([load['jid'], 'ret', load['id']], 'job'))
self.event.fire_ret_load(load)
if not self.opts['job_cache'] or self.opts.get('ext_job_cache'):
return
fstr = '{0}.update_endtime'.format(self.opts['master_job_cache'])
if (self.opts.get('job_cache_store_endtime')
and fstr in self.mminion.returners):
self.mminion.returners[fstr](load['jid'], endtime)
fstr = '{0}.returner'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load)
def _syndic_return(self, load):
'''
Receive a syndic minion return and format it to look like returns from
individual minions.
'''
# Verify the load
if any(key not in load for key in ('return', 'jid', 'id')):
return None
# if we have a load, save it
if 'load' in load:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load['load'])
# Format individual return loads
for key, item in six.iteritems(load['return']):
ret = {'jid': load['jid'],
'id': key,
'return': item}
if 'out' in load:
ret['out'] = load['out']
self._return(ret)
def minion_runner(self, load):
'''
Execute a runner from a minion, return the runner's function data
'''
if 'peer_run' not in self.opts:
return {}
if not isinstance(self.opts['peer_run'], dict):
return {}
if any(key not in load for key in ('fun', 'arg', 'id')):
return {}
perms = set()
for match in self.opts['peer_run']:
if re.match(match, load['id']):
# This is the list of funcs/modules!
if isinstance(self.opts['peer_run'][match], list):
perms.update(self.opts['peer_run'][match])
good = False
for perm in perms:
if re.match(perm, load['fun']):
good = True
if not good:
# The minion is not who it says it is!
# We don't want to listen to it!
log.warn(
'Minion id {0} is not who it says it is!'.format(
load['id']
)
)
return {}
# Prepare the runner object
opts = {'fun': load['fun'],
'arg': load['arg'],
'id': load['id'],
'doc': False,
'conf_file': self.opts['conf_file']}
opts.update(self.opts)
runner = salt.runner.Runner(opts)
return runner.run()
def pub_ret(self, load, skip_verify=False):
'''
Request the return data from a specific jid, only allowed
        if the requesting minion also initiated the execution.
'''
if not skip_verify and any(key not in load for key in ('jid', 'id')):
return {}
else:
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, load['jid'])
with salt.utils.fopen(jid_fn, 'r') as fp_:
if not load['id'] == fp_.read():
return {}
return self.local.get_cache_returns(load['jid'])
def minion_pub(self, load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(load):
return {}
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'expr_form': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
'id': load['id'],
}
if 'tgt_type' in load:
if load['tgt_type'].startswith('node'):
if load['tgt'] in self.opts['nodegroups']:
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['expr_form_type'] = 'compound'
pub_load['expr_form'] = load['tgt_type']
else:
return {}
else:
pub_load['expr_form'] = load['tgt_type']
ret = {}
ret['jid'] = self.local.cmd_async(**pub_load)
ret['minions'] = self.ckminions.check_minions(
load['tgt'],
pub_load['expr_form'])
auth_cache = os.path.join(
self.opts['cachedir'],
'publish_auth')
if not os.path.isdir(auth_cache):
os.makedirs(auth_cache)
jid_fn = os.path.join(auth_cache, str(ret['jid']))
with salt.utils.fopen(jid_fn, 'w+') as fp_:
fp_.write(load['id'])
return ret
def minion_publish(self, load):
'''
Publish a command initiated from a minion, this method executes minion
restrictions so that the minion publication will only work if it is
enabled in the config.
The configuration on the master allows minions to be matched to
salt functions, so the minions can only publish allowed salt functions
The config will look like this:
peer:
.*:
- .*
This configuration will enable all minions to execute all commands.
peer:
foo.example.com:
- test.*
This configuration will only allow the minion foo.example.com to
execute commands from the test module
'''
if not self.__verify_minion_publish(load):
return {}
# Set up the publication payload
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'expr_form': load.get('tgt_type', 'glob'),
'tgt': load['tgt'],
'ret': load['ret'],
'id': load['id'],
}
if 'tmo' in load:
try:
pub_load['timeout'] = int(load['tmo'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
load['tmo'])
log.warn(msg)
return {}
if 'timeout' in load:
try:
pub_load['timeout'] = int(load['timeout'])
except ValueError:
msg = 'Failed to parse timeout value: {0}'.format(
load['timeout'])
log.warn(msg)
return {}
if 'tgt_type' in load:
if load['tgt_type'].startswith('node'):
if load['tgt'] in self.opts['nodegroups']:
pub_load['tgt'] = self.opts['nodegroups'][load['tgt']]
pub_load['expr_form_type'] = 'compound'
else:
return {}
else:
pub_load['expr_form'] = load['tgt_type']
pub_load['raw'] = True
ret = {}
for minion in self.local.cmd_iter(**pub_load):
if load.get('form', '') == 'full':
data = minion
if 'jid' in minion:
ret['__jid__'] = minion['jid']
data['ret'] = data.pop('return')
ret[minion['id']] = data
else:
ret[minion['id']] = minion['return']
if 'jid' in minion:
ret['__jid__'] = minion['jid']
for key, val in six.iteritems(self.local.get_cache_returns(ret['__jid__'])):
if key not in ret:
ret[key] = val
if load.get('form', '') != 'full':
ret.pop('__jid__')
return ret
def revoke_auth(self, load):
'''
Allow a minion to request revocation of its own key
'''
if 'id' not in load:
return False
keyapi = salt.key.Key(self.opts)
keyapi.delete_key(load['id'],
preserve_minions=load.get('preserve_minion_cache',
False))
return True
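# A sketch (not part of the original module) of a minion-side mine update load
# as consumed by RemoteFuncs._mine; field values are hypothetical:
#
#   load = {'id': 'web01',
#           'data': {'network.ip_addrs': ['10.0.0.1']},
#           'clear': False}
#   # _mine(load) merges this dict into <cachedir>/minions/web01/mine.p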
class LocalFuncs(object):
'''
Set up methods for use only from the local system
'''
# The ClearFuncs object encapsulates the functions that can be executed in
# the clear:
# publish (The publish from the LocalClient)
# _auth
def __init__(self, opts, key):
self.opts = opts
self.serial = salt.payload.Serial(opts)
self.key = key
# Create the event manager
self.event = salt.utils.event.get_event(
'master',
self.opts['sock_dir'],
self.opts['transport'],
opts=self.opts,
listen=False)
# Make a client
self.local = salt.client.get_local_client(mopts=self.opts)
# Make an minion checker object
self.ckminions = salt.utils.minions.CkMinions(opts)
# Make an Auth object
self.loadauth = salt.auth.LoadAuth(opts)
# Stand up the master Minion to access returner data
self.mminion = salt.minion.MasterMinion(
self.opts,
states=False,
rend=False)
# Make a wheel object
self.wheel_ = salt.wheel.Wheel(opts)
def runner(self, load):
'''
Send a master control function back to the runner system
'''
if 'token' in load:
try:
token = self.loadauth.get_tok(load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.runner_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(
fun,
load.get('kwarg', {}),
token['name'])
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
if 'eauth' not in load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(load)
if not (name in self.opts['external_auth'][load['eauth']]) | ('*' in self.opts['external_auth'][load['eauth']]):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.runner_check(
self.opts['external_auth'][load['eauth']][name] if name in self.opts['external_auth'][load['eauth']] else self.opts['external_auth'][load['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
fun = load.pop('fun')
runner_client = salt.runner.RunnerClient(self.opts)
return runner_client.async(fun,
load.get('kwarg', {}),
load.get('username', 'UNKNOWN'))
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
except Exception as exc:
log.error(
'Exception occurred in the runner system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def wheel(self, load):
'''
Send a master control function back to the wheel system
'''
# All wheel ops pass through eauth
if 'token' in load:
try:
token = self.loadauth.get_tok(load['token'])
except Exception as exc:
msg = 'Exception occurred when generating auth token: {0}'.format(
exc)
log.error(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if not token:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
if token['eauth'] not in self.opts['external_auth']:
msg = 'Authentication failure of type "token" occurred.'
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "token" occurred for '
'user {0}.').format(token['name'])
log.warning(msg)
return dict(error=dict(name='TokenAuthenticationError',
message=msg))
jid = salt.utils.jid.gen_jid()
fun = load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': token['name']}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(exc)
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
data['success'] = False
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
if 'eauth' not in load:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
try:
name = self.loadauth.load_name(load)
if not ((name in self.opts['external_auth'][load['eauth']]) |
('*' in self.opts['external_auth'][load['eauth']])):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
if not self.loadauth.time_auth(load):
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
good = self.ckminions.wheel_check(
self.opts['external_auth'][load['eauth']][name]
if name in self.opts['external_auth'][load['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'])
if not good:
msg = ('Authentication failure of type "eauth" occurred for '
'user {0}.').format(load.get('username', 'UNKNOWN'))
log.warning(msg)
return dict(error=dict(name='EauthAuthenticationError',
message=msg))
jid = salt.utils.jid.gen_jid()
fun = load.pop('fun')
tag = tagify(jid, prefix='wheel')
data = {'fun': "wheel.{0}".format(fun),
'jid': jid,
'tag': tag,
'user': load.get('username', 'UNKNOWN')}
try:
self.event.fire_event(data, tagify([jid, 'new'], 'wheel'))
ret = self.wheel_.call_func(fun, **load)
data['return'] = ret
data['success'] = True
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error('Exception occurred while '
'introspecting {0}: {1}'.format(fun, exc))
data['return'] = 'Exception occurred in wheel {0}: {1}: {2}'.format(
fun,
exc.__class__.__name__,
exc,
)
self.event.fire_event(data, tagify([jid, 'ret'], 'wheel'))
return {'tag': tag,
'data': data}
except Exception as exc:
log.error(
'Exception occurred in the wheel system: {0}'.format(exc)
)
return dict(error=dict(name=exc.__class__.__name__,
args=exc.args,
message=str(exc)))
def mk_token(self, load):
'''
Create and return an authentication token, the clear load needs to
contain the eauth key and the needed authentication creds.
'''
if 'eauth' not in load:
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if load['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning('Authentication failure of type "eauth" occurred.')
return ''
try:
name = self.loadauth.load_name(load)
if not ((name in self.opts['external_auth'][load['eauth']]) |
('*' in self.opts['external_auth'][load['eauth']])):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
if not self.loadauth.time_auth(load):
log.warning('Authentication failure of type "eauth" occurred.')
return ''
return self.loadauth.mk_token(load)
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
def get_token(self, load):
'''
Return the name associated with a token or False if the token is invalid
'''
if 'token' not in load:
return False
return self.loadauth.get_tok(load['token'])
def publish(self, load):
'''
This method sends out publications to the minions, it can only be used
by the LocalClient.
'''
extra = load.get('kwargs', {})
# check blacklist/whitelist
good = True
# Check if the user is blacklisted
for user_re in self.opts['client_acl_blacklist'].get('users', []):
if re.match(user_re, load['user']):
good = False
break
# check if the cmd is blacklisted
for module_re in self.opts['client_acl_blacklist'].get('modules', []):
# if this is a regular command, its a single function
if isinstance(load['fun'], str):
funs_to_check = [load['fun']]
# if this a compound function
else:
funs_to_check = load['fun']
for fun in funs_to_check:
if re.match(module_re, fun):
good = False
break
if good is False:
log.error(
'{user} does not have permissions to run {function}. Please '
'contact your local administrator if you believe this is in '
'error.\n'.format(
user=load['user'],
function=load['fun']
)
)
return ''
# to make sure we don't step on anyone else's toes
del good
# Check for external auth calls
if extra.get('token', False):
# A token was passed, check it
try:
token = self.loadauth.get_tok(extra['token'])
except Exception as exc:
log.error(
'Exception occurred when generating auth token: {0}'.format(
exc
)
)
return ''
if not token:
log.warning('Authentication failure of type "token" occurred. \
Token could not be retrieved.')
return ''
if token['eauth'] not in self.opts['external_auth']:
log.warning('Authentication failure of type "token" occurred. \
Authentication type of {0} not present.').format(token['eauth'])
return ''
if not ((token['name'] in self.opts['external_auth'][token['eauth']]) |
('*' in self.opts['external_auth'][token['eauth']])):
log.warning('Authentication failure of type "token" occurred. \
Token does not verify against eauth provider: {0}').format(
self.opts['external_auth'])
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][token['eauth']][token['name']]
if token['name'] in self.opts['external_auth'][token['eauth']]
else self.opts['external_auth'][token['eauth']]['*'],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "token" occurred.'
)
return ''
load['user'] = token['name']
log.debug('Minion tokenized user = "{0}"'.format(load['user']))
elif 'eauth' in extra:
if extra['eauth'] not in self.opts['external_auth']:
# The eauth system is not enabled, fail
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
try:
name = self.loadauth.load_name(extra)
if not ((name in self.opts['external_auth'][extra['eauth']]) |
('*' in self.opts['external_auth'][extra['eauth']])):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
if not self.loadauth.time_auth(extra):
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
except Exception as exc:
log.error(
'Exception occurred while authenticating: {0}'.format(exc)
)
return ''
good = self.ckminions.auth_check(
self.opts['external_auth'][extra['eauth']][name]
if name in self.opts['external_auth'][extra['eauth']]
else self.opts['external_auth'][extra['eauth']]['*'],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "eauth" occurred.'
)
return ''
load['user'] = name
# Verify that the caller has root on master
elif 'user' in load:
if load['user'].startswith('sudo_'):
# If someone can sudo, allow them to act as root
if load.get('key', 'invalid') == self.key.get('root'):
load.pop('key')
elif load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == self.opts.get('user', 'root'):
if load.pop('key') != self.key[self.opts.get('user', 'root')]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == 'root':
if load.pop('key') != self.key.get(self.opts.get('user', 'root')):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
elif load['user'] == salt.utils.get_user():
if load.pop('key') != self.key.get(load['user']):
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if load['user'] in self.key:
# User is authorised, check key and check perms
if load.pop('key') != self.key[load['user']]:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
if load['user'] not in self.opts['client_acl']:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
good = self.ckminions.auth_check(
self.opts['client_acl'][load['user']],
load['fun'],
load['tgt'],
load.get('tgt_type', 'glob'))
if not good:
# Accept find_job so the CLI will function cleanly
if load['fun'] != 'saltutil.find_job':
log.warning(
'Authentication failure of type "user" '
'occurred.'
)
return ''
else:
log.warning(
'Authentication failure of type "user" occurred.'
)
return ''
else:
if load.pop('key') != self.key[salt.utils.get_user()]:
log.warning(
'Authentication failure of type "other" occurred.'
)
return ''
# Retrieve the minions list
minions = self.ckminions.check_minions(
load['tgt'],
load.get('tgt_type', 'glob')
)
# If we order masters (via a syndic), don't short circuit if no minions
# are found
if not self.opts.get('order_masters'):
# Check for no minions
if not minions:
return {
'enc': 'clear',
'load': {
'jid': None,
'minions': minions
}
}
# Retrieve the jid
if not load['jid']:
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
load['jid'] = self.mminion.returners[fstr](nocache=extra.get('nocache', False))
self.event.fire_event({'minions': minions}, load['jid'])
new_job_load = {
'jid': load['jid'],
'tgt_type': load['tgt_type'],
'tgt': load['tgt'],
'user': load['user'],
'fun': load['fun'],
'arg': load['arg'],
'minions': minions,
}
# Announce the job on the event bus
self.event.fire_event(new_job_load, 'new_job') # old dup event
self.event.fire_event(new_job_load, tagify([load['jid'], 'new'], 'job'))
# Save the invocation information
if self.opts['ext_job_cache']:
try:
fstr = '{0}.save_load'.format(self.opts['ext_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
except KeyError:
log.critical(
'The specified returner used for the external job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['ext_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# always write out to the master job cache
try:
fstr = '{0}.save_load'.format(self.opts['master_job_cache'])
self.mminion.returners[fstr](load['jid'], load)
except KeyError:
log.critical(
'The specified returner used for the master job cache '
'"{0}" does not have a save_load function!'.format(
self.opts['master_job_cache']
)
)
except Exception:
log.critical(
'The specified returner threw a stack trace:\n',
exc_info=True
)
# Altering the contents of the publish load is serious!! Changes here
# break compatibility with minion/master versions and even tiny
# additions can have serious implications on the performance of the
# publish commands.
#
# In short, check with Thomas Hatch before you even think about
# touching this stuff, we can probably do what you want to do another
# way that won't have a negative impact.
pub_load = {
'fun': load['fun'],
'arg': load['arg'],
'tgt': load['tgt'],
'jid': load['jid'],
'ret': load['ret'],
}
if 'id' in extra:
pub_load['id'] = extra['id']
if 'tgt_type' in load:
pub_load['tgt_type'] = load['tgt_type']
if 'to' in load:
pub_load['to'] = load['to']
if 'kwargs' in load:
if 'ret_config' in load['kwargs']:
pub_load['ret_config'] = load['kwargs'].get('ret_config')
if 'metadata' in load['kwargs']:
pub_load['metadata'] = load['kwargs'].get('metadata')
if 'user' in load:
log.info(
'User {user} Published command {fun} with jid {jid}'.format(
**load
)
)
pub_load['user'] = load['user']
else:
log.info(
'Published command {fun} with jid {jid}'.format(
**load
)
)
log.debug('Published command details {0}'.format(pub_load))
return {'ret': {
'jid': load['jid'],
'minions': minions
},
'pub': pub_load
}
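# --- Illustrative usage sketch (not part of the original file) ---
# Assumptions, all hypothetical: `clear_funcs` is an instance of the class
# above, the master config enables the 'pam' eauth backend, and `master_key`
# holds the key read from the master's .root_key file.
#
#   tok = clear_funcs.mk_token({'eauth': 'pam',
#                               'username': 'saltdev',
#                               'password': 'saltdev'})
#   # mk_token returns '' on any authentication failure
#   pub = clear_funcs.publish({'user': 'root', 'key': master_key,
#                              'fun': 'test.ping', 'tgt': '*', 'arg': [],
#                              'jid': '', 'ret': '', 'kwargs': {}})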
| 39.253547
| 173
| 0.49604
| 6,893
| 63,630
| 4.476861
| 0.098796
| 0.036553
| 0.027577
| 0.032373
| 0.547879
| 0.500891
| 0.476717
| 0.438413
| 0.407499
| 0.389643
| 0
| 0.002932
| 0.394232
| 63,630
| 1,620
| 174
| 39.277778
| 0.797665
| 0.055603
| 0
| 0.510989
| 0
| 0
| 0.154149
| 0.010159
| 0
| 0
| 0
| 0.000617
| 0
| 0
| null | null | 0.002355
| 0.027473
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cd4c163b81a2a9f7a9f4fb51454b97b7933bffd
| 1,565
|
py
|
Python
|
dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py
|
dnguyenngoc/analytic
|
d609a93e96e7c546ad3ee3ebd4e13309ddf575f8
|
[
"MIT"
] | null | null | null |
dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py
|
dnguyenngoc/analytic
|
d609a93e96e7c546ad3ee3ebd4e13309ddf575f8
|
[
"MIT"
] | null | null | null |
dwh_analytic/dags/data_warehouse_prod/schema/dim_process.py
|
dnguyenngoc/analytic
|
d609a93e96e7c546ad3ee3ebd4e13309ddf575f8
|
[
"MIT"
] | null | null | null |
resource = 'human and machine'
class DimProcess:
    def __init__(
        self,
        *,
        process_key: int,
        module: str,
        type: str,
        step: str,
        sub_step: str,
        resource: str = 'human',
    ):
        # The original __init__ had no body (a syntax error); store the fields.
        self.process_key = process_key
        self.module = module
        self.type = type
        self.step = step
        self.sub_step = sub_step
        self.resource = resource
    def steps(self):
        # Renamed from `step` so it no longer collides with the attribute above.
        return ['qc', 'auto_qc', 'apr_qc', 'keyer_input']
    def example_data(self):
        # The original built one dict with duplicate keys, silently losing the
        # first record; a list of rows keeps both examples.
        return [
            {
                'process_key': 1,
                'resource': 'human',
                'module': 'keyed_data',
                'step': 'qc',
                'sub_step': None,
            },
            {
                'process_key': 2,
                'resource': 'machine',
                'module': 'keyed_data',
                'step': 'transform',
                'sub_step': None,
            },
        ]
class FactDataExtractionModel:
def __init__(
self,
        *,  # keyword-only marker (the original '*kwargs' collected stray positionals)
project_id: str,
document_id: str,
doc_set_id: str,
last_modified_time_key: int,
last_modified_date_key: int,
user_name: str = None,
process_key: int,
field_name: str,
field_value: str = None,
last_modified_timestamp: str
):
self.project_id = project_id
self.document_id = document_id
self.doc_set_id = doc_set_id
self.last_modified_time_key = last_modified_time_key
self.last_modified_date_key = last_modified_date_key
self.user_name = user_name
self.process_key = process_key
self.field_name = field_name
self.field_value = field_value
self.last_modified_timestamp = last_modified_timestamp
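# Illustrative construction of the two models above (all values are made up):
if __name__ == '__main__':
    process = DimProcess(process_key=1, module='keyed_data', type='ocr',
                         step='qc', sub_step=None, resource='human')
    fact = FactDataExtractionModel(
        project_id='p-001', document_id='d-001', doc_set_id='ds-001',
        last_modified_time_key=120000, last_modified_date_key=20201018,
        user_name='keyer01', process_key=process.process_key,
        field_name='invoice_total', field_value='42.00',
        last_modified_timestamp='2020-10-18T12:00:00')
    print(fact.field_name, fact.field_value)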
| 27.946429
| 62
| 0.548882
| 173
| 1,565
| 4.572254
| 0.271676
| 0.136536
| 0.030341
| 0.072061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001988
| 0.357189
| 1,565
| 56
| 62
| 27.946429
| 0.784294
| 0
| 0
| 0.269231
| 0
| 0
| 0.104725
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cd6ff8a4443655a42df05eccf62b0e804763fb0
| 2,898
|
py
|
Python
|
service.py
|
Tigge/script.filmtipset-grade
|
a5b438dc478d6ef40f611585e9cd196c2ff49cf6
|
[
"BSD-2-Clause"
] | 1
|
2015-02-19T08:45:57.000Z
|
2015-02-19T08:45:57.000Z
|
service.py
|
Tigge/script.filmtipset-grade
|
a5b438dc478d6ef40f611585e9cd196c2ff49cf6
|
[
"BSD-2-Clause"
] | 1
|
2015-02-01T19:28:17.000Z
|
2015-03-18T22:27:14.000Z
|
service.py
|
Tigge/script.filmtipset-grade
|
a5b438dc478d6ef40f611585e9cd196c2ff49cf6
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2013, Gustav Tiger
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import xbmc
import xbmcaddon
import xbmcgui
import filmtipset
FILMTIPSET_ACCESS_KEY = "7ndg3Q3qwW8dPzbJMrB5Rw"
class XBMCPlayer(xbmc.Player):
    def __init__(self, *args):
        xbmc.Player.__init__(self)  # initialise the underlying Kodi player
        self.imdb = None
        self.time = None
        self.time_total = None
def onPlayBackStarted(self):
self.update()
def onPlayBackEnded(self):
self.onDone()
def onPlayBackStopped(self):
self.onDone()
def update(self):
info = self.getVideoInfoTag()
self.imdb = info.getIMDBNumber()
self.time = self.getTime()
self.time_total = self.getTotalTime()
def onDone(self):
print "getTime", self.time
print "getTotalTime", self.time_total
print "imdb", self.imdb
addon = xbmcaddon.Addon(id='script.filmtipset-grade')
key = addon.getSetting("key")
user = addon.getSetting("user")
grader = filmtipset.Filmtipset(FILMTIPSET_ACCESS_KEY, key, user)
movie = grader.get_movie_imdb(self.imdb)
print movie
if movie["grade"]["type"] != "seen":
dialog = xbmcgui.Dialog()
grade = dialog.select("Grade " + movie["orgname"] + " on filmtipset:",
["Skip", "1", "2", "3", "4", "5"])
if grade != 0:
print dialog, grade
print grader.grade(movie["id"], grade)
player = XBMCPlayer()
while(not xbmc.abortRequested):
if player.isPlayingVideo():
player.update()
xbmc.sleep(1000)
| 34.094118
| 82
| 0.670807
| 352
| 2,898
| 5.485795
| 0.463068
| 0.024858
| 0.020197
| 0.023822
| 0.095287
| 0.07043
| 0.07043
| 0.07043
| 0.07043
| 0.07043
| 0
| 0.00866
| 0.242926
| 2,898
| 84
| 83
| 34.5
| 0.871468
| 0.446515
| 0
| 0.046512
| 0
| 0
| 0.080482
| 0.028517
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.093023
| null | null | 0.139535
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7ce16cb7ee2c1e090289468a70fd88401aba8ddc
| 339
|
py
|
Python
|
examples/xml-rpc/echoserver.py
|
keobox/yap101
|
26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9
|
[
"MIT"
] | null | null | null |
examples/xml-rpc/echoserver.py
|
keobox/yap101
|
26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9
|
[
"MIT"
] | null | null | null |
examples/xml-rpc/echoserver.py
|
keobox/yap101
|
26913da9f61ef3d0d9cb3ef54bbfc451a9ef9de9
|
[
"MIT"
] | null | null | null |
import SimpleXMLRPCServer as xmls
def echo(msg):
print 'Got', msg
return msg
class echoserver(xmls.SimpleXMLRPCServer):
allow_reuse_address = True
server = echoserver(('127.0.0.1', 8001))
server.register_function(echo, 'echo')
print 'Listening on port 8001'
try:
server.serve_forever()
except KeyboardInterrupt:  # was a bare 'except:'; close cleanly on Ctrl-C
server.server_close()
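# A minimal client for the echo server above (hypothetical second script;
# Python 2, matching the server's SimpleXMLRPCServer import):
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8001')
#   print proxy.echo('hello')   # server side logs: Got hello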
| 19.941176
| 42
| 0.728614
| 45
| 339
| 5.377778
| 0.688889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049123
| 0.159292
| 339
| 16
| 43
| 21.1875
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0.112094
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.076923
| null | null | 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7ce32e38118a236e7f22400e28b670e7f2079e82
| 869
|
py
|
Python
|
practice/2008/qualification/C-Fly_swatter/c.py
|
victorWeiFreelancer/CodeJam
|
edb8f921860a35985823cb3dbd3ebec8a8f3c12f
|
[
"MIT"
] | null | null | null |
practice/2008/qualification/C-Fly_swatter/c.py
|
victorWeiFreelancer/CodeJam
|
edb8f921860a35985823cb3dbd3ebec8a8f3c12f
|
[
"MIT"
] | null | null | null |
practice/2008/qualification/C-Fly_swatter/c.py
|
victorWeiFreelancer/CodeJam
|
edb8f921860a35985823cb3dbd3ebec8a8f3c12f
|
[
"MIT"
] | null | null | null |
import sys
sys.dont_write_bytecode = True
def hitP(f, R, t, r, g):
    if f >= g/2:
        return 0.0
    missArea = 0.0
    gridL = g + 2*r
    nGrids = (R - t) // gridL
    missGridSideLength = g - 2*f
    print("gridL %.12f; nGrids %d" % (gridL, nGrids))
    indentSquareLength = nGrids*gridL
    remain = (R - t) - indentSquareLength
    missArea += (nGrids * missGridSideLength)**2
    remainMissArea = 0.0
    # The two 'if' lines below had no bodies in the original (a syntax error).
    # The remainder-strip term is a labelled placeholder that only keeps the
    # script runnable; it is not the full fly-swatter geometry.
    if remain - 2*r > 2*f:
        if remain > g + r:
            remainMissArea = (remain - 2*r - 2*f)**2
    missArea += remainMissArea
    totalArea = R**2 / 4.0
    print("missed a %.12f, total area %.12f" % (missArea, (R-t)**2))
    return (totalArea - missArea) / (R-t)**2
def main():
numTestCases = int(input())
for i in range(numTestCases):
f, R, t, r, g = list(map(float, input().split()))
p = hitP(f, R, t, r, g)
print( "Case #%d: %.6f" %(i+1, p))
if __name__ == '__main__':
main()
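# Example direct call with made-up parameters (per the problem statement:
# f = fly radius, R = racquet radius, t = rim thickness, r = string radius,
# g = gap between strings):
#   hitP(0.25, 1.0, 0.1, 0.05, 0.5)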
| 25.558824
| 69
| 0.537399
| 127
| 869
| 3.598425
| 0.393701
| 0.030635
| 0.019694
| 0.026258
| 0.050328
| 0.039387
| 0
| 0
| 0
| 0
| 0
| 0.040519
| 0.289988
| 869
| 34
| 70
| 25.558824
| 0.700162
| 0
| 0
| 0
| 0
| 0
| 0.087356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.037037
| null | null | 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
7cec971c07d5ed98dc62f84b80e44472db92d7d3
| 531
|
py
|
Python
|
Uber/validExpression.py
|
Nithanaroy/random_scripts
|
908e539e2b7050a09e03b4fc0d2621b23733d65a
|
[
"MIT"
] | null | null | null |
Uber/validExpression.py
|
Nithanaroy/random_scripts
|
908e539e2b7050a09e03b4fc0d2621b23733d65a
|
[
"MIT"
] | null | null | null |
Uber/validExpression.py
|
Nithanaroy/random_scripts
|
908e539e2b7050a09e03b4fc0d2621b23733d65a
|
[
"MIT"
] | null | null | null |
def main(expr):
    openingParams = '({['
    closingParams = ')}]'
    stack = []
    for c in expr:
        if c in openingParams:
            stack.append(c)
        elif c in closingParams:
            if not stack:
                # a closer with nothing open can never match
                return False
            topOfStack = stack.pop()
            openingIndex = openingParams.find(topOfStack)
            closingIndex = closingParams.find(c)
            if openingIndex != closingIndex:  # value comparison, not 'is not'
                return False
    return len(stack) == 0
if __name__ == '__main__':
    print(main('{(abc})'))
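    # a few more checks (expected results in comments):
    print(main('{[()]}'))   # True  -- properly nested
    print(main(')('))       # False -- closer arrives with nothing open
    print(main('((('))      # False -- unclosed openers remain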
| 25.285714
| 57
| 0.551789
| 53
| 531
| 5.377358
| 0.509434
| 0.031579
| 0.091228
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002874
| 0.344633
| 531
| 20
| 58
| 26.55
| 0.816092
| 0
| 0
| 0.111111
| 0
| 0
| 0.039548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
6b052c373e2583931e7668595c831adfd5fed432
| 491
|
py
|
Python
|
read_sensor.py
|
shivupoojar/openfaas-pi
|
5eda501368a1ac321954cb2aaf58be617977bd58
|
[
"Apache-2.0"
] | 1
|
2020-11-24T03:31:26.000Z
|
2020-11-24T03:31:26.000Z
|
read_sensor.py
|
shivupoojar/openfaas-pi
|
5eda501368a1ac321954cb2aaf58be617977bd58
|
[
"Apache-2.0"
] | null | null | null |
read_sensor.py
|
shivupoojar/openfaas-pi
|
5eda501368a1ac321954cb2aaf58be617977bd58
|
[
"Apache-2.0"
] | null | null | null |
import sys  # needed for the sys.exit() call below
import requests
from sense_hat import SenseHat
import smbus
import time
while True:
try:
pressure=0
sense = SenseHat()
pressure = sense.get_pressure()
data = {'pressure':pressure}
print(pressure)
#send http request to sense serverless function with pressure
#data
r=requests.post('http://127.0.0.1:8080/function/sensor',data)
print(r.text)
sense=SenseHat()
sense.show_message(r.text)
except KeyboardInterrupt:
sys.exit()
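# Sketch of the receiving "sensor" function's handler (hypothetical; OpenFaaS
# classic Python templates expose a handle(req) entry point):
#   def handle(req):
#       # req is the urlencoded request body, e.g. "pressure=1013.25"
#       return "received: {}".format(req)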
| 21.347826
| 69
| 0.672098
| 63
| 491
| 5.190476
| 0.571429
| 0.079511
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028947
| 0.226069
| 491
| 22
| 70
| 22.318182
| 0.831579
| 0.130346
| 0
| 0.117647
| 0
| 0
| 0.105882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.235294
| null | null | 0.117647
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
6b2b5f9728064d787e0b3474fa79a57d993dda3b
| 594
|
py
|
Python
|
main.py
|
Meat0Project/ChatBot
|
35ebadc71b100d861f9c9e211e1e751175f47c50
|
[
"MIT"
] | 4
|
2020-10-30T07:46:39.000Z
|
2020-10-30T18:20:57.000Z
|
main.py
|
Meat0Project/ChatBot
|
35ebadc71b100d861f9c9e211e1e751175f47c50
|
[
"MIT"
] | null | null | null |
main.py
|
Meat0Project/ChatBot
|
35ebadc71b100d861f9c9e211e1e751175f47c50
|
[
"MIT"
] | null | null | null |
'''
Made by - Aditya Mangal
Purpose - Python mini project
Date - 18 October 2020
'''
from chatterbot import ChatBot
from chatterbot.trainers import ChatterBotCorpusTrainer
from termcolor import cprint  # was misspelled 'form'
import time
chatbot = ChatBot('Bot')
trainer = ChatterBotCorpusTrainer(chatbot)
trainer.train('chatterbot.corpus.english')
cprint("#" * 50, "magenta")
cprint("A ChatBot".center(50), "yellow")  # plain string; the f-prefix had no placeholders
cprint("#" * 50, "magenta")
print('You can exit by typing exit\n')
while True:
query = input(">> ")
if 'exit' in query:
exit()
else:
print(chatbot.get_response(query))
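# Hypothetical extension: chatterbot also ships a ListTrainer for training on
# custom dialogue lists, e.g.
#   from chatterbot.trainers import ListTrainer
#   list_trainer = ListTrainer(chatbot)
#   list_trainer.train(['Hi there', 'Hello!', 'How are you?', 'I am fine.'])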
| 22.846154
| 55
| 0.69697
| 74
| 594
| 5.581081
| 0.662162
| 0.067797
| 0.072639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024341
| 0.170034
| 594
| 25
| 56
| 23.76
| 0.813387
| 0
| 0
| 0.117647
| 0
| 0
| 0.182711
| 0.049116
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.235294
| null | null | 0.352941
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
860b82a531bcd228b8d28c903681d9b70c4a8b49
| 2,793
|
py
|
Python
|
topology.py
|
destinysky/nsh_sfc
|
290fa49df2880527e0b7844bf3bec4d55c4945a6
|
[
"Apache-2.0"
] | 2
|
2020-10-26T17:22:04.000Z
|
2020-11-11T13:19:08.000Z
|
topology.py
|
destinysky/nsh_sfc
|
290fa49df2880527e0b7844bf3bec4d55c4945a6
|
[
"Apache-2.0"
] | null | null | null |
topology.py
|
destinysky/nsh_sfc
|
290fa49df2880527e0b7844bf3bec4d55c4945a6
|
[
"Apache-2.0"
] | 3
|
2020-03-28T12:53:35.000Z
|
2021-06-29T18:13:43.000Z
|
#!/usr/bin/python
"""
"""
from mininet.net import Mininet
from mininet.node import Controller, RemoteController, OVSKernelSwitch,UserSwitch
#OVSLegacyKernelSwitch, UserSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import Link, TCLink
#conf_port=50000
conf_ip_1='10.0.0.254'
conf_mac_1='11:12:13:14:15:16'
def topology():
"Create a network."
net = Mininet( controller=RemoteController, link=TCLink, switch=OVSKernelSwitch )
print "*** Creating nodes"
h1 = net.addHost( 'h1', mac='00:00:00:00:00:01', ip='10.0.0.1/24' )
h2 = net.addHost( 'h2', mac='00:00:00:00:00:02', ip='10.0.0.2/24' )
h3 = net.addHost( 'h3', mac='00:00:00:00:00:03', ip='10.0.0.3/24' )
h4 = net.addHost( 'h4', mac='00:00:00:00:00:04', ip='10.0.0.4/24' )
h5 = net.addHost( 'h5', mac='00:00:00:00:00:05', ip='10.0.0.5/24' )
s1 = net.addSwitch( 's1', listenPort=6671 )
s2 = net.addSwitch( 's2', listenPort=6672 )
s3 = net.addSwitch( 's3', listenPort=6673 )
s4 = net.addSwitch( 's4', listenPort=6674 )
s5 = net.addSwitch( 's5', listenPort=6675 )
c1 = net.addController( 'c1', controller=RemoteController, ip='127.0.0.1', port=6633 )
print "*** Creating links"
net.addLink(s1, h1)
net.addLink(s2, h2)
net.addLink(s3, h3)
net.addLink(s4, h4)
net.addLink(s5, h5)
net.addLink(s1, s2)
net.addLink(s2, s3)
net.addLink(s3, s4)
net.addLink(s4, s5)
print "*** Starting network"
net.build()
    # identical per-host setup, collapsed into a loop (behaviour unchanged)
    for h in [h1, h2, h3, h4, h5]:
        h.cmd('ip route add '+conf_ip_1+'/32 dev '+h.name+'-eth0')
        h.cmd('sudo arp -i '+h.name+'-eth0 -s '+conf_ip_1+' '+conf_mac_1)
        h.cmd('sysctl -w net.ipv4.ip_forward=1')
        h.cmd('python3 listen.py &')
c1.start()
    for s in [s1, s2, s3, s4, s5]:
        s.start( [c1] )
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology()
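# Optional smoke test (standard Mininet API): inserting net.pingAll() after
# the switches start in topology() would verify end-to-end connectivity
# before dropping into the CLI, e.g.
#   print "*** Testing connectivity"
#   net.pingAll()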
| 30.358696
| 90
| 0.617257
| 474
| 2,793
| 3.535865
| 0.219409
| 0.047733
| 0.053699
| 0.047733
| 0.25358
| 0.25358
| 0.214797
| 0.214797
| 0.074582
| 0
| 0
| 0.125
| 0.192266
| 2,793
| 92
| 91
| 30.358696
| 0.617908
| 0.022914
| 0
| 0
| 0
| 0
| 0.304412
| 0.038603
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.074627
| null | null | 0.074627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
862625f0bd5d6882a14a812018126e427778e14a
| 11,603
|
py
|
Python
|
build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
build/lib.linux-x86_64-2.7_ucs4/mx/Misc/PackageTools.py
|
mkubux/egenix-mx-base
|
3e6f9186334d9d73743b0219ae857564c7208247
|
[
"eGenix"
] | null | null | null |
""" PackageTools - A set of tools to aid working with packages.
Copyright (c) 1998-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2015, eGenix.com Software GmbH; mailto:info@egenix.com
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
__version__ = '0.4.0'
import os,types,sys,re,imp,__builtin__
import mx.Tools.NewBuiltins
# RE to identify Python modules
suffixes = projection(imp.get_suffixes(),0)
module_name = re.compile('(.*)(' + '|'.join(suffixes) + ')$')
initmodule_name = re.compile('__init__(' + '|'.join(suffixes) + ')$')
initmodule_names = []
for suffix in suffixes:
initmodule_names.append('__init__' + suffix)
def find_packages(dir=os.curdir, files_only=0, recursive=0, ignore_modules=0,
pkgbasename='', pkgdict=None,
isdir=os.path.isdir,exists=os.path.exists,
isfile=os.path.isfile,join=os.path.join,listdir=os.listdir,
module_name=module_name,initmodule_name=initmodule_name):
""" Return a list of package names found in dir.
Packages are Python modules and subdirectories that provide an
__init__ module. The .py extension is removed from the
    files. The __init__ modules are not considered being separate
packages.
If files_only is true, only Python files are included in the
search (subdirectories are *not* taken into account). If
ignore_modules is true (default is false), modules are
ignored. If recursive is true the search recurses into package
directories.
pkgbasename and pkgdict are only used during recursion.
"""
l = listdir(dir)
if pkgdict is None:
pkgdict = {}
if files_only:
for filename in l:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
else:
for filename in l:
path = join(dir, filename)
if isdir(path):
# Check for __init__ module(s)
for name in initmodule_names:
if isfile(join(path, name)):
pkgname = pkgbasename + filename
pkgdict[pkgname] = 1
if recursive:
find_packages(path,
recursive=1,
pkgbasename=pkgname + '.',
pkgdict=pkgdict)
break
elif not ignore_modules:
m = module_name.match(filename)
if m is not None and \
m.group(1) != '__init__':
pkgdict[pkgbasename + m.group(1)] = 1
return pkgdict.keys()
def find_subpackages(package, recursive=0,
splitpath=os.path.split):
""" Assuming that package points to a loaded package module, this
function tries to identify all subpackages of that package.
Subpackages are all Python files included in the same
directory as the module plus all subdirectories having an
__init__.py file. The modules name is prepended to all
subpackage names.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
If recursive is true (default is false), then subpackages of
subpackages are recursively also included in the search.
"""
if not recursive:
# Try the __all__ attribute...
try:
subpackages = list(package.__all__)
except (ImportError, AttributeError):
# Did not work, then let's try to find the subpackages by looking
# at the directory where package lives...
subpackages = find_packages(package.__path__[0], recursive=recursive)
else:
# XXX Recursive search does not support the __all__ attribute
subpackages = find_packages(package.__path__[0], recursive=recursive)
basename = package.__name__ + '.'
for i,name in irange(subpackages):
subpackages[i] = basename + name
return subpackages
def _thismodule(upcount=1,
exc_info=sys.exc_info,trange=trange):
""" Returns the module object that the callee is calling from.
upcount can be given to indicate how far up the execution
stack the function is supposed to look (1 == direct callee, 2
== callee of callee, etc.).
"""
try:
1/0
except:
frame = exc_info()[2].tb_frame
for i in trange(upcount):
frame = frame.f_back
name = frame.f_globals['__name__']
del frame
return sys.modules[name]
def _module_loader(name, locals, globals, sysmods, errors='strict', reload=0,
                   importer=__import__, reloader=reload, from_list=['*']):
""" Internal API for loading a module
"""
if not sysmods.has_key(name):
is_new = 1
else:
is_new = 0
try:
mod = importer(name, locals, globals, from_list)
if reload and not is_new:
mod = reloader(mod)
except KeyboardInterrupt:
# Pass through; SystemExit will be handled by the error handler
raise
except Exception, why:
if errors == 'ignore':
pass
elif errors == 'strict':
raise
elif callable(errors):
errors(name, sys.exc_info()[0], sys.exc_info()[1])
else:
raise ValueError,'unknown errors value'
else:
return mod
return None
def import_modules(modnames,module=None,errors='strict',reload=0,
thismodule=_thismodule):
""" Import all modules given in modnames into module.
module defaults to the caller's module. modnames may contain
dotted package names.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
    If reload is true (default is false), any modules in the list
    that are already loaded will be forced to reload.
"""
if module is None:
module = _thismodule(2)
locals = module.__dict__
sysmods = sys.modules
for name in modnames:
        mod = _module_loader(name, locals, locals, sysmods, errors=errors,
                             reload=reload)
if mod is not None:
locals[name] = mod
def load_modules(modnames,locals=None,globals=None,errors='strict',reload=0):
""" Imports all modules in modnames using the given namespaces and returns
list of corresponding module objects.
If errors is 'strict' (default), then ImportErrors and
SyntaxErrors are raised. If set to 'ignore', they are silently
ignored. If errors is a callable object, then it is called
with arguments (modname, errorclass, errorvalue). If the
handler returns, processing continues.
    If reload is true (default is false), any modules in the list
    that are already loaded will be forced to reload.
"""
modules = []
append = modules.append
sysmods = sys.modules
for name in modnames:
        mod = _module_loader(name, locals, globals, sysmods, errors=errors,
                             reload=reload)
if mod is not None:
append(mod)
return modules
def import_subpackages(module, reload=0, recursive=0,
import_modules=import_modules,
find_subpackages=find_subpackages):
""" Does a subpackages scan using find_subpackages(module) and then
imports all submodules found into module.
The module location is found by looking at the __file__
attribute that non-builtin modules define. The function uses
the __all__ attribute from the package __init__ module if
available.
    If reload is true (default is false), any modules in the list
    that are already loaded will be forced to reload.
"""
import_modules(find_subpackages(module, recursive=recursive),
module, reload=reload)
def load_subpackages(module, locals=None, globals=None, errors='strict', reload=0,
recursive=0,
load_modules=load_modules,
find_subpackages=find_subpackages):
""" Same as import_subpackages but with load_modules
functionality, i.e. imports the modules and also returns a list of
module objects.
If errors is 'strict' (default), then ImportErrors are
raised. If set to 'ignore', they are silently ignored.
    If reload is true (default is false), any modules in the list
    that are already loaded will be forced to reload.
"""
return load_modules(find_subpackages(module, recursive=recursive),
locals, globals,
errors=errors, reload=reload)
def modules(names,
extract=extract):
""" Converts a list of module names into a list of module objects.
The modules must already be loaded.
"""
return extract(sys.modules, names)
def package_modules(pkgname):
""" Returns a list of all modules belonging to the package with the
given name.
The package must already be loaded. Only the currently
registered modules are included in the list.
"""
match = pkgname + '.'
match_len = len(match)
mods = [sys.modules[pkgname]]
for k,v in sys.modules.items():
if k[:match_len] == match and v is not None:
mods.append(v)
return mods
def find_classes(mods,baseclass=None,annotated=0,
ClassType=types.ClassType,issubclass=issubclass):
""" Find all subclasses of baseclass or simply all classes (if baseclass
is None) defined by the module objects in list mods.
If annotated is true the returned list will contain tuples
(module_object,name,class_object) for each class found where
module_object is the module where the class is defined.
"""
classes = []
for mod in mods:
for name,obj in mod.__dict__.items():
if type(obj) is ClassType:
if baseclass and not issubclass(obj,baseclass):
continue
if annotated:
classes.append((mod, name, obj))
else:
classes.append(obj)
return classes
def find_instances(mods,baseclass,annotated=0,
InstanceType=types.InstanceType,issubclass=issubclass):
""" Find all instances of baseclass defined by the module objects
in list mods.
If annotated is true the returned list will contain tuples
(module_object,name,instances_object) for each instances found where
module_object is the module where the instances is defined.
"""
instances = []
for mod in mods:
for name,obj in mod.__dict__.items():
if isinstance(obj,baseclass):
if annotated:
instances.append((mod,name,obj))
else:
instances.append(obj)
return instances
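# Illustrative usage (Python 2, matching the module above):
if __name__ == '__main__':
    # list module/package candidates below the current directory
    print find_packages(os.curdir, recursive=1)
    # map a few already-loaded module names back to module objects
    print modules(['os', 'sys'])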
| 35.375
| 82
| 0.613031
| 1,395
| 11,603
| 4.967025
| 0.21147
| 0.008659
| 0.011257
| 0.012989
| 0.340597
| 0.311156
| 0.297878
| 0.290807
| 0.254438
| 0.233223
| 0
| 0.00666
| 0.314143
| 11,603
| 327
| 83
| 35.48318
| 0.864036
| 0.026976
| 0
| 0.242038
| 0
| 0
| 0.018765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.006369
| 0.057325
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8630b3c80464d13f544a914873b82ed141f94bf1
| 9,098
|
py
|
Python
|
qstklearn/1knn.py
|
elxavicio/QSTK
|
4981506c37227a72404229d5e1e0887f797a5d57
|
[
"BSD-3-Clause"
] | 339
|
2015-01-01T10:06:49.000Z
|
2022-03-23T23:32:24.000Z
|
QSTK/qstklearn/1knn.py
|
jenniyanjie/QuantSoftwareToolkit
|
0eb2c7a776c259a087fdcac1d3ff883eb0b5516c
|
[
"BSD-3-Clause"
] | 19
|
2015-01-04T13:12:33.000Z
|
2021-07-19T11:13:47.000Z
|
QSTK/qstklearn/1knn.py
|
jenniyanjie/QuantSoftwareToolkit
|
0eb2c7a776c259a087fdcac1d3ff883eb0b5516c
|
[
"BSD-3-Clause"
] | 154
|
2015-01-30T09:41:15.000Z
|
2022-03-19T02:27:59.000Z
|
'''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Feb 20, 2011
@author: John Cornwell
@organization: Georgia Institute of Technology
@contact: JohnWCornwellV@gmail.com
@summary: This is an implementation of the 1-KNN algorithm for ranking features quickly.
It uses the knn implementation.
@status: oneKNN functions correctly, optimized to use n^2/2 algorithm.
'''
import matplotlib.pyplot as plt
from pylab import gca
import itertools
import string
import numpy as np
import math
import knn
from time import clock
'''
@summary: Query function for 1KNN, return value is a double between 0 and 1.
@param naData: A 2D numpy array. Each row is a data point with the final column containing the classification.
'''
def oneKnn( naData ):
if naData.ndim != 2:
raise Exception( "Data should have two dimensions" )
lLen = naData.shape[0]
''' # of dimensions, subtract one for classification '''
lDim = naData.shape[1] - 1
''' Start best distances as very large '''
ldDistances = [1E300] * lLen
llIndexes = [-1] * lLen
dDistance = 0.0;
''' Loop through finding closest neighbors '''
for i in range( lLen ):
for j in range( i+1, lLen ):
dDistance = 0.0
for k in range( 0, lDim ):
dDistance += (naData[i][k] - naData[j][k])**2
dDistance = math.sqrt( dDistance )
''' Two distances to check, for i's best, and j's best '''
if dDistance < ldDistances[i]:
ldDistances[i] = dDistance
llIndexes[i] = j
if dDistance < ldDistances[j]:
ldDistances[j] = dDistance
llIndexes[j] = i
lCount = 0
''' Now count # of matching pairs '''
for i in range( lLen ):
if naData[i][-1] == naData[ llIndexes[i] ][-1]:
lCount = lCount + 1
return float(lCount) / lLen
''' Test function to plot results '''
def _plotResults( naDist1, naDist2, lfOneKnn, lf5Knn ):
plt.clf()
plt.subplot(311)
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
#plt.ylabel( 'Feature 2' )
#plt.xlabel( 'Feature 1' )
#gca().annotate( '', xy=( .8, 0 ), xytext=( -.3 , 0 ), arrowprops=dict(facecolor='red', shrink=0.05) )
gca().annotate( '', xy=( .7, 0 ), xytext=( 1.5 , 0 ), arrowprops=dict(facecolor='black', shrink=0.05) )
plt.title( 'Data Distribution' )
plt.subplot(312)
plt.plot( range( len(lfOneKnn) ), lfOneKnn )
plt.ylabel( '1-KNN Value' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '1-KNN Performance' )
plt.subplot(313)
plt.plot( range( len(lf5Knn) ), lf5Knn )
plt.ylabel( '% Correct Classification' )
#plt.xlabel( 'Distribution Merge' )
plt.title( '5-KNN Performance' )
plt.subplots_adjust()
plt.show()
''' Function to plot 2 distributions '''
def _plotDist( naDist1, naDist2, i ):
plt.clf()
plt.scatter( naDist1[:,0], naDist1[:,1] )
plt.scatter( naDist2[:,0], naDist2[:,1], color='r' )
plt.ylabel( 'Feature 2' )
plt.xlabel( 'Feature 1' )
plt.title( 'Iteration ' + str(i) )
plt.show()
''' Function to test KNN performance '''
def _knnResult( naData ):
''' Split up data into training/testing '''
    lSplit = int(naData.shape[0] * .7)  # integer index; float slicing fails on modern numpy
naTrain = naData[:lSplit, :]
naTest = naData[lSplit:, :]
knn.addEvidence( naTrain.astype(float), 1 );
''' Query with last column omitted and 5 nearest neighbors '''
naResults = knn.query( naTest[:,:-1], 5, 'mode')
''' Count returns which are correct '''
lCount = 0
for i, dVal in enumerate(naResults):
if dVal == naTest[i,-1]:
lCount = lCount + 1
dResult = float(lCount) / naResults.size
return dResult
''' Tests performance of 1-KNN '''
def _test1():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
for i in range(3):
''' Select one of three distributions '''
if i == 0:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
elif i == 1:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.1,size=[500,2] )
naTest2 = np.hstack( (naTest2, np.ones(500).reshape(-1,1) ) )
else:
naTest1 = np.random.normal( loc=[0,0],scale=.25,size=[500,2] )
naTest1 = np.hstack( (naTest1, np.zeros(500).reshape(-1,1) ) )
naTest2 = np.random.normal( loc=[1.5,0],scale=.25,size=[250,2] )
naTest2 = np.hstack( (naTest2, np.ones(250).reshape(-1,1) ) )
naOrig = np.vstack( (naTest1, naTest2) )
naBoth = np.vstack( (naTest1, naTest2) )
''' Keep track of runtimes '''
t = clock()
cOneRuntime = t-t;
cKnnRuntime = t-t;
lfResults = []
lfKnnResults = []
for i in range( 15 ):
#_plotDist( naTest1, naBoth[100:,:], i )
t = clock()
lfResults.append( oneKnn( naBoth ) )
cOneRuntime = cOneRuntime + (clock() - t)
t = clock()
lfKnnResults.append( _knnResult( np.random.permutation(naBoth) ) )
cKnnRuntime = cKnnRuntime + (clock() - t)
naBoth[500:,0] = naBoth[500:,0] - .1
print 'Runtime OneKnn:', cOneRuntime
print 'Runtime 5-KNN:', cKnnRuntime
_plotResults( naTest1, naTest2, lfResults, lfKnnResults )
''' Tests performance of 1-KNN '''
def _test2():
''' Generate three random samples to show the value of 1-KNN compared to 5KNN learner performance '''
np.random.seed( 12345 )
''' Create 5 distributions for each of the 5 attributes '''
dist1 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist2 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist3 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist4 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
dist5 = np.random.uniform( -1, 1, 1000 ).reshape( -1, 1 )
lDists = [ dist1, dist2, dist3, dist4, dist5 ]
''' All features used except for distribution 4 '''
distY = np.sin( dist1 ) + np.sin( dist2 ) + np.sin( dist3 ) + np.sin( dist5 )
distY = distY.reshape( -1, 1 )
for i, fVal in enumerate( distY ):
if fVal >= 0:
distY[i] = 1
else:
distY[i] = 0
for i in range( 1, 6 ):
lsNames = []
lf1Vals = []
lfVals = []
for perm in itertools.combinations( '12345', i ):
''' set test distribution to first element '''
naTest = lDists[ int(perm[0]) - 1 ]
sPerm = perm[0]
''' stack other distributions on '''
for j in range( 1, len(perm) ):
sPerm = sPerm + str(perm[j])
naTest = np.hstack( (naTest, lDists[ int(perm[j]) - 1 ] ) )
''' finally stack y values '''
naTest = np.hstack( (naTest, distY) )
lf1Vals.append( oneKnn( naTest ) )
lfVals.append( _knnResult( np.random.permutation(naTest) ) )
lsNames.append( sPerm )
''' Plot results '''
plt1 = plt.bar( np.arange(len(lf1Vals)), lf1Vals, .2, color='r' )
plt2 = plt.bar( np.arange(len(lfVals)) + 0.2, lfVals, .2, color='b' )
plt.legend( (plt1[0], plt2[0]), ('1-KNN', 'KNN, K=5') )
plt.ylabel('1-KNN Value/KNN Classification')
plt.xlabel('Feature Set')
plt.title('Combinations of ' + str(i) + ' Features')
plt.ylim( (0,1) )
if len(lf1Vals) < 2:
plt.xlim( (-1,1) )
gca().xaxis.set_ticks( np.arange(len(lf1Vals)) + .2 )
gca().xaxis.set_ticklabels( lsNames )
plt.show()
if __name__ == '__main__':
_test1()
#_test2()
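    # Illustrative oneKnn call on a tiny synthetic sample (hypothetical data;
    # two well-separated classes should score near 1.0):
    #   naA = np.hstack((np.random.normal(0.0, .1, [50, 2]), np.zeros((50, 1))))
    #   naB = np.hstack((np.random.normal(5.0, .1, [50, 2]), np.ones((50, 1))))
    #   print oneKnn(np.vstack((naA, naB)))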
| 31.811189
| 112
| 0.523522
| 1,069
| 9,098
| 4.434051
| 0.282507
| 0.008017
| 0.022785
| 0.021519
| 0.277004
| 0.228481
| 0.203587
| 0.197468
| 0.197468
| 0.165823
| 0
| 0.056915
| 0.339525
| 9,098
| 285
| 113
| 31.922807
| 0.731902
| 0.029677
| 0
| 0.189189
| 0
| 0
| 0.041518
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.054054
| null | null | 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
86609708c6740fc5dcff69f746034012abb3d227
| 1,112
|
py
|
Python
|
01_basics/01_building_expressions/02_vector_mat_soln.py
|
johny-c/theano_exercises
|
7fd43315bf7c475a6f218091316c0bd34e0688c4
|
[
"BSD-3-Clause"
] | 711
|
2015-01-10T05:39:21.000Z
|
2022-03-15T23:45:45.000Z
|
01_basics/01_building_expressions/02_vector_mat_soln.py
|
rsingh2083/theano_exercises
|
7fd43315bf7c475a6f218091316c0bd34e0688c4
|
[
"BSD-3-Clause"
] | 2
|
2016-06-13T06:46:58.000Z
|
2017-04-14T08:21:20.000Z
|
01_basics/01_building_expressions/02_vector_mat_soln.py
|
rsingh2083/theano_exercises
|
7fd43315bf7c475a6f218091316c0bd34e0688c4
|
[
"BSD-3-Clause"
] | 371
|
2015-01-16T01:31:41.000Z
|
2022-03-15T11:37:30.000Z
|
import numpy as np
from theano import function
import theano.tensor as T
def make_vector():
"""
Returns a new Theano vector.
"""
return T.vector()
def make_matrix():
"""
Returns a new Theano matrix.
"""
return T.matrix()
def elemwise_mul(a, b):
"""
a: A theano matrix
b: A theano matrix
Returns the elementwise product of a and b
"""
return a * b
def matrix_vector_mul(a, b):
"""
a: A theano matrix
b: A theano vector
Returns the matrix-vector product of a and b
"""
return T.dot(a, b)
if __name__ == "__main__":
a = make_vector()
b = make_vector()
c = elemwise_mul(a, b)
d = make_matrix()
e = matrix_vector_mul(d, c)
f = function([a, b, d], e)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(5).astype(a.dtype)
b_value = rng.rand(5).astype(b.dtype)
c_value = a_value * b_value
d_value = rng.randn(5, 5).astype(d.dtype)
expected = np.dot(d_value, c_value)
actual = f(a_value, b_value, d_value)
assert np.allclose(actual, expected)
print "SUCCESS!"
| 19.508772
| 48
| 0.607014
| 176
| 1,112
| 3.664773
| 0.289773
| 0.018605
| 0.023256
| 0.052713
| 0.20155
| 0.20155
| 0.083721
| 0.083721
| 0.083721
| 0.083721
| 0
| 0.008589
| 0.267086
| 1,112
| 56
| 49
| 19.857143
| 0.782822
| 0
| 0
| 0
| 0
| 0
| 0.019656
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0
| null | null | 0
| 0.111111
| null | null | 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8671b6a372caa3589eb77dcc566a9b3713aa80a9
| 6,499
|
py
|
Python
|
genlist.py
|
truckli/technotes
|
11d3cc0a1bd33141a22eaa2247cac1be1d74718a
|
[
"Apache-2.0"
] | null | null | null |
genlist.py
|
truckli/technotes
|
11d3cc0a1bd33141a22eaa2247cac1be1d74718a
|
[
"Apache-2.0"
] | null | null | null |
genlist.py
|
truckli/technotes
|
11d3cc0a1bd33141a22eaa2247cac1be1d74718a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import shutil, re, os, sys
file_model = "Model.template"
bookname = "TechNotes"
file_bibtex = "thebib.bib"
folder_target = "../pdf/"
#if name is a chapter, return its sections
def get_sections(name):
if not os.path.isdir(name):
return []
files = os.listdir(name)
sections = []
for section in files:
if re.match('.*\.tex$', section) and not re.match(".*lmz0610.*", section):
sections.append(name + "/" + section)
return sections
def is_updated(pdffile, texfiles):
def depend_modified(fname, ims):
depend_mtime = os.path.getmtime(fname)
if depend_mtime > ims:
print pdffile, ' mtime: ',ims
print fname, ' mtime: ', depend_mtime
return True
return False
old_pdffile = folder_target + pdffile
if not os.path.isfile(old_pdffile):
return False
pdf_mtime = os.path.getmtime(old_pdffile)
#if depend_modified(sys.argv[0], pdf_mtime):
#return False
#if depend_modified(file_model, pdf_mtime):
#return False
for section in texfiles:
if depend_modified(section, pdf_mtime):
return False
return True
def remove_tmp(tmpname):
if os.path.isfile(tmpname):
os.remove(tmpname)
def remove_latex_tmps(texname):
remove_tmp(texname + ".pdf")
remove_tmp(texname + ".tex")
remove_tmp(texname + ".blg")
remove_tmp(texname + ".bbl")
remove_tmp(texname + ".out")
remove_tmp(texname + ".toc")
remove_tmp(texname + ".aux")
remove_tmp(texname + ".idx")
remove_tmp(texname + ".log")
remove_tmp(texname + ".lof")
remove_tmp(texname + ".lot")
def read_bbl_file(object_name):
file_bbl = object_name + ".bbl"
if not os.path.isfile(file_bbl):
return ""
with open(file_bbl, 'r') as f:
return f.read()
#if depend_files contains citation
def need_bibtex(object_name, depend_files):
#if a file contains latex citation command \cite{}
def contain_citation(section_name):
with open(section_name, "r") as f:
content_section = f.read()
if content_section.find("\\cite{") == -1:
return False
return True
for section in depend_files:
if contain_citation(section):
return True
return False
def gen_pdf(object_name):
object_pdf = object_name + ".pdf"
if object_name == bookname:
depend_files = book_sections
targets = [folder_target + object_pdf, folder_target + "AAAAAAAAAAA.pdf"]
chapter_start_counter = 0
else:
depend_files = chap_sections[object_name]
targets = [folder_target + object_pdf]
chapter_start_counter = book_chapters.index(object_name)
# if is_updated(object_pdf, depend_files):
# print(object_pdf + " is updated")
# return False
obj_need_bibtex = need_bibtex(object_name, depend_files)
model = ''
with open(file_model) as model_file:
model = model_file.read()
model = model.replace("OBJECTNAME", object_name)
if object_name == 'Report':
model = model.replace("CHAPTERSTART", "0")
model = model.replace("\\tableofcontents", "%\\tableofcontents")
model = model.replace("ctexrep", "ctexart")
model = model.replace("\\setcounter{chapter}", "%\\setcounter{chapter}")
else:
model = model.replace("CHAPTERSTART", str(chapter_start_counter))
insert_word = "TOADD"
insert_pos = model.find(insert_word)
latex_text = model[:insert_pos] + insert_word
for section in depend_files:
latex_text = latex_text + "\n\\input{"+ section + "}"
#prepend text encoding mode line
section_text = ""
with open(section, 'r') as f:
line = f.readline()
if line[:6] != '%!Mode':
section_text = '%!Mode:: "TeX:UTF-8"\n' + line + f.read()
if section_text != "":
with open(section, 'w') as f:
f.write(section_text)
if obj_need_bibtex:
latex_text = latex_text + "\n\n"
latex_text = latex_text + "\\bibliographystyle{unsrt}\n"
latex_text = latex_text + "\\bibliography{thebib}\n"
latex_text = latex_text + model[insert_pos+len(insert_word):]
object_tex = object_name + ".tex"
with open(object_tex, "w") as f:
f.write(latex_text)
# os.system("xelatex " + object_name)
# if len(sys.argv) < 3 or sys.argv[2] != "fast":
# if obj_need_bibtex:
# old_bbl = read_bbl_file(object_name)
# os.system("bibtex " + object_name)
# if old_bbl != read_bbl_file(object_name):
# os.system("xelatex " + object_name)
# os.system("xelatex " + object_name)
#
# if os.path.isfile(object_pdf):
# for target in targets:
# shutil.copy(object_pdf, target)
return True
#trim trailing slash
def trim_chap_name(name):
if name[len(name) - 1] == '/':
name = name[:len(name)-1]
return name
def merge_chapter_pdfs():
mergecmd = 'pdftk '
for chap in book_chapters:
chappdf = folder_target + chap + '.pdf'
if os.path.isfile(chappdf):
mergecmd += chappdf + ' '
mergecmd += 'cat output ' + folder_target + 'AAABBBBBBBB.pdf'
print mergecmd
os.system(mergecmd)
##################################################
#now work starts
files = os.listdir('.')
chap_sections = {}
book_sections = []
book_chapters = []
for chap in files:
sections = get_sections(chap)
if len(sections):
chap_sections[chap] = sections
book_sections.extend(sections)
book_chapters.append(chap)
cmd = "one"
if cmd == "one":
gen_pdf(bookname)
elif cmd == "all":
modified = False
for chap in chap_sections:
modified = gen_pdf(chap) or modified
if modified:
merge_chapter_pdfs()
elif cmd == "clean":
for chap in chap_sections:
remove_latex_tmps(chap)
remove_latex_tmps(bookname)
else:
chap = trim_chap_name(cmd)
if chap in book_sections:
#chap is actually a section
section = chap
chap = 'Report'
chap_sections[chap] = [section]
book_chapters.append(chap)
if not chap_sections.has_key(chap):
print(chap + " is not a valid chapter name")
sys.exit(1)
modified = gen_pdf(chap)
if modified and chap != 'Report':
merge_chapter_pdfs()
| 27.892704
| 82
| 0.610248
| 810
| 6,499
| 4.687654
| 0.201235
| 0.047406
| 0.046352
| 0.023703
| 0.1596
| 0.053463
| 0.029497
| 0.018436
| 0.018436
| 0
| 0
| 0.003137
| 0.264348
| 6,499
| 232
| 83
| 28.012931
| 0.791048
| 0.142791
| 0
| 0.135484
| 0
| 0
| 0.088342
| 0.017304
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.006452
| null | null | 0.025806
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8674487bc14ab6d974246602ccaa1b9927159028
| 4,724
|
py
|
Python
|
rr_ml/nodes/end_to_end/train.py
|
ebretl/roboracing-software
|
8803c97a885500069d04e70894b19f807ae5baf9
|
[
"MIT"
] | null | null | null |
rr_ml/nodes/end_to_end/train.py
|
ebretl/roboracing-software
|
8803c97a885500069d04e70894b19f807ae5baf9
|
[
"MIT"
] | null | null | null |
rr_ml/nodes/end_to_end/train.py
|
ebretl/roboracing-software
|
8803c97a885500069d04e70894b19f807ae5baf9
|
[
"MIT"
] | null | null | null |
import os
import math
import string
import numpy as np
import rospy
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, \
GaussianNoise, BatchNormalization
import cv2
import collections
import random
import time
from example_set import ExampleSet
from params import input_shape, expand_categories
n_examples_to_load = 8000 # if the number of training examples is below this, load more data
batch_size = 16
categories = [None]
def defineCategory(steering):
differences = [abs(steering - category) for category in categories]
category = np.argmin(differences)
oneHot = [1 if i == category else 0 for i in range(len(categories))]
return oneHot
def format_inputs(examples):
data2 = np.zeros((len(examples),) + input_shape, dtype='float32')
for i, ex in enumerate(examples):
data2[i] = ex.get_image()
data2 /= 255.0
return data2
def make_model():
model = Sequential()
# 128 x 48
model.add(GaussianNoise(0.05, input_shape=input_shape))
model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((4, 4)))
# 32 x 12
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(MaxPooling2D((2, 2)))
# 16 x 6
model.add(Flatten())
model.add(Dropout(0.25))
model.add(Dense(128, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.35))
model.add(Dense(len(categories), activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model
def main():
global categories, n_examples_to_load, batch_size
rospy.init_node("nn_training")
startTime = time.time()
model_path = rospy.get_param("~model_output_path")
exampleSetDir = rospy.get_param("~example_set_dir")
epochs = int(rospy.get_param("~epochs"))
categories = rospy.get_param("~positive_nonzero_categories")
categories = string.strip(categories).split(" ")
categories = [float(x) for x in categories]
categories = expand_categories(categories)
model = make_model()
model.summary()
exampleSetFiles_const = tuple(f for f in os.listdir(exampleSetDir) if '.pkl.lz4' in f)
n_training_examples = 0
n_test_examples = 0
cnt = collections.Counter()
for f in exampleSetFiles_const:
data = ExampleSet.load(os.path.join(exampleSetDir, f))
n_training_examples += len(data.train)
n_test_examples += len(data.test)
for ex in data.train:
i = np.argmax(defineCategory(ex.angle))
cnt[i] += 1
print "total training examples:", n_training_examples
print "training label counts:", cnt
def batch_generator(isValidation = False):
gen_epochs = 1 if isValidation else epochs
for epoch in range(gen_epochs):
exampleSetFiles = list(exampleSetFiles_const)
random.shuffle(exampleSetFiles)
while len(exampleSetFiles) > 0:
D = []
while len(exampleSetFiles) > 0 and len(D) < n_examples_to_load:
data = ExampleSet.load(os.path.join(exampleSetDir, exampleSetFiles.pop()))
D += data.test if isValidation else data.train
if not isValidation: random.shuffle(D)
X = format_inputs(D)
# create output bins
labels = np.array([defineCategory(ex.angle) for ex in D])
if not isValidation:
for i in range(len(X)):
if random.random() < 0.4: # 40% of images are flipped
X[i] = cv2.flip(X[i], 1)
labels[i] = labels[i][::-1]
for i in range(0, len(X), batch_size):
xs = X[i: i + batch_size]
ys = labels[i: i + batch_size]
yield (xs, ys)
try:
n_minibatches = int(math.ceil(float(n_training_examples) / batch_size))
model.fit_generator(batch_generator(),
steps_per_epoch=n_minibatches,
epochs=epochs,
verbose=1)
print "elapsed time:", time.time() - startTime
n_minibatches = int(math.ceil(float(n_test_examples) / batch_size))
loss, acc = model.evaluate_generator(batch_generator(True), steps=n_minibatches)
print "validation loss:", loss, "| validation accuracy:", acc
finally:
model.save(model_path)
print "\nsaved model to", model_path
[record] 86864787ee128fda8f0e696df8fc12952938543c · run_mod.py · fpl-analytics/gr_crypto @ 2b0ab451c9c205a9f572c4bca23fffbb68ca188f · license: MIT · Python · 4,356 bytes (per-file metric columns omitted)
"""
Setup:
- Import Libraries
- Setup tf on multiple cores
- Import Data
"""
import pandas as pd
import numpy as np
import tensorflow as tf
import seaborn as sns
from time import time
import multiprocessing
import random
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, ConvLSTM2D, Flatten
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from joblib import dump, load
from mod.prep import log_return, log_return_np, preprocess
from mod.model import return_pred
from mod.eval import evaluate_regression, evaluate_up_down
cores = multiprocessing.cpu_count()
tf.config.threading.set_inter_op_parallelism_threads(cores-1)
root_folder = "data"
wide_close = pd.read_csv(root_folder + "/working/wide_close.csv")
wide_target = pd.read_csv(root_folder + "/working/wide_target.csv")
asset_details = pd.read_csv(root_folder + "/asset_details.csv")
assets = [str(i) for i in asset_details["Asset_ID"]]
"""
Preprocess
"""
close_returns = wide_close[assets].apply(log_return)
close_returns["time"] = wide_close["time"]
close_returns[assets] = close_returns[assets].replace([np.inf,-np.inf],np.nan)
"""
Linear Regression
"""
x_steps, y_steps = 60, [1, 15]
col_in, col_out = "1", "1"
train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, col_in,
                                                      col_out, "time", x_steps, y_steps)
# 1 step
lr_1 = LinearRegression()
lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,0,:], lr_1)
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
# 15 step
lr_15 = LinearRegression()
lr_15.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,1,:], lr_15)
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
"""
calculate and store components seperately
process:
- first, get rolling values for each timestamp
- then, predict 1 and 15 gaps and store in array
"""
# Production
"""
Steps:
- Get train, val test and test indices. Importantly, this
needs to cover all assets (even though not all assets exist)
for the whole time period.
- Build models
"""
assets = list(asset_details["Asset_ID"].astype(str))
# Get indexes
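# Split rows by position: first 70% -> train, next 10% -> val, remaining 20% -> test.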
i = np.select(
[
(wide_close.index >= 0) & (wide_close.index <= (len(wide_close)*0.7)),
(wide_close.index > (len(wide_close)*0.7)) & (wide_close.index <= (len(wide_close)*0.8))
],
["train", "val"],
default = "test")
indexes = pd.DataFrame({"time":wide_close["time"],
"set":i})
for a in assets:
print("asset", a)
filt = indexes["set"][~pd.isna(wide_close[a])]
counts = filt.value_counts()
df = pd.DataFrame({"counts":counts,
"pct":counts/np.sum(counts)})
print(df, "\n\n")
indexes_d = {}
for s in indexes["set"].unique():
indexes_d[s] = indexes["time"][indexes["set"] == s]
mkdir "model_files"
mkdir "model_files/linear_regression"
for a in assets:
print("Asset", a)
x_steps, y_steps = 60, [1, 16]
cols_in, cols_out = a, a
train_x, test_x, train_y, test_y, time_d = preprocess(wide_close, cols_in,
cols_out, "time", x_steps, y_steps)
# 1 step
lr_1 = LinearRegression()
lr_1.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,0,:], lr_1)
print("Model 1 Metrics")
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
# 16 step
lr_16 = LinearRegression()
lr_16.fit(train_x.reshape(-1, x_steps), train_y[:,1,:].reshape(-1, 1))
true, pred = return_pred(test_x, test_y[:,1,:], lr_16)
print("Model 16 Metrics")
evaluate_regression(true, pred)
evaluate_up_down(true, pred)
dump(lr_1, f"model_files/linear_regression/lr_{a}_1")
dump(lr_16, f"model_files/linear_regression/lr_{a}_16")
dump(time_d, "model_files/linear_regression/lr_times")
"""
Random Forest
"""
rf = RandomForestRegressor(n_jobs=-1)
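# Note: train_x/train_y here are left over from the last iteration of the asset
# loop above, so this forest is fit on the final asset's windows only.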
# start = time()
rf.fit(train_x.reshape(-1, x_steps), train_y[:,0,:].reshape(-1))  # fit on the 1-step target (train_y holds both horizons)
# print("Took:", round(time() - start))
[record] 86a7a933257c5b58ca131b6e09db3e5af93d5f4e · netesto/local/psPlot.py · fakeNetflix/facebook-repo-fbkutils @ 16ec0c024322c163e7dbe691812ba8fdf5b511ad · license: BSD-3-Clause · 346 stars (2016-04-08 – 2021-09-30) · 38 issues · 76 forks · Python · 19,069 bytes (per-file metric columns omitted)
#!/usr/bin/env python2
import sys
import random
import os.path
import shutil
import commands
import types
import math
#gsPath = '/usr/local/bin/gs'
gsPath = 'gs'
logFile = '/dev/null'
#logFile = 'plot.log'
#--- class PsPlot(fname, pageHeader, pageSubHeader, plotsPerPage)
#
class PsPlot(object):
def __init__(self, fname, pageHeader, pageSubHeader, plotsPerPage):
self.foutPath = os.path.dirname(fname)+'/'
if self.foutPath == '/':
self.foutPath = ''
self.foutName = os.path.basename(fname)
self.fname = fname+'.ps'
self.pageHeader = pageHeader
self.pageSubHeader = pageSubHeader
self.plotsPerPage = plotsPerPage
self.yfix1 = ''
self.yfix2 = ''
self.xGrid = 1
self.yGrid = 1
self.xUniform = False
self.xLen = 6.5 #inches
self.seriesTitle = ' '
self.x0 = 0
self.xInc = 0
self.xCount = 0
self.xList = []
self.xDict = {}
self.y1Inc = 0
self.y1Count = 0
self.y1LogScale = 0
self.y2Inc = 0
self.y2Count = 0
self.y2LogScale = 0
self.xOffset = 0
self.colors = [ (0.7,0.7,0.7), (0,0,0.8), (0.8,0,0),
(0.42,0.55,0.14), (0.6,0.5,0.3), (0.6,0.2,0.8),
(0,0.8,0),
(0.4,0.3,0.5), (0.5,0.5,0.5), (0.8,0.0,0.0), (0,0,0) ]
self.colorsN = 11
self.colorRed = (0.8,0,0)
self.colorGreen = (0,0.8,0)
self.colorBlue = (0,0,0.8)
self.colorAqua = (0,0.5,0.5)
self.colorWhite = (1,1,1)
self.ColorBlack = (0,0,0)
self.xSize = 1800
self.ySize = 900
shutil.copy('plot-header.ps', self.fname)
self.fout = open(self.fname, 'a')
self.flog = open(logFile, 'a')
# self.flog = open('./psPlot.out', 'a')
if plotsPerPage == 4:
print >>self.fout, '/doGraph { graph4v } def'
print >>self.fout, '/nextGraph { nextGraph4v } def'
elif plotsPerPage == 3:
print >>self.fout, '/doGraph { graph3v } def'
print >>self.fout, '/nextGraph { nextGraph3v } def'
elif plotsPerPage == 2:
print >>self.fout, '/doGraph { graph2v } def'
print >>self.fout, '/nextGraph { nextGraph2v } def'
else:
print >>self.fout, '/doGraph { graph1v } def'
print >>self.fout, '/nextGraph { nextGraph1v } def'
print >>self.fout, '/showpage {\n 40 742 moveto'
print >>self.fout, '/Helvetica findfont 12 scalefont setfont'
if self.pageHeader != '':
print >>self.fout, '(',self.pageHeader,') show'
if self.pageSubHeader != '':
print >>self.fout, '40 726 moveto\n (',self.pageSubHeader,') show'
print >>self.fout, 'showpage\n} bind def'
print >>self.fout, 'doGraph'
#--- End()
#
def End(self):
print >>self.fout, '\nshowpage\nend'
self.fout.close()
#--- GetInc(vMin, vMax)
def GetInc(self,vMin, vMax):
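        # Widen [vMin, vMax] to "round" endpoints and choose a tick increment;
        # returns (v0, v1, vInc) for axis labelling.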
ff = 1.0
while vMax <= 1 and vMax > 0:
ff *= 0.10
vMin *= 10
vMax *= 10
v0 = int(vMin)
v1 = int(vMax+0.99)
f = 1
w = v1 - v0
if w == 0:
v1 = v0 + 1
w = 1
while w/f >= 100:
f *= 10
# w = int(w/f)
v0 = int(v0/f)
v1 = int(v1/f)
if (vMin % f) != 0 and vMax == v1:
v1 += 1
w = v1 - v0
if w <= 10:
vInc = 1
elif w <= 20:
vInc = 2
else:
m = 10
while w/m > 100:
m *= 10
if (v0 >= 0) and (v0 % m) != 0:
v0 = int(v0 / m) * m
if (v1 % m) != 0:
v1 = int(v1 / m) * m + m
w = v1 - v0
if w <= 5*m:
vInc = m/2
else:
vInc = m
else:
vInc = m
# if (vMax/f)%vInc != 0 or v1 % vInc != 0:
if v1 % vInc != 0:
v1 = int(v1/vInc)*vInc + vInc
if (v0 % vInc) != 0:
v0 = int(v0/vInc)*vInc
v0 += vInc
v0 *= (f*ff)
v1 *= (f*ff)
vInc *= (f*ff)
return v0, v1, vInc
#--- ValueConvert(v)
#
def ValueConvert(self, v, inc):
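        # Format an axis value as a short label, scaling into K/M/G suffixes
        # when the tick increment is coarse enough.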
if inc > 0:
logInc = int(math.log10(v/inc))
d = math.pow(10,logInc)
if d == 0:
d = 10.0
else:
d = 10.0
if d == 1 and float(v)/inc > 1.0:
d = 10.0
if v >= 1000000000 and inc > 1:
s = int(v/(1000000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'G'
elif v >= 1000000 and inc > 1:
s = int(v/(1000000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'M'
elif v >= 1000 and inc > 1:
s = int(v/(1000/d))/d
if s*d == int(s)*d:
s = int(s)
r = str(s) + 'K'
elif v >= 1:
s = int(v*d)/d
if s*d == int(s)*d:
s = int(s)
r = str(s)
else:
r = str(int(v*100)/100.0)
return r
#--- GetAxis(vBeg, vEnd, vInc, logFlag)
#
def GetAxis(self, vBeg, vEnd, vInc, logFlag):
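        # Build the tick-label list for one axis plus a PostScript "fix" snippet
        # mapping data values to tick positions; handles categorical x values
        # (vBeg passed as a list) and log-scaled axes.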
fix = '{ 0 add }'
if isinstance(vBeg,list):
vList = vBeg
vList.append(' ')
self.xUniform = True
v0 = 1
v1 = len(vList)
vi = 1
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
logFlag = 0
else:
if vInc == 0:
v0,v1,vi = self.GetInc(vBeg,vEnd)
else:
v0 = vBeg
v1 = vEnd
vi = vInc
if vBeg > 0 and (logFlag==1 or (logFlag==0 and (vEnd/vBeg > 100))):
v0 = vBeg
v1 = vEnd
logFlag = 1
v0Log = math.log10(v0)
t = math.ceil(v0Log)
ff = math.modf(v0Log)
if math.fabs(ff[0]) < math.fabs(v0Log)/1000 and t < 0:
t += 1
logOffset = 0
while t < 1:
logOffset += 1
t += 1
v0 = math.pow(10,math.floor(v0Log)+1)
v1 = math.pow(10,math.ceil(math.log10(v1)))
vi = 1
vList = []
v = v0
while v <= v1:
vList.append(self.ValueConvert(v,0))
v *= 10
if v0 > 1:
logOffset -= (math.log10(v0) - 1)
                    # subtract 1 from above inside the paren?
fix = '{ dup 0 eq { } { log '+str(logOffset)+' add } ifelse }'
else:
logFlag = 0
v = v0
vList = []
n = 0
while True:
vList.append(self.ValueConvert(v,vi))
if v > vEnd:
break
n += 1
v = v0 + n*vi
fix = '{ '+str(v0-vi)+' sub '+str(vi)+' div }'
print >>self.flog, 'v0:',v0,' vi:',vi,' v1:',v1,' (',vEnd,')'
print >>self.flog, 'vList: ', vList
print >>self.flog, 'logFlag: ', logFlag, ' fix: ', fix
return v0,v1,vi,vList,fix,logFlag
#--- SetXLen(xlen)
def SetXLen(self, xlen):
self.xLen = xlen
print >>self.fout, '/xAxisLen %.2f def' % self.xLen
print >>self.fout, 'doGraph'
return
#--- SetXSize(xsize)
def SetXSize(self, xsize):
self.xSize = xsize
return
#--- SetYSize(ysize)
def SetYSize(self, ysize):
self.ySize = ysize
return
#--- SetPlotBgLevel(level)
#
def SetPlotBgLevel(self,level):
print >>self.fout, '/plotBgLevel ', level, 'def\n'
return
#--- SetPlotPercentDir(value)
def SetPlotPercentDir(self,value):
if value == 'Vertical':
print >>self.fout, '/plotNumPercentDir 1 def\n'
else:
print >>self.fout, '/plotNumPercentDir 0 def\n'
return
#--- SetPlotYLogScale(axis,value)
#
def SetPlotYLogScale(self,axis,value):
if value == 'Off':
v = -1
elif value == 'On':
v = 1
else:
v = 0;
if axis == 1:
self.y1LogScale = v
else:
self.y2LogScale = v
return
#--- SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
#
def SetPlot(self,xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title):
print >>self.fout, '\n\nnextGraph\n1 setlinewidth\n'
(x0,x1,xi,xList,fix,logFlag) = self.GetAxis(xbeg,xend,xinc,0)
self.x0 = x0
self.xInc = xi
self.xCount = len(xList)
self.xList = xList
self.xDict = {}
k = 1
for x in xList:
self.xDict[x] = k
k=k+1
print >>self.fout, '/xfix ', fix, ' def\n'
(y0,y1,yi,yList,fix,logFlag) = self.GetAxis(ybeg,yend,yinc,
self.y1LogScale)
self.y1Inc = yi
self.y1Count = len(yList)
self.yfix1 = '/yfix '+fix+' def\n /yinc yinc1 def'
print >>self.fout, self.yfix1
print >>self.fout, '[ '
for x in xList:
self.fout.write('('+str(x)+') ')
self.fout.write(' ]\n[ ')
for y in yList:
self.fout.write('('+str(y)+') ')
print >>self.fout, ' ]'
print >>self.fout, '('+xtitle+')\n('+ytitle+')\naxes\n'
print >>self.fout, self.xGrid, self.yGrid, ' grid\n'
print >>self.fout, '/ymtitle ypos ylen add 10 add def\n'
# Multiple lines in title are separated by '|'
print >>self.flog, 'Main Title: '+title
titleLines = title.split('|')
for t in titleLines:
if len(t) > 0:
print >>self.flog, ' '+t
print >>self.fout, '('+t+')\n'
print >>self.fout, 'Mtitles\n'
# print >>self.fout, '('+title+')\nMtitles\n'
if logFlag == 1:
print >>self.fout, 'beginFunction\n'
for ys in yList:
factor = 1
if ys[-1:] == 'K':
yss = ys[:-1]
factor = 1000
elif ys[-1:] == 'M':
yss = ys[:-1]
factor = 1000000
else:
yss = ys
y = float(yss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, 0, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '19 { 0 0 0 setrgbcolor } plotSymbolsC\n'
return y1
#--- SetPlot2(xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
# xtitle,ytitle,ztitle,title)
#
def SetPlot2(self,xbeg,xend,xinc,ybeg,yend,yinc,zbeg,zend,zinc,
xtitle,ytitle,ztitle,title):
rv = self.SetPlot(xbeg,xend,xinc,ybeg,yend,yinc,xtitle,ytitle,title)
(z0,z1,zi,zList,fix,logFlag) = self.GetAxis(zbeg,zend,zinc,self.y2LogScale)
self.y2Inc = zi
self.y2Count = len(zList)
print >>self.fout, '/Flag2Yaxes 1 def'
self.yfix2 = '/yfix '+fix+' def\n/yinc yinc2 def'
print >>self.fout, 'axpos axlen add aypos aylen'
self.fout.write('[ ')
for z in zList:
self.fout.write('('+str(z)+') ')
self.fout.write(' ]')
if ztitle != '':
print >>self.fout, '('+ztitle+') vaxis2'
if logFlag == 1:
print >>self.fout, self.yfix2
print >>self.fout, 'beginFunction\n'
for zs in zList:
factor = 1
if zs[-1:] == 'K':
zss = zs[:-1]
factor = 1000
elif zs[-1:] == 'M':
zss = zs[:-1]
factor = 1000000
else:
zss = zs
y = float(zss)*factor/10.0
k = 2
while k < 10:
print >>self.fout, self.xCount, k*y
k += 1
print >>self.fout, 'endFunction\n'
print >>self.fout, '18 { 0.72 0.52 0.5 setrgbcolor } plotSymbolsC\n'
return rv
#--- SetColor(color)
#
def SetColor(self, color):
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- GetColorIndx(indx)
#
def GetColorIndx(self, indx):
color = self.colors[indx % self.colorsN]
rv = ' { '+str(color[0])+' '+str(color[1])+' '+str(color[2])+ \
' setrgbcolor } '
return rv
#--- SetColorIndx(indx, r, g, b)
#
def SetColorIndx(self, indx, r, g, b):
        # self.colors holds tuples, so replace the whole entry rather than mutating it
        self.colors[indx] = (r, g, b)
        return
#--- outputPS(string)
#
def outputPS(self, s):
print >>self.fout, s
#--- SeriesNames(names)
#
def SeriesNames(self, names):
indx = len(names) - 1
if indx == 0:
return
print >>self.fout, '('+self.seriesTitle+')'
while indx >= 0:
if names[indx] != None:
print >>self.fout, '('+names[indx]+') '
print >>self.fout, self.SetColor(self.colors[indx % self.colorsN])
indx -= 1
print >>self.fout, 'fdescriptionsC'
#--- PlotVBars(xList, type)
#
def PlotVBars(self, xList, type):
flog = self.flog
print >>self.fout, self.yfix1
print >>self.fout, 'beginFunction\n'
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
indx += 1
print >>self.fout, x, 0.0
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
print >>self.fout, x
print >>self.fout, endFun, type, '\n'
return
#--- PlotData(axis, xList, yList, zList, id, type)
#
def PlotData(self, axis, xList, yList, zList, id, type):
flog = self.flog
print >>flog, 'graph xList: ', self.xList, ' xList: ', xList, \
' yList: ', yList
print >>self.fout, '%\n% Plot '+id+'\n%\n'
print >>self.fout, '/xfix { ', self.x0 - self.xInc - self.xOffset,' sub ', self.xInc, ' div ', 0.0,' add } def\n'
if axis == 2:
print >>self.fout, self.yfix2
elif axis == 1:
print >>self.fout, self.yfix1
# else:
# print >>self.fout, '/yfix { 0 add } def\n'
print >>self.fout, 'beginFunction\n'
if isinstance(zList,list):
endFun = 'endFunctionW\n'
else:
endFun = 'endFunction\n'
indx = 0
for x in xList:
if x == ' ' and indx == len(xList)-1:
continue
if len(yList) <= indx:
continue
y = yList[indx]
if isinstance(zList,list):
if len(zList) <= indx:
continue
z = zList[indx]
else:
z = ''
indx += 1
if self.xUniform == True:
g_indx = self.xDict[x]
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
if (indx != 0) and (indx % 1000) == 0:
print >>self.fout, endFun+type+'\nbeginFunction\n'
if self.xUniform == True:
print >>self.fout, g_indx, y, z
else:
print >>self.fout, x, y, z
print >>self.fout, endFun, type, '\n'
return
#--- GetImage()
#
def GetImage(self):
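        # Rasterize the accumulated PostScript to JPEG via Ghostscript; for
        # single-plot pages the -g (pixels) and -r (dpi) flags are scaled to
        # the requested image size.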
flog = self.flog
print >>self.fout, 'showpage\n'
self.fout.flush()
os.fsync(self.fout)
if self.plotsPerPage == 1:
# size = ' -g1200x550 '
size = ' -g%dx%d ' % (self.xSize, self.ySize)
xres = int(100 * self.xSize * 6.5 / (1200 * self.xLen))
yres = int(110 * self.ySize / 550)
res = ' -r%dx%d ' % (xres, yres)
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE '+ res +self.fname+' -c quit'
# cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
else:
size = ' -g1200x1100 '
cmdStr = gsPath + ' -sDEVICE=jpeg'+size+'-sOutputFile='+self.foutPath+self.foutName+'%d.jpg -dNOPAUSE -r100x100 '+self.fname+' -c quit'
print >>flog, 'cmdStr: ', cmdStr
output = commands.getoutput(cmdStr)
print >>flog, 'output from gs command: ', output
return self.foutPath+self.foutName+'.jpg'
#--- Main
#
def main():
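    # Read "(from_state, to_state, time)" transition records (one per line) from
    # fname and plot each state's occupancy bands over [tMin, tMax].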
tMin = 0
tMax = 100000
stateList = [0,1,2,2,3,3,3,3,4]
fname = 'sched.txt'
if len(sys.argv) == 2:
fname = sys.argv[1]
elif len(sys.argv) == 3:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
elif len(sys.argv) == 4:
tMin = int(sys.argv[1])
tMax = int(sys.argv[2])
fname = sys.argv[3]
elif len(sys.argv) != 1:
print 'USAGE: psPlot.py [tMin tMax] [fname]'
sys.exit(1)
print 'tMin,tMax: ', tMin, tMax, 'fname: ', fname
p = PsPlot('./p', 'Header', 'SubHeader', 1)
fromStateList = []
toStateList = []
time1List = []
time2List = []
indx = 0
oldTime = 0
fin = open(fname, 'r')
for inputLine in fin:
inputLine = inputLine.replace(' ','')
inputLine = inputLine.replace("'", '')
i1 = inputLine.find('(')
i2 = inputLine.find(')')
inputList = inputLine[i1+1:i2-1].split(',')
s1 = stateList[int(inputList[0])]
s2 = stateList[int(inputList[1])]
t = int(inputList[2])
if indx != 0 and t >= tMin and t <= tMax:
fromStateList.append(s1)
toStateList.append(s2)
time1List.append(oldTime)
time2List.append(t)
oldTime = t
indx += 1
p.SetPlot(tMin, tMax, 0, 0, 2, 0, 'Time', 'Socket/State', 'Chavey\'s Plot')
state = 0
while state <= 4:
t1List = []
t2List = []
sList = []
indx = 0
for s in toStateList:
if s == state:
t1List.append(time1List[indx])
t2List.append(time2List[indx])
sList.append(0.10 + s*0.20)
indx += 1
        p.PlotData(1, t1List, t2List, sList, 'Test',
                   '0.1 in 0 '+p.SetColor(p.colors[state])+' plotWbarsC')
state += 1
    image = p.GetImage()
print 'Image file: ', image
p.End()
if __name__ == "__main__":
main()
[record] 86d75f7e9a302f49289d9be8498b550dc47650fa · private/templates/NYC/config.py · devinbalkind/eden @ d5a684eae537432eb2c7d954132484a4714ca8fb · license: MIT · Python · 79,634 bytes (per-file metric columns omitted)
# -*- coding: utf-8 -*-
try:
# Python 2.7
from collections import OrderedDict
except:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.html import A, URL
from gluon.storage import Storage
from s3 import s3_fullname
T = current.T
settings = current.deployment_settings
"""
Template settings for NYC Prepared
"""
# Pre-Populate
settings.base.prepopulate = ("NYC",)
settings.base.system_name = T("NYC Prepared")
settings.base.system_name_short = T("NYC Prepared")
# Theme (folder to use for views/layout.html)
settings.base.theme = "NYC"
settings.ui.formstyle_row = "bootstrap"
settings.ui.formstyle = "bootstrap"
settings.ui.filter_formstyle = "table_inline"
settings.msg.parser = "NYC"
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0500"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# PDF to Letter
settings.base.paper_size = T("Letter")
# Restrict the Location Selector to just certain countries
# NB This can also be overridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("US",)
settings.fin.currencies = {
"USD" : T("United States Dollars"),
}
settings.L10n.languages = OrderedDict([
("en", "English"),
("es", "Español"),
])
# Authentication settings
# These settings should be changed _after_ the 1st (admin) user is
# registered in order to secure the deployment
# Should users be allowed to register themselves?
settings.security.self_registration = "index"
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Always notify the approver of a new (verified) user, even if the user is automatically approved
#settings.auth.always_notify_approver = False
# Uncomment this to request the Mobile Phone when a user registers
settings.auth.registration_requests_mobile_phone = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
# Uncomment this to request the Site when a user registers
#settings.auth.registration_requests_site = True
# Roles that newly-registered users get automatically
#settings.auth.registration_roles = { 0: ["comms_dispatch"]}
#settings.auth.registration_link_user_to = {"staff":T("Staff"),
# #"volunteer":T("Volunteer")
# }
settings.auth.registration_link_user_to_default = "staff"
settings.security.policy = 5 # Controller, Function & Table ACLs
# Enable this to have Open links in IFrames open a full page in a new tab
settings.ui.iframe_opens_full = True
settings.ui.label_attachments = "Media"
settings.ui.update_label = "Edit"
# Uncomment to disable checking that LatLons are within boundaries of their parent
#settings.gis.check_within_parent_boundaries = False
# GeoNames username
settings.gis.geonames_username = "eden_nyc"
# Uncomment to show created_by/modified_by using Names not Emails
settings.ui.auth_user_represent = "name"
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",)
# -----------------------------------------------------------------------------
# Audit
def audit_write(method, tablename, form, record, representation):
if not current.auth.user:
# Don't include prepop
return False
if tablename in ("cms_post",
"org_facility",
"org_organisation",
"req_req",
):
# Perform normal Audit
return True
else:
# Don't Audit non user-visible resources
return False
settings.security.audit_write = audit_write
# -----------------------------------------------------------------------------
# CMS
# Uncomment to use Bookmarks in Newsfeed
settings.cms.bookmarks = True
# Uncomment to have the Filter form in Newsfeed open by default
settings.cms.filter_open = True
# Uncomment to adjust filters in Newsfeed when clicking on locations instead of opening the profile page
settings.cms.location_click_filters = True
# Uncomment to use organisation_id instead of created_by in Newsfeed
settings.cms.organisation = "post_organisation.organisation_id"
# Uncomment to use org_group_id in Newsfeed
settings.cms.organisation_group = "post_organisation_group.group_id"
# Uncomment to use person_id instead of created_by in Newsfeed
settings.cms.person = "person_id"
# Uncomment to use Rich Text editor in Newsfeed
settings.cms.richtext = True
# Uncomment to show Links in Newsfeed
settings.cms.show_links = True
# Uncomment to show Tags in Newsfeed
settings.cms.show_tags = True
# Uncomment to show post Titles in Newsfeed
settings.cms.show_titles = True
# -----------------------------------------------------------------------------
# Inventory Management
# Uncomment to customise the label for Facilities in Inventory Management
settings.inv.facility_label = "Facility"
# Uncomment if you need a simpler (but less accountable) process for managing stock levels
#settings.inv.direct_stock_edits = True
# Uncomment to call Stock Adjustments, 'Stock Counts'
settings.inv.stock_count = True
# Uncomment to not track pack values
settings.inv.track_pack_values = False
settings.inv.send_show_org = False
# Types common to both Send and Receive
settings.inv.shipment_types = {
1: T("Other Warehouse")
}
settings.inv.send_types = {
#21: T("Distribution")
}
settings.inv.send_type_default = 1
settings.inv.item_status = {
#0: current.messages["NONE"],
#1: T("Dump"),
#2: T("Sale"),
#3: T("Reject"),
#4: T("Surplus")
}
# -----------------------------------------------------------------------------
# Organisations
#
# Enable the use of Organisation Groups
settings.org.groups = "Network"
# Make Services Hierarchical
settings.org.services_hierarchical = True
# Set the label for Sites
settings.org.site_label = "Facility"
#settings.org.site_label = "Location"
# Uncomment to show the date when a Site (Facilities-only for now) was last contacted
settings.org.site_last_contacted = True
# Enable certain fields just for specific Organisations
# empty list => disabled for all (including Admin)
#settings.org.dependent_fields = { \
# "pr_person_details.mother_name" : [],
# "pr_person_details.father_name" : [],
# "pr_person_details.company" : [],
# "pr_person_details.affiliations" : [],
# "vol_volunteer.active" : [],
# "vol_volunteer_cluster.vol_cluster_type_id" : [],
# "vol_volunteer_cluster.vol_cluster_id" : [],
# "vol_volunteer_cluster.vol_cluster_position_id" : [],
# }
# Uncomment to use an Autocomplete for Site lookup fields
settings.org.site_autocomplete = True
# Extra fields to search in Autocompletes & display in Representations
settings.org.site_autocomplete_fields = ("organisation_id$name",
"location_id$addr_street",
)
# Uncomment to hide inv & req tabs from Sites
#settings.org.site_inv_req_tabs = True
# -----------------------------------------------------------------------------
def facility_marker_fn(record):
"""
Function to decide which Marker to use for Facilities Map
@ToDo: Legend
"""
db = current.db
s3db = current.s3db
table = db.org_facility_type
ltable = db.org_site_facility_type
query = (ltable.site_id == record.site_id) & \
(ltable.facility_type_id == table.id)
rows = db(query).select(table.name)
types = [row.name for row in rows]
# Use Marker in preferential order
if "Hub" in types:
marker = "warehouse"
elif "Medical Clinic" in types:
marker = "hospital"
elif "Food" in types:
marker = "food"
elif "Relief Site" in types:
marker = "asset"
elif "Residential Building" in types:
marker = "residence"
#elif "Shelter" in types:
# marker = "shelter"
else:
# Unknown
marker = "office"
if settings.has_module("req"):
# Colour code by open/priority requests
reqs = record.reqs
if reqs == 3:
# High
marker = "%s_red" % marker
elif reqs == 2:
# Medium
marker = "%s_yellow" % marker
elif reqs == 1:
# Low
marker = "%s_green" % marker
mtable = db.gis_marker
try:
marker = db(mtable.name == marker).select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
except:
marker = db(mtable.name == "office").select(mtable.image,
mtable.height,
mtable.width,
cache=s3db.cache,
limitby=(0, 1)
).first()
return marker
# -----------------------------------------------------------------------------
def org_facility_onvalidation(form):
"""
Default the name to the Street Address
"""
form_vars = form.vars
name = form_vars.get("name", None)
if name:
return
address = form_vars.get("address", None)
if address:
form_vars.name = address
else:
# We need a default
form_vars.name = current.db.org_facility.location_id.represent(form_vars.location_id)
# -----------------------------------------------------------------------------
def customise_org_facility_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Tell the client to request per-feature markers
s3db.configure("org_facility", marker_fn=facility_marker_fn)
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method not in ("read", "update"):
types = r.get_vars.get("site_facility_type.facility_type_id__belongs", None)
if not types:
# Hide Private Residences
from s3 import FS
s3.filter = FS("site_facility_type.facility_type_id$name") != "Private Residence"
if r.interactive:
tablename = "org_facility"
table = s3db[tablename]
if not r.component and r.method in (None, "create", "update"):
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2, S3MultiSelectWidget
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION_SELECTOR2(levels=levels)
field.widget = S3LocationSelectorWidget2(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
table.organisation_id.widget = S3MultiSelectWidget(multiple=False)
if r.get_vars.get("format", None) == "popup":
# Coming from req/create form
# Hide most Fields
from s3 import S3SQLCustomForm, S3SQLInlineComponent
# We default this onvalidation
table.name.notnull = False
table.name.requires = None
crud_form = S3SQLCustomForm(S3SQLInlineComponent(
"site_facility_type",
label = T("Facility Type"),
fields = [("", "facility_type_id")],
multiple = False,
required = True,
),
"name",
"location_id",
)
s3db.configure(tablename,
crud_form = crud_form,
onvalidation = org_facility_onvalidation,
)
return True
s3.prep = custom_prep
return attr
settings.customise_org_facility_controller = customise_org_facility_controller
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
from gluon.html import DIV, INPUT
from s3 import S3MultiSelectWidget, S3SQLCustomForm, S3SQLInlineLink, S3SQLInlineComponent, S3SQLInlineComponentMultiSelectWidget
s3db = current.s3db
if r.tablename == "org_organisation":
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
else:
# Component
if r.component_id:
# Update form
db = current.db
otable = s3db.org_organisation
org = db(otable.id == r.component_id).select(otable.pe_id,
limitby=(0, 1)
).first()
try:
pe_id = org.pe_id
except:
current.log.error("Org %s not found: cannot set rss_import correctly" % r.component_id)
# Default
rss_import = None
else:
ctable = s3db.pr_contact
query = (ctable.pe_id == pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
mtable = s3db.org_group_membership
mtable.group_id.widget = S3MultiSelectWidget(multiple=False)
mtable.status_id.widget = S3MultiSelectWidget(multiple=False,
create=dict(c="org",
f="group_membership_status",
label=str(T("Add New Status")),
parent="group_membership",
child="status_id"
))
crud_form = S3SQLCustomForm(
"name",
"acronym",
S3SQLInlineLink(
"organisation_type",
field = "organisation_type_id",
label = T("Type"),
multiple = False,
#widget = "hierarchy",
),
S3SQLInlineComponentMultiSelectWidget(
# activate hierarchical org_service:
#S3SQLInlineLink(
"service",
label = T("Services"),
field = "service_id",
# activate hierarchical org_service:
#leafonly = False,
#widget = "hierarchy",
),
S3SQLInlineComponent(
"group_membership",
label = T("Network"),
fields = [("", "group_id"),
("", "status_id"),
],
),
S3SQLInlineComponent(
"address",
label = T("Address"),
multiple = False,
# This is just Text - put into the Comments box for now
# Ultimately should go into location_id$addr_street
fields = [("", "comments")],
),
S3SQLInlineComponentMultiSelectWidget(
"location",
label = T("Neighborhoods Served"),
field = "location_id",
filterby = dict(field = "level",
options = "L4"
),
# @ToDo: GroupedCheckbox Widget or Hierarchical MultiSelectWidget
#cols = 5,
),
"phone",
S3SQLInlineComponent(
"contact",
name = "phone2",
label = T("Phone2"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value"),
#(T("Don't Import Feed"), "poll"),
],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"comments",
postprocess = pr_contact_postprocess,
)
from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter
# activate hierarchical org_service:
#from s3 import S3LocationFilter, S3OptionsFilter, S3TextFilter, S3HierarchyFilter
filter_widgets = [
S3TextFilter(["name", "acronym"],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("group_membership.group_id",
label = T("Network"),
represent = "%(name)s",
#hidden = True,
),
S3LocationFilter("organisation_location.location_id",
label = T("Neighborhood"),
levels = ("L3", "L4"),
#hidden = True,
),
S3OptionsFilter("service_organisation.service_id",
#label = T("Service"),
#hidden = True,
),
# activate hierarchical org_service:
#S3HierarchyFilter("service_organisation.service_id",
# #label = T("Service"),
# #hidden = True,
# ),
S3OptionsFilter("organisation_organisation_type.organisation_type_id",
label = T("Type"),
#hidden = True,
),
]
list_fields = ["name",
(T("Type"), "organisation_organisation_type.organisation_type_id"),
(T("Services"), "service.name"),
"phone",
(T("Email"), "email.value"),
"website"
#(T("Neighborhoods Served"), "location.name"),
]
s3db.configure("org_organisation",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
def customise_org_organisation_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive:
if r.component_name == "facility":
if r.method in (None, "create", "update"):
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
table = s3db.org_facility
field = table.location_id
if r.method in ("create", "update"):
field.label = "" # Gets replaced by widget
levels = ("L2", "L3")
field.requires = IS_LOCATION_SELECTOR2(levels=levels)
field.widget = S3LocationSelectorWidget2(levels=levels,
hide_lx=False,
reverse_lx=True,
show_address=True,
show_postcode=True,
)
elif r.component_name == "human_resource":
# Don't assume that user is from same org/site as Contacts they create
r.component.table.site_id.default = None
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "rheader" in output:
# Custom Tabs
tabs = [(T("Basic Details"), None),
(T("Contacts"), "human_resource"),
(T("Facilities"), "facility"),
(T("Projects"), "project"),
(T("Assets"), "asset"),
]
output["rheader"] = s3db.org_rheader(r, tabs=tabs)
return output
s3.postp = custom_postp
return attr
settings.customise_org_organisation_controller = customise_org_organisation_controller
# -----------------------------------------------------------------------------
def customise_org_group_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component:
table = s3db.org_group
list_fields = ["name",
"mission",
"website",
"meetings",
]
s3db.configure("org_group",
list_fields = list_fields,
)
if r.interactive:
from gluon.html import DIV, INPUT
from s3 import S3SQLCustomForm, S3SQLInlineComponent
if r.method != "read":
from gluon.validators import IS_EMPTY_OR
from s3 import IS_LOCATION_SELECTOR2, S3LocationSelectorWidget2
field = table.location_id
field.label = "" # Gets replaced by widget
#field.requires = IS_LOCATION_SELECTOR2(levels = ("L2",))
field.requires = IS_EMPTY_OR(
IS_LOCATION_SELECTOR2(levels = ("L2",))
)
field.widget = S3LocationSelectorWidget2(levels = ("L2",),
points = True,
polygons = True,
)
# Default location to Manhattan
db = current.db
gtable = db.gis_location
query = (gtable.name == "New York") & \
(gtable.level == "L2")
manhattan = db(query).select(gtable.id,
limitby=(0, 1)).first()
if manhattan:
field.default = manhattan.id
table.mission.readable = table.mission.writable = True
table.meetings.readable = table.meetings.writable = True
if r.id:
# Update form
ctable = s3db.pr_contact
query = (ctable.pe_id == r.record.pe_id) & \
(ctable.contact_method == "RSS") & \
(ctable.deleted == False)
rss = current.db(query).select(ctable.poll,
limitby=(0, 1)
).first()
if rss and not rss.poll:
# Remember that we don't wish to import
rss_import = "on"
else:
# Default
rss_import = None
else:
# Create form: Default
rss_import = None
crud_form = S3SQLCustomForm(
"name",
"location_id",
"mission",
S3SQLInlineComponent(
"contact",
name = "phone",
label = T("Phone"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "WORK_PHONE"
)
),
S3SQLInlineComponent(
"contact",
name = "email",
label = T("Email"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL"
)
),
"website",
S3SQLInlineComponent(
"contact",
comment = DIV(INPUT(_type="checkbox",
_name="rss_no_import",
value = rss_import,
),
T("Don't Import Feed")),
name = "rss",
label = T("RSS"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "RSS"
)
),
S3SQLInlineComponent(
"document",
name = "iCal",
label = "iCAL",
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="iCal"
)
),
S3SQLInlineComponent(
"document",
name = "data",
label = T("Data"),
multiple = False,
fields = [("", "url")],
filterby = dict(field = "name",
options="Data"
)
),
S3SQLInlineComponent(
"contact",
name = "twitter",
label = T("Twitter"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "TWITTER"
)
),
S3SQLInlineComponent(
"contact",
name = "facebook",
label = T("Facebook"),
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "FACEBOOK"
)
),
"meetings",
"comments",
postprocess = pr_contact_postprocess,
)
s3db.configure("org_group",
crud_form = crud_form,
)
elif r.component_name == "pr_group":
list_fields = [#(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
list_fields = list_fields,
)
elif r.component_name == "organisation":
# Add Network Status to List Fields
list_fields = s3db.get_config("org_organisation", "list_fields")
list_fields.insert(1, "group_membership.status_id")
return result
s3.prep = custom_prep
if current.auth.s3_logged_in():
# Allow components with components (such as org/group) to breakout from tabs
attr["native"] = True
return attr
settings.customise_org_group_controller = customise_org_group_controller
# -----------------------------------------------------------------------------
# Persons
# Uncomment to hide fields in S3AddPersonWidget
settings.pr.request_dob = False
settings.pr.request_gender = False
# Doesn't yet work (form fails to submit)
#settings.pr.select_existing = False
settings.pr.show_emergency_contacts = False
# -----------------------------------------------------------------------------
# Persons
def customise_pr_person_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
s3db = current.s3db
#if r.method == "validate":
# # Can't validate image without the file
# image_field = s3db.pr_image.image
# image_field.requires = None
if r.interactive or r.representation == "aadata":
if not r.component:
hr_fields = ["organisation_id",
"job_title_id",
"site_id",
]
if r.method in ("create", "update"):
get_vars = r.get_vars
# Context from a Profile page?"
organisation_id = get_vars.get("(organisation)", None)
if organisation_id:
field = s3db.hrm_human_resource.organisation_id
field.default = organisation_id
field.readable = field.writable = False
hr_fields.remove("organisation_id")
site_id = get_vars.get("(site)", None)
if site_id:
field = s3db.hrm_human_resource.site_id
field.default = site_id
field.readable = field.writable = False
hr_fields.remove("site_id")
else:
s3db.hrm_human_resource.site_id.default = None
# ImageCrop widget doesn't currently work within an Inline Form
#image_field = s3db.pr_image.image
#from gluon.validators import IS_IMAGE
#image_field.requires = IS_IMAGE()
#image_field.widget = None
from s3 import S3SQLCustomForm, S3SQLInlineComponent
s3_sql_custom_fields = ["first_name",
#"middle_name",
"last_name",
S3SQLInlineComponent(
"human_resource",
name = "human_resource",
label = "",
multiple = False,
fields = hr_fields,
),
#S3SQLInlineComponent(
# "image",
# name = "image",
# label = T("Photo"),
# multiple = False,
# fields = [("", "image")],
# filterby = dict(field = "profile",
# options=[True]
# )
# ),
]
list_fields = [(current.messages.ORGANISATION, "human_resource.organisation_id"),
"first_name",
#"middle_name",
"last_name",
(T("Job Title"), "human_resource.job_title_id"),
(T("Office"), "human_resource.site_id"),
]
# Don't include Email/Phone for unauthenticated users
if current.auth.is_logged_in():
MOBILE = settings.get_ui_label_mobile_phone()
EMAIL = T("Email")
list_fields += [(MOBILE, "phone.value"),
(EMAIL, "email.value"),
]
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "phone",
label = MOBILE,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "SMS")),
)
s3_sql_custom_fields.insert(3,
S3SQLInlineComponent(
"contact",
name = "email",
label = EMAIL,
multiple = False,
fields = [("", "value")],
filterby = dict(field = "contact_method",
options = "EMAIL")),
)
crud_form = S3SQLCustomForm(*s3_sql_custom_fields)
s3db.configure(r.tablename,
crud_form = crud_form,
list_fields = list_fields,
)
elif r.component_name == "group_membership":
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
return result
s3.prep = custom_prep
# Custom postp
standard_postp = s3.postp
def custom_postp(r, output):
# Call standard postp
if callable(standard_postp):
output = standard_postp(r, output)
if r.interactive and isinstance(output, dict):
if "form" in output:
output["form"].add_class("pr_person")
elif "item" in output and hasattr(output["item"], "add_class"):
output["item"].add_class("pr_person")
return output
s3.postp = custom_postp
return attr
settings.customise_pr_person_controller = customise_pr_person_controller
# -----------------------------------------------------------------------------
# Groups
def chairperson(row):
"""
Virtual Field to show the chairperson of a group
"""
if hasattr(row, "pr_group"):
row = row.pr_group
try:
group_id = row.id
except:
# not available
return current.messages["NONE"]
db = current.db
mtable = current.s3db.pr_group_membership
ptable = db.pr_person
query = (mtable.group_id == group_id) & \
(mtable.group_head == True) & \
(mtable.person_id == ptable.id)
chair = db(query).select(ptable.first_name,
ptable.middle_name,
ptable.last_name,
ptable.id,
limitby=(0, 1)).first()
if chair:
# Only used in list view so HTML is OK
return A(s3_fullname(chair),
_href=URL(c="hrm", f="person", args=chair.id))
else:
return current.messages["NONE"]
# -----------------------------------------------------------------------------
def customise_pr_group_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
from s3 import S3Represent, S3TextFilter, S3OptionsFilter, S3SQLCustomForm, S3SQLInlineComponent
s3db = current.s3db
s3db.org_group_team.org_group_id.represent = S3Represent(lookup="org_group",
show_link=True)
crud_form = S3SQLCustomForm("name",
"description",
S3SQLInlineComponent("group_team",
label = T("Network"),
fields = [("", "org_group_id")],
# @ToDo: Make this optional?
multiple = False,
),
"meetings",
"comments",
)
filter_widgets = [
S3TextFilter(["name",
"description",
"comments",
"group_team.org_group_id$name",
],
label = T("Search"),
comment = T("You can search by by group name, description or comments and by network name. You may use % as wildcard. Press 'Search' without input to list all."),
#_class = "filter-search",
),
S3OptionsFilter("group_team.org_group_id",
label = T("Network"),
#hidden = True,
),
]
        # Need to re-do list_fields as they get overwritten by hrm_group_controller()
list_fields = [(T("Network"), "group_team.org_group_id"),
"name",
"description",
"meetings",
(T("Chairperson"), "chairperson"),
"comments",
]
s3db.configure("pr_group",
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
s3db.pr_group_membership.group_head.label = T("Group Chairperson")
if r.component_name == "group_membership":
from s3layouts import S3AddResourceLink
s3db.pr_group_membership.person_id.comment = \
S3AddResourceLink(c="pr", f="person",
title=T("Create Person"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
#else:
        # # RHeader wants a simplified version, but we don't want it inconsistent across tabs
# s3db.pr_group_membership.group_head.label = T("Chairperson")
return True
s3.prep = custom_prep
return attr
settings.customise_pr_group_controller = customise_pr_group_controller
# -----------------------------------------------------------------------------
def customise_pr_group_resource(r, tablename):
"""
Customise pr_group resource (in group & org_group controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
table = s3db.pr_group
field = table.group_type
field.default = 3 # Relief Team, to show up in hrm/group
field.readable = field.writable = False
table.name.label = T("Name")
table.description.label = T("Description")
table.meetings.readable = table.meetings.writable = True
# Increase size of widget
from s3 import s3_comments_widget
table.description.widget = s3_comments_widget
from gluon import Field
table.chairperson = Field.Method("chairperson", chairperson)
# Format for filter_widgets & imports
s3db.add_components("pr_group",
org_group_team = "group_id",
)
s3db.configure("pr_group",
# Redirect to member list when a new group has been created
create_next = URL(c="hrm", f="group",
args=["[id]", "group_membership"]),
)
settings.customise_pr_group_resource = customise_pr_group_resource
# -----------------------------------------------------------------------------
def pr_contact_postprocess(form):
"""
Import Organisation/Network RSS Feeds
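        - keeps msg_rss_channel records in sync with the RSS contact on the form:
          channels are created, renamed, enabled or disabled (along with their
          parsers) according to the "Don't Import Feed" checkbox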
"""
s3db = current.s3db
form_vars = form.vars
rss_url = form_vars.rsscontact_i_value_edit_0 or \
form_vars.rsscontact_i_value_edit_none
if not rss_url:
if form.record:
# Update form
old_rss = form.record.sub_rsscontact
import json
data = old_rss = json.loads(old_rss)["data"]
if data:
# RSS feed is being deleted, so we should disable it
old_rss = data[0]["value"]["value"]
table = s3db.msg_rss_channel
old = current.db(table.url == old_rss).select(table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if old and old.enabled:
s3db.msg_channel_disable("msg_rss_channel", old.channel_id)
return
else:
# Nothing to do :)
return
# Check if we already have a channel for this Contact
db = current.db
name = form_vars.name
table = s3db.msg_rss_channel
name_exists = db(table.name == name).select(table.id,
table.channel_id,
table.enabled,
table.url,
limitby = (0, 1)
).first()
no_import = current.request.post_vars.get("rss_no_import", None)
if name_exists:
if name_exists.url == rss_url:
# No change to either Contact Name or URL
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# We have 2 feeds: 1 for the Contact & 1 for the URL
# Disable the old Contact one and link the URL one to this Contact
# and ensure active or not as appropriate
# Name field is unique so rename old one
name_exists.update_record(name="%s (Old)" % name)
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
else:
# Update the URL
name_exists.update_record(url=rss_url)
if no_import:
if name_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
name_exists.channel_id)
return
elif name_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
name_exists.channel_id)
return
else:
# Check if we already have a channel for this URL
url_exists = db(table.url == rss_url).select(table.id,
table.channel_id,
table.enabled,
limitby = (0, 1)
).first()
if url_exists:
# Either Contact has changed Name or this feed is associated with
# another Contact
# - update Feed name
url_exists.update_record(name=name)
if no_import:
if url_exists.enabled:
# Disable channel (& associated parsers)
s3db.msg_channel_disable("msg_rss_channel",
url_exists.channel_id)
return
elif url_exists.enabled:
# Nothing to do :)
return
else:
# Enable channel (& associated parsers)
s3db.msg_channel_enable("msg_rss_channel",
url_exists.channel_id)
return
elif no_import:
# Nothing to do :)
return
#else:
# # Create a new Feed
# pass
# Add RSS Channel
_id = table.insert(name=name, enabled=True, url=rss_url)
record = dict(id=_id)
s3db.update_super(table, record)
# Enable
channel_id = record["channel_id"]
s3db.msg_channel_enable("msg_rss_channel", channel_id)
# Setup Parser
table = s3db.msg_parser
_id = table.insert(channel_id=channel_id,
function_name="parse_rss",
enabled=True)
s3db.msg_parser_enable(_id)
# Check Now
async = current.s3task.async
async("msg_poll", args=["msg_rss_channel", channel_id])
async("msg_parse", args=[channel_id, "parse_rss"])
# -----------------------------------------------------------------------------
# Human Resource Management
# Uncomment to change the label for 'Staff'
settings.hrm.staff_label = "Contacts"
# Uncomment to allow Staff & Volunteers to be registered without an email address
settings.hrm.email_required = False
# Uncomment to allow Staff & Volunteers to be registered without an Organisation
settings.hrm.org_required = False
# Uncomment to show the Organisation name in HR represents
settings.hrm.show_organisation = True
# Uncomment to disable Staff experience
settings.hrm.staff_experience = False
# Uncomment to disable the use of HR Certificates
settings.hrm.use_certificates = False
# Uncomment to disable the use of HR Credentials
settings.hrm.use_credentials = False
# Uncomment to enable the use of HR Education
settings.hrm.use_education = False
# Uncomment to disable the use of HR Skills
#settings.hrm.use_skills = False
# Uncomment to disable the use of HR Trainings
settings.hrm.use_trainings = False
# Uncomment to disable the use of HR Description
settings.hrm.use_description = False
# Change the label of "Teams" to "Groups"
settings.hrm.teams = "Groups"
# Custom label for Organisations in HR module
#settings.hrm.organisation_label = "National Society / Branch"
settings.hrm.organisation_label = "Organization"
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
if not r.component:
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter
filter_widgets = [
S3TextFilter(["person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
],
label = T("Name"),
),
S3OptionsFilter("organisation_id",
filter = True,
header = "",
hidden = True,
),
S3OptionsFilter("group_person.group_id",
label = T("Network"),
#filter = True,
#header = "",
hidden = True,
),
S3LocationFilter("location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
hidden = True,
),
S3OptionsFilter("site_id",
hidden = True,
),
S3OptionsFilter("training.course_id",
label = T("Training"),
hidden = True,
),
S3OptionsFilter("group_membership.group_id",
label = T("Team"),
filter = True,
header = "",
hidden = True,
),
]
s3db = current.s3db
s3db.configure("hrm_human_resource",
filter_widgets = filter_widgets,
)
field = r.table.site_id
# Don't assume that user is from same org/site as Contacts they create
field.default = None
# Use a hierarchical dropdown instead of AC
field.widget = None
script = \
'''$.filterOptionsS3({
'trigger':'organisation_id',
'target':'site_id',
'lookupResource':'site',
'lookupURL':'/%s/org/sites_for_org/',
'optional':true
})''' % r.application
s3.jquery_ready.append(script)
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_human_resource_controller = customise_hrm_human_resource_controller
# -----------------------------------------------------------------------------
def customise_hrm_human_resource_resource(r, tablename):
"""
Customise hrm_human_resource resource (in facility, human_resource, organisation & person controllers)
- runs after controller customisation
- but runs before prep
"""
s3db = current.s3db
from s3 import S3SQLCustomForm, S3SQLInlineComponent
crud_form = S3SQLCustomForm("person_id",
"organisation_id",
"site_id",
S3SQLInlineComponent(
"group_person",
label = T("Network"),
link = False,
fields = [("", "group_id")],
multiple = False,
),
"job_title_id",
"start_date",
)
list_fields = ["id",
"person_id",
"job_title_id",
"organisation_id",
(T("Network"), "group_person.group_id"),
(T("Groups"), "person_id$group_membership.group_id"),
"site_id",
#"site_contact",
(T("Email"), "email.value"),
(settings.get_ui_label_mobile_phone(), "phone.value"),
]
s3db.configure("hrm_human_resource",
crud_form = crud_form,
list_fields = list_fields,
)
settings.customise_hrm_human_resource_resource = customise_hrm_human_resource_resource
# -----------------------------------------------------------------------------
def customise_hrm_job_title_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if r.interactive or r.representation == "aadata":
table = current.s3db.hrm_job_title
table.organisation_id.readable = table.organisation_id.writable = False
table.type.readable = table.type.writable = False
return result
s3.prep = custom_prep
return attr
settings.customise_hrm_job_title_controller = customise_hrm_job_title_controller
# -----------------------------------------------------------------------------
# Projects
# Use codes for projects (called 'blurb' in NYC)
settings.project.codes = True
# Uncomment this to use settings suitable for detailed Task management
settings.project.mode_task = False
# Uncomment this to use Activities for projects
settings.project.activities = True
# Uncomment this to use Milestones in project/task.
settings.project.milestones = False
# Uncomment this to disable Sectors in projects
settings.project.sectors = False
# Multiple partner organizations
settings.project.multiple_organisations = True
def customise_project_project_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
else:
result = True
if not r.component and (r.interactive or r.representation == "aadata"):
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineComponentCheckbox
s3db = current.s3db
table = r.table
tablename = "project_project"
table.code.label = T("Project blurb (max. 100 characters)")
table.code.max_length = 100
table.comments.label = T("How people can help")
script = '''$('#project_project_code').attr('maxlength','100')'''
s3.jquery_ready.append(script)
crud_form = S3SQLCustomForm(
"organisation_id",
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
"calendar",
#"drr.hfa",
#"objectives",
"human_resource_id",
# Activities
S3SQLInlineComponent(
"location",
label = T("Location"),
fields = [("", "location_id")],
),
# Partner Orgs
S3SQLInlineComponent(
"organisation",
name = "partner",
label = T("Partner Organizations"),
fields = ["organisation_id",
"comments", # NB This is labelled 'Role' in DRRPP
],
filterby = dict(field = "role",
options = "2"
)
),
S3SQLInlineComponent(
"document",
name = "media",
label = T("URLs (media, fundraising, website, social media, etc.)"),
fields = ["document_id",
"name",
"url",
"comments",
],
filterby = dict(field = "name")
),
S3SQLInlineComponentCheckbox(
"activity_type",
label = T("Categories"),
field = "activity_type_id",
cols = 3,
# Filter Activity Type by Project
filter = {"linktable": "project_activity_type_project",
"lkey": "project_id",
"rkey": "activity_type_id",
},
),
#"budget",
#"currency",
"comments",
)
from s3 import S3TextFilter, S3OptionsFilter, S3LocationFilter, S3DateFilter
filter_widgets = [
S3TextFilter(["name",
"code",
"description",
"organisation.name",
"organisation.acronym",
],
label = T("Name"),
_class = "filter-search",
),
S3OptionsFilter("status_id",
label = T("Status"),
# Not translatable
#represent = "%(name)s",
cols = 3,
),
#S3OptionsFilter("theme_project.theme_id",
# label = T("Theme"),
# #hidden = True,
# ),
S3LocationFilter("location.location_id",
label = T("Location"),
levels = ("L1", "L2", "L3", "L4"),
#hidden = True,
),
# @ToDo: Widget to handle Start & End in 1!
S3DateFilter("start_date",
label = T("Start Date"),
hide_time = True,
#hidden = True,
),
S3DateFilter("end_date",
label = T("End Date"),
hide_time = True,
#hidden = True,
),
]
list_fields = ["id",
"name",
"code",
"organisation_id",
"start_date",
"end_date",
(T("Locations"), "location.location_id"),
]
s3db.configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
)
return result
s3.prep = custom_prep
return attr
settings.customise_project_project_controller = customise_project_project_controller
# -----------------------------------------------------------------------------
# Requests Management
settings.req.req_type = ["People", "Stock"]#, "Summary"]
settings.req.prompt_match = False
#settings.req.use_commit = False
settings.req.requester_optional = True
settings.req.date_writable = False
settings.req.item_quantities_writable = True
settings.req.skill_quantities_writable = True
settings.req.items_ask_purpose = False
#settings.req.use_req_number = False
# Label for Requester
settings.req.requester_label = "Site Contact"
# Filter Requester as being from the Site
settings.req.requester_from_site = True
# Label for Inventory Requests
settings.req.type_inv_label = "Supplies"
# Uncomment to enable Summary 'Site Needs' tab for Offices/Facilities
settings.req.summary = True
# -----------------------------------------------------------------------------
def req_req_postprocess(form):
"""
Runs after crud_form completes
- creates a cms_post in the newswire
- @ToDo: Send out Tweets
"""
req_id = form.vars.id
db = current.db
s3db = current.s3db
rtable = s3db.req_req
# Read the full record
row = db(rtable.id == req_id).select(rtable.type,
rtable.site_id,
rtable.requester_id,
rtable.priority,
rtable.date_required,
rtable.purpose,
rtable.comments,
limitby=(0, 1)
).first()
# Build Title & Body from the Request details
priority = rtable.priority.represent(row.priority)
date_required = row.date_required
if date_required:
date = rtable.date_required.represent(date_required)
title = "%(priority)s by %(date)s" % dict(priority=priority,
date=date)
else:
title = priority
body = row.comments
if row.type == 1:
# Items
ritable = s3db.req_req_item
items = db(ritable.req_id == req_id).select(ritable.item_id,
ritable.item_pack_id,
ritable.quantity)
item_represent = s3db.supply_item_represent
pack_represent = s3db.supply_item_pack_represent
for item in items:
item = "%s %s %s" % (item.quantity,
pack_represent(item.item_pack_id),
item_represent(item.item_id))
body = "%s\n%s" % (item, body)
else:
# Skills
body = "%s\n%s" % (row.purpose, body)
rstable = s3db.req_req_skill
skills = db(rstable.req_id == req_id).select(rstable.skill_id,
rstable.quantity)
skill_represent = s3db.hrm_multi_skill_represent
for skill in skills:
item = "%s %s" % (skill.quantity, skill_represent(skill.skill_id))
body = "%s\n%s" % (item, body)
# Lookup series_id
stable = s3db.cms_series
try:
series_id = db(stable.name == "Request").select(stable.id,
cache=s3db.cache,
limitby=(0, 1)
).first().id
except:
# Prepop hasn't been run
series_id = None
# Location is that of the site
otable = s3db.org_site
location_id = db(otable.site_id == row.site_id).select(otable.location_id,
limitby=(0, 1)
).first().location_id
# Create Post
ptable = s3db.cms_post
_id = ptable.insert(series_id=series_id,
title=title,
body=body,
location_id=location_id,
person_id=row.requester_id,
)
record = dict(id=_id)
s3db.update_super(ptable, record)
# Add source link
url = "%s%s" % (settings.get_base_public_url(),
URL(c="req", f="req", args=req_id))
s3db.doc_document.insert(doc_id=record["doc_id"],
url=url,
)
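# Illustrative sketch (added, hypothetical values): a request with
# priority "High" due 2014-05-01 becomes a cms_post titled roughly
# "High by 01-May-2014" (exact formats come from the represent functions),
# with one "<quantity> <pack> <item>" line per requested item (or the
# purpose plus skill lines for People requests) above the request
# comments, and a doc_document link pointing back to the req/req record.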
# -----------------------------------------------------------------------------
def customise_req_req_resource(r, tablename):
from s3layouts import S3AddResourceLink
current.s3db.req_req.site_id.comment = \
S3AddResourceLink(c="org", f="facility",
vars = dict(child="site_id"),
title=T("Create Facility"),
tooltip=current.messages.AUTOCOMPLETE_HELP)
current.response.s3.req_req_postprocess = req_req_postprocess
if not r.component and r.method in ("create", "update"):
script = \
'''$('#req_req_site_id').change(function(){
var url=$('#person_add').attr('href')
url=url.split('?')
var q=S3.queryString.parse(url[1])
q['(site)']=$(this).val()
url=url[0]+'?'+S3.queryString.stringify(q)
$('#person_add').attr('href',url)})'''
current.response.s3.jquery_ready.append(script)
settings.customise_req_req_resource = customise_req_req_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Admin"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
# Uncomment to enable internal support requests
#("support", Storage(
# name_nice = T("Support"),
# #description = "Support Requests",
# restricted = True,
# module_type = None # This item is handled separately for the menu
# )),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 9, # 8th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Locations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 4
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Contacts"),
#description = "Human Resources Management",
restricted = True,
module_type = 3,
)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
# )),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = None,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Inventory"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 10
)),
#("proc", Storage(
# name_nice = T("Procurement"),
# #description = "Ordering & Purchasing of Goods & Services",
# restricted = True,
# module_type = 10
# )),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 10,
)),
# Vehicle depends on Assets
#("vehicle", Storage(
# name_nice = T("Vehicles"),
# #description = "Manage Vehicles",
# restricted = True,
# module_type = 10,
# )),
("req", Storage(
name_nice = T("Requests"),
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
restricted = True,
module_type = 1,
)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 10
)),
("assess", Storage(
name_nice = T("Assessments"),
#description = "Rapid Assessments & Flexible Impact Assessments",
restricted = True,
module_type = 5,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("survey", Storage(
name_nice = T("Surveys"),
#description = "Create, enter, and manage surveys.",
restricted = True,
module_type = 5,
)),
#("cr", Storage(
# name_nice = T("Shelters"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# restricted = True,
# module_type = 10
# )),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = False,
# module_type = 10,
# )),
#("member", Storage(
# name_nice = T("Members"),
# #description = "Membership Management System",
# restricted = True,
# module_type = 10,
# )),
# @ToDo: Rewrite in a modern style
#("budget", Storage(
# name_nice = T("Budgeting Module"),
# #description = "Allows a Budget to be drawn up",
# restricted = True,
# module_type = 10
# )),
# @ToDo: Port these Assessments to the Survey module
#("building", Storage(
# name_nice = T("Building Assessments"),
# #description = "Building Safety Assessments",
# restricted = True,
# module_type = 10,
# )),
])
| 39.422772
| 187
| 0.470226
| 6,895
| 79,634
| 5.26454
| 0.130384
| 0.00843
| 0.011984
| 0.012783
| 0.393427
| 0.319761
| 0.283727
| 0.267941
| 0.245544
| 0.22907
| 0
| 0.009749
| 0.422922
| 79,634
| 2,019
| 188
| 39.442298
| 0.780133
| 0.206733
| 0
| 0.538405
| 0
| 0.000732
| 0.087764
| 0.012475
| 0
| 0
| 0
| 0.002972
| 0
| 0
| null | null | 0
| 0.037308
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
86d90c0ca6a5dbc266bca705498a4a9e3c8d3aac
| 721
|
py
|
Python
|
chroma-manager/tests/utils/__init__.py
|
GarimaVishvakarma/intel-chroma
|
fdf68ed00b13643c62eb7480754d3216d9295e0b
|
[
"MIT"
] | null | null | null |
chroma-manager/tests/utils/__init__.py
|
GarimaVishvakarma/intel-chroma
|
fdf68ed00b13643c62eb7480754d3216d9295e0b
|
[
"MIT"
] | null | null | null |
chroma-manager/tests/utils/__init__.py
|
GarimaVishvakarma/intel-chroma
|
fdf68ed00b13643c62eb7480754d3216d9295e0b
|
[
"MIT"
] | null | null | null |
import time
import datetime
import contextlib
@contextlib.contextmanager
def patch(obj, **attrs):
"Monkey patch an object's attributes, restoring them after the block."
stored = {}
for name in attrs:
stored[name] = getattr(obj, name)
setattr(obj, name, attrs[name])
try:
yield
finally:
for name in stored:
setattr(obj, name, stored[name])
@contextlib.contextmanager
def timed(msg='', threshold=0):
"Print elapsed time of a block, if over optional threshold."
start = time.time()
try:
yield
finally:
elapsed = time.time() - start
if elapsed >= threshold:
print datetime.timedelta(seconds=elapsed), msg
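# Usage sketch (added for illustration; not part of the original module):
if __name__ == '__main__':
    class Config(object):
        retries = 3
    with patch(Config, retries=10):
        assert Config.retries == 10
    assert Config.retries == 3  # original value restored on exit
    with timed('slept', threshold=0.1):
        time.sleep(0.2)         # prints the elapsed time and 'slept'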
| 24.033333
| 74
| 0.629681
| 86
| 721
| 5.27907
| 0.488372
| 0.046256
| 0.118943
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001905
| 0.271845
| 721
| 29
| 75
| 24.862069
| 0.862857
| 0
| 0
| 0.32
| 0
| 0
| 0.174757
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.12
| null | null | 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
86dd7f5030a8b0c0b8c5d1166bbac51638b7d539
| 25,946
|
py
|
Python
|
opaflib/xmlast.py
|
feliam/opaf
|
f9908c26af1bf28cc29f3d647dcd9f55d631d732
|
[
"MIT"
] | 2
|
2019-11-23T14:46:35.000Z
|
2022-01-21T16:09:47.000Z
|
opaflib/xmlast.py
|
feliam/opaf
|
f9908c26af1bf28cc29f3d647dcd9f55d631d732
|
[
"MIT"
] | null | null | null |
opaflib/xmlast.py
|
feliam/opaf
|
f9908c26af1bf28cc29f3d647dcd9f55d631d732
|
[
"MIT"
] | 1
|
2019-09-06T21:04:39.000Z
|
2019-09-06T21:04:39.000Z
|
from lxml import etree
from opaflib.filters import defilterData
#Logging facility
import logging,code
logger = logging.getLogger("OPAFXML")
class PDFXML(etree.ElementBase):
''' Base pdf-xml class. Every pdf token xml representation will
have a span which indicates where the original token lay in the file
'''
def _getspan(self):
return tuple([int(i) for i in self.get('span').split('~')])
def _setspan(self, value):
self.set('span',"%d~%d"%value)
def span_move(self,offset, recursive=True):
begin,end = self.span
self.span = (begin+offset,end+offset)
if recursive:
for child in self.getchildren():
child.span_move(offset)
def span_expand(self,span):
begin,end = self.span
self.span = (min(begin,span[0]),max(end,span[1]))
def clear_span(self, recursive=True):
del self.attrib['span']
for child in self.getchildren():
child.clear_span()
span = property(_getspan,_setspan)
def _to_xml(self):
return etree.tostring(self)
xml = property(_to_xml)
def _from_python(self, value):
self.from_python(value)
def _to_python(self):
return self.to_python()
value = property(_to_python,_from_python)
def __getattr__(self, name):
tags = set([e.tag for e in self])
if name in tags:
return self.xpath('./%s'%name)
return getattr(super(PDFXML,self),name)
def get_numgen(self):
''' Search the object and generation number of any pdf element '''
if self.tag.startswith('indirect'):
return self.id
else:
return self.getparent().get_numgen()
#leaf
class PDFString(PDFXML):
def from_python(self, value):
self.text = value.encode('string_escape')
def to_python(self):
return self.text.decode('string_escape')
class PDFName(PDFString):
pass
class PDFData(PDFString):
pass
class PDFBool(PDFString):
def from_python(self, value):
assert type(value) == bool, 'Value must be a boolean'
self.text = ['false','true'][int(value)]
def to_python(self):
return {'false': False, 'true': True}[self.text]
class PDFNull(PDFString):
def from_python(self, value):
assert value is None, 'Value must be None'
self.text = 'null'
def to_python(self):
assert self.text == 'null', 'PDFNull xml not initialized'
return None
class PDFR(PDFString):
def from_python(self, (n,g)):
assert type(n) == int and type(g) == int, 'R must be two numbers, n and g'
assert n >= 0 and n < 65535 , 'Invalid object number (%d)'%n
assert g >= 0 and g < 65535 , 'Invalid generation number (%d)'%g
self.text = "%d %d"%(n,g)
def to_python(self):
return tuple([int(i) for i in self.text.split(' ')])
def solve(self):
''' search the referenced indirect object in the containing pdf '''
pdf = self.xpath('/*')[0]
return pdf.getIndirectObject(self.value)
class PDFNumber(PDFXML):
def from_python(self, value):
assert type(value) in [int, float], 'Wrong type for a number'
self.text = str(value)
def to_python(self):
x = self.text
return float(int(float(x))) == float(x) and int(float(x)) or float(x)
class PDFStartxref(PDFString):
def from_python(self, value):
assert type(value) == int , 'Wrong type for startxref'
self.text = str(value).encode('string_escape')
def to_python(self):
return int(self.text.decode('string_escape'))
class PDFHeader(PDFString):
pass
#tree
class PDFEntry(PDFXML):
def to_python(self):
return tuple([e.value for e in self.getchildren()])
def _getkey(self):
return self[0]
def _setkey(self, key):
assert key.tag == 'name'
self[0] = key
key = property(_getkey,_setkey,None)
def _getval(self):
return self[1]
def _setval(self, val):
self[1] = val
val = property(_getval,_setval,None)
class PDFDictionary(PDFXML):
def to_python(self):
return dict([e.value for e in self.getchildren()])
def has_key(self,key):
return len(self.xpath('./entry/name[position()=1 and text()="%s"]'%key))>0
def __getitem__(self, i):
if str == type(i):
return self.xpath('./entry/name[position()=1 and text()="%s"]/../*[position()=2]'%i)[0]
return super(PDFDictionary,self).__getitem__(i)
def __delitem__(self, i):
if str == type(i):
return self.remove(self.xpath('./entry/name[position()=1 and text()="%s"]/..'%i)[0])
return super(PDFDictionary,self).__delitem__(i)
def __setitem__(self, key, val):
if str == type(key):
self.xpath('./entry/name[position()=1 and text()="%s"]/..'%key)[0].val=val
else:
super(PDFDictionary,self).__setitem__(key,val)
class PDFStream(PDFXML):
def to_python(self):
return {'dictionary':self[0].value, 'data':self[1].value}
def _getdictionary(self):
return self[0]
def _setdictionary(self, d):
assert d.tag == 'dictionary'
self[0] = d
dictionary = property(_getdictionary,_setdictionary,None)
def _getdata(self):
return self[1]
def _setdata(self, data):
assert data.tag == 'data'
self[1] = data
data = property(_getdata,_setdata,None)
def isFiltered(self):
''' Check if stream is filtered '''
return self.dictionary.has_key('Filter')
def getFilters(self):
val = self.dictionary.value
filters = val.get('Filter',None)
params = val.get('DecodeParams',None)
assert any([type(filters) == list and (type(params) == list or params==None ),
type(filters) != list and (type(params) == dict or params==None ) ]), 'Filter/DecodeParms wrong type'
if type(filters) != list:
filters=[filters]
params=params and [params] or [{}]
if params == None:
params = [{}]*len(filters)
assert all([type(x)==str for x in filters]), 'Filters shall be names'
assert all([type(x)==dict for x in params]), 'Params shall be dictionaries'
assert len(filters) == len(params),'Number of Decodeparams should match Filters'
return zip(filters,params)
def popFilter(self):
dictionary = self.dictionary
assert dictionary.has_key('Filter'), 'Stream not Filtered!'
selected_filter = None
selected_params = None
deletion_list = []
if dictionary['Length'].value != len(self.data.value):
logger.info("Length field of object %s does not match the actual data size (%d != %d)"%(str(self.get_numgen()),dictionary['Length'].value,len(self.data.value)))
if type(dictionary['Filter']) == PDFArray:
selected_filter = dictionary['Filter'][0]
del dictionary['Filter'][0]
if dictionary.has_key('DecodeParms'):
assert type(dictionary['DecodeParms']) == PDFArray, 'An array of filters needs an array of decoding params'
selected_params = dictionary['DecodeParms'][0]
deletion_list.append((dictionary['DecodeParms'],0))
#del dictionary['DecodeParms'][0]
else:
selected_filter = dictionary['Filter']
del dictionary['Filter']
if dictionary.has_key('DecodeParms'):
selected_params = dictionary['DecodeParms']
deletion_list.append((dictionary, 'DecodeParms'))
#del dictionary['DecodeParms']
if dictionary.has_key('Filter') and \
type(dictionary['Filter']) == PDFArray and \
len(dictionary['Filter']) == 0:
deletion_list.append((dictionary, 'Filter'))
#del dictionary['Filter']
if dictionary.has_key('DecodeParms') and \
type(dictionary['DecodeParms']) == PDFArray and \
len(dictionary['DecodeParms']) == 0:
deletion_list.append((dictionary, 'DecodeParms'))
#del dictionary['DecodeParms']
#FIX recode defilterData .. make it register/unregister able.
#(think /Crypt 7.4.10 Crypt Filter )
self.data.value = defilterData(selected_filter.value,self.data.value, selected_params and selected_params.value or selected_params)
for v,i in deletion_list:
del v[i]
dictionary['Length'].value = len(self.data.value)
def defilter(self):
try:
while self.isFiltered():
self.popFilter()
except Exception,e:
logger.debug("Couldn't defilter <%s> stream (exception %s)."%(self.value,str(e)))
logger.info("Couldn't defilter <%s> stream."%str(self.get_numgen()))
def isObjStm(self):
''' Return true if this is an object stream (ObjStm) '''
return self.dictionary.has_key('Type') and self.dictionary['Type'].value == 'ObjStm'
def expandObjStm(self):
'''
This parses the ObjStm structure and replaces it with all the new
indirect objects.
'''
from opaflib.parser import parse
assert not self.isFiltered(), "ObjStm should not be compressed at this point"
assert self.dictionary.has_key('N'), "N is mandatory in ObjStm dictionary"
assert self.dictionary.has_key('First'), "First is mandatory in ObjStm dictionary"
dictionary = self.dictionary
data = self.data.value
first = dictionary["First"].value
pointers = [int(x) for x in data[:first].split()]
assert len(pointers)%2 == 0 , "Wrong number of integers at the beginning of the ObjStm"
pointers = dict([(pointers[i+1]+first,pointers[i]) for i in range(0,len(pointers),2) ])
positions = sorted(pointers.keys() + [len(data)])
parsed_objects = []
for p in range(0,len(positions)-1):
logger.info("Adding new object %s from objectstream"%repr((pointers[positions[p]],0)))
io = PDF.indirect_object(parse('object', data[positions[p]:positions[p+1]]+" "))
io.id = (pointers[positions[p]],0)
parsed_objects.append(io)
return parsed_objects
class PDFArray(PDFXML):
def to_python(self):
return [e.value for e in self]
class PDFIndirect(PDFXML):
def to_python(self):
assert len(self.getchildren())==1, "Wrong number of children in indirect object"
return (self.id, self.object.value)
def _getobject(self):
return self[0]
def _setobject(self, o):
self[0] = o
object = property(_getobject,_setobject,None)
def _getid(self):
return tuple([int(i) for i in self.get('id').split(' ')])
def _setid(self, o):
self.set('id', "%d %d"%o)
id = property(_getid,_setid,None)
def isStream(self):
return len(self.xpath('./stream'))==1
class PDFPdf(PDFXML):
def to_python(self):
return [e.value for e in self]
def getStartxref(self):
''' Get the last startxref pointer (should be at least one) '''
return self.pdf_update[-1].startxref[-1]
#FIX move all this to pdf_update and do the wrapper here
def getObjectAt(self, pos):
''' Get the object found at certain byte position '''
return self.xpath('//*[starts-with(@span,"%d~")]'%pos)[0]
def getTrailer(self, startxref=None):
''' Get the Trailer dictionary (should be at least one) '''
if startxref == None:
startxref = self.getStartxref().value
xref = self.getObjectAt(startxref)
assert xref.tag in ['xref', 'stream'] and xref[0].tag == 'dictionary'
return xref[0]
def getID(self, startxref=None):
''' Get the pdf ID from the trailer dictionary '''
trailer = self.getTrailer(startxref).value
if trailer.has_key('ID'):
return trailer['ID']
else:
return ['','']
def getIndirectObject(self, ref):
''' Search for an indirect object '''
for u in self.pdf_update:
if u.has_key(ref):
return u[ref]
def getRoot(self):
''' Get the pdf Root node. '''
return self.getIndirectObject(self.getTrailer()['Root'].value).object
def isEncrypted(self):
''' Return true if pdf is encrypted '''
return self.getTrailer().has_key('Encrypt')
def countObjStm(self):
''' Count number of 'compressed' object streams '''
return len(self.xpath('//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
def countIObj(self):
''' Count the number of indirect objects '''
return len(self.xpath('//indirect_object'))
def graph(self, dot='default.dot'):
''' Generate a .dot graph of the pdf '''
dotdata = "digraph {\n"
nodes_added = set()
for io in self.xpath('.//indirect_object'):
references = io.xpath(".//R")
orig = "%d %d"%io.id
if len(references) == 0:
dotdata += '\t"%s";\n'%orig
nodes_added.add(orig)
else:
for r in references:
dest = "%d %d"%r.value
dotdata += '\t"%s" -> "%s";\n'%(orig, dest)
nodes_added.add(orig)
nodes_added.add(dest)
try:
root = "%d %d"%self.getTrailer()['Root'].value
dotdata += '\t"trailer" -> "%s";\n'%root
except Exception,e :
pass
dotdata += '}\n'
logger.info("Writing graph to %s (a dot file). Download graphviz or try http://rise4fun.com/Agl to render it."%dot)
file(dot,"w").write(dotdata)
def expandAllObjStm(self):
''' Find all object streams and expand them. Each ObjStm will be replaced
by its children '''
for u in self.pdf_update:
for ref in u.findAllObjStm():
u.expandObjStm(ref)
def defilterAll(self):
''' Find all filtered indirect streams and defilter them in place '''
for u in self.pdf_update:
for io in u[:]:
if type(io) == PDFIndirect and io.isStream() and io.object.isFiltered():
io.object.defilter()
def decrypt(self):
''' This will try to decrypt V:4 null password encryption '''
import hashlib, struct
from Crypto.Cipher import AES
from Crypto.Util import randpool
import base64
def rc4crypt(data, key):
x = 0
box = range(256)
for i in range(256):
x = (x + box[i] + ord(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
x = 0
y = 0
out = []
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
out.append(chr(ord(char) ^ box[(box[x] + box[y]) % 256]))
return ''.join(out)
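# Note (added): RC4 is symmetric, so rc4crypt(data, key) performs both
# encryption and decryption; it is reused below when checking the U entry.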
block_size = 16
key_size = 32
def encrypt(plain_text,key_bytes):
assert len(key_bytes) == key_size
mode = AES.MODE_CBC
pad = block_size - len(plain_text) % block_size
data = plain_text + pad * chr(pad)
iv_bytes = randpool.RandomPool(512).get_bytes(block_size)
encrypted_bytes = iv_bytes + AES.new(key_bytes, mode, iv_bytes).encrypt(data)
return encrypted_bytes
def decrypt(encrypted_bytes,key_bytes):
#assert len(key_bytes) == key_size
mode = AES.MODE_CBC
iv_bytes = encrypted_bytes[:block_size]
plain_text = AES.new(key_bytes, mode, iv_bytes).decrypt(encrypted_bytes[block_size:])
pad = ord(plain_text[-1])
return plain_text[:-pad]
assert self.isEncrypted()
#Get and print the encryption dictionary
encrypt = self.getTrailer()['Encrypt'].solve().object
print "It's ENCRYPTED!"
encrypt_py = encrypt.value
print encrypt_py
#Ok try to decrypt it ...
assert encrypt_py['V'] == 4, "Sorry only Version 4 supported"
assert encrypt_py['R'] == 4, "Sorry only Version 4 supported"
#key length in bytes
n = encrypt_py['Length']/8
print "N:",n
#a) Pad or truncate the password string to exactly 32 bytes.
user_password = ""
pad = "28BF4E5E4E758A4164004E56FFFA01082E2E00B6D0683E802F0CA9FE6453697A".decode('hex')
print "PASSWORD: ", user_password.encode('hex')
print "PAD: ", pad.encode('hex')
#b) Initialize the MD5 hash function and pass the result of step (a) as input to this function.
m = hashlib.md5()
m.update((user_password+pad)[:32])
print "MD5 update 1", ((user_password+pad)[:32]).encode('hex')
#c) Pass the value of the encryption dictionary's O entry to the MD5 hash function.
m.update (encrypt_py['O'][:32])
print "MD5 update 2", (encrypt_py['O'][:32]).encode('hex')
#d) Convert the integer value of the P entry to a 32-bit unsigned binary number and pass these bytes to the
# MD5 hash function, low-order byte first. WTF!!??
print "MD5 update 3", struct.pack("<L", 0xffffffff&encrypt_py['P']).encode('hex')
m.update (struct.pack("<L", 0xffffffff&encrypt_py['P'] ))
#e) append ID ?
#TODO, get the ID from the trailer..
ID = ''
m.update (ID)
print "MD5 update 4", ID.encode('hex')
#f) If document metadata is not being encrypted, pass 4 bytes with the value 0xFFFFFFFF to the MD5 hash function.
if encrypt_py.has_key('EncryptMetadata') and encrypt_py['EncryptMetadata'] == False:
m.update('\xff'*4)
print "MD5 update 5", ('\xff'*4).encode('hex')
print "1rst DIGEST:", m.digest().encode('hex')
h = m.digest()[:n]
for i in range(0,50):
h = hashlib.md5(h[:n]).digest()
print "Encryption KEY(%d)"%i, h.encode('hex')
key = h[:n]
print "Encryption KEY", key.encode('hex')
print "Try to authenticate"
_buf = hashlib.md5(pad + ID).digest()
print "MD5(padding+ID):",_buf.encode('hex')
for i in range(0,20):
_key = ''.join([chr(ord(k)^i) for k in list(key)])
_buf1 = rc4crypt(_buf,_key)
print "RC4 iter(%d) Encrypt data <%s> with key <%s> and it gives data <%s>"%(i,_buf.encode('hex'),_key.encode('hex'),_buf1.encode('hex'))
_buf = _buf1
assert _buf == encrypt_py['U'][:16]
print "Authenticated! (An actual pass is not needed. Using null pass '' )"
print "U", encrypt_py['U'].encode('hex')
print "O", encrypt_py['O'].encode('hex')
def decrypt_xml(xml_element):
n,g = xml_element.get_numgen()
m = hashlib.md5()
m.update(key)
m.update(chr(n&0xff))
m.update(chr((n>>8)&0xff))
m.update(chr((n>>16)&0xff))
m.update(chr(g&0xff))
m.update(chr((g>>8)&0xff))
m.update("sAlT")
real_key = m.digest()
pld = xml_element.value
if pld.endswith("\x0d\x0a"):
pld = pld[:-2]
pld = decrypt(pld,real_key)
xml_element.value = pld
#decrypt every string and stream in place...
for e in self.xpath('//stream/data'):
decrypt_xml(e)
for e in self.xpath('//string'):
decrypt_xml(e)
class PDFUpdate(PDFXML):
def to_python(self):
return dict([e.value for e in self.xpath('./indirect_object')])
def has_key(self,key):
key = "%d %d"%key
return len(self.xpath('./indirect_object[@id="%s"]'%key))>0
def __getitem__(self, key):
if tuple == type(key):
key = "%d %d"%key
return self.xpath('./indirect_object[@id="%s"]'%key)[0]
return super(PDFUpdate,self).__getitem__(key)
def __delitem__(self, key):
if tuple == type(key):
key = "%d %d"%key
return self.remove(self.xpath('./indirect_object[@id="%s"]'%key)[0])
return super(PDFUpdate,self).__delitem__(key)
def __setitem__(self, key, val):
if str == type(key):
self.xpath('./indirect_object[@obj="%s"]'%key)[0][:]=[val] #mmm
else:
super(PDFUpdate,self).__setitem__(key,val)
def getObjectAt(self, pos):
''' Get the object found at certain byte position (only in this update!)'''
return self.xpath('.//*[starts-with(@span,"%d~")]'%pos)[0]
def getTrailer(self, startxref=None):
''' Get the Trailer dictionary (of this update!)'''
if startxref == None:
startxref = self.getStartxref().value
xref = self.getObjectAt(startxref)
return xref.dictionary
def getRoot(self):
''' Get the pdf Root node of this update. '''
return self[self.getTrailer()['Root'].value].object
def countObjStm(self):
''' Count number of 'compressed' object streams '''
return len(self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../..'))
def expandObjStm(self, ref):
io_objstm = self[ref]
assert io_objstm.object.dictionary['Type'].value == 'ObjStm'
#completely defilter the object stream
while io_objstm.object.isFiltered():
io_objstm.object.popFilter()
#parse the indirect simple objects inside it
expanded_iobjects = io_objstm.object.expandObjStm()
#replace the object stream with its children
for new_io in expanded_iobjects:
io_objstm.addnext(new_io)
self.remove(io_objstm)
def findAllObjStm(self):
''' Search 'compressed' object streams ids/refs'''
return [io.id for io in self.xpath('.//stream/dictionary/entry/name[position()=1 and text()="Type"]/../name[position()=2 and text()="ObjStm"]/../../../..')]
def expandAllObjStm(self):
for ref in self.findAllObjStm():
self.expandObjStm(ref)
#Factory
class PDFXMLFactory(object):
def __init__(self):
self.parser = etree.XMLParser()
fallback = etree.ElementDefaultClassLookup(PDFXML)
lookup = etree.ElementNamespaceClassLookup(fallback)
namespace = lookup.get_namespace(None)
#leafs
namespace['name'] = PDFName
namespace['string'] = PDFString
namespace['number'] = PDFNumber
namespace['null'] = PDFNull
namespace['bool'] = PDFBool
namespace['R'] = PDFR
namespace['header'] = PDFHeader
namespace['startxref'] = PDFStartxref
namespace['data'] = PDFData
#trees
namespace['entry'] = PDFEntry
namespace['dictionary'] = PDFDictionary
namespace['stream'] = PDFStream
namespace['pdf'] = PDFPdf
namespace['pdf_update'] = PDFUpdate
namespace['indirect_object'] = PDFIndirect
namespace['array'] = PDFArray
self.parser.set_element_class_lookup(lookup)
#leaf
def create_leaf(self, tag, value,**attribs):
assert tag in ['number','string','name','R','startxref','header','data','null','bool'], "Got wrong leaf tag: %s"%tag
xml = self.parser.makeelement(tag)
xml.value=value
xml.span=attribs.setdefault('span', (0xffffffff,-1))
del attribs['span']
for attr_key, attr_val in attribs.items():
xml.set(attr_key, str(attr_val))
return xml
#Tree
def create_tree(self, tag, *childs, **attribs):
assert tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update'], "Got wrong tree tag: %s"%tag
xml = self.parser.makeelement(tag)
xml.span=attribs.setdefault('span', (0xffffffff,-1))
del attribs['span']
for attr_key, attr_val in attribs.items():
xml.set(attr_key, str(attr_val))
for child in childs:
xml.append(child)
return xml
def __getattr__(self,tag, *args,**kwargs):
if tag in ['number','string','name','R','startxref','header','data','null','bool']:
return lambda payload, **my_kwargs: self.create_leaf(tag, payload, **my_kwargs)
elif tag in ['indirect_object','dictionary', 'entry', 'array', 'stream', 'xref', 'pdf', 'pdf_update']:
return lambda payload, **my_kwargs: self.create_tree(tag, *payload, **my_kwargs)
return super(PDFXMLFactory,self).__getattr__(tag,*args,**kwargs)
PDF = PDFXMLFactory()
def create_leaf(tag, value, **kwargs):
return PDF.create_leaf(tag, value,**kwargs)
def create_tree(tag, childs, **kwargs):
return PDF.create_tree(tag, *childs, **kwargs)
if __name__=="__main__":
name = create_leaf('name', "Name")
string = create_leaf('string', "Felipe")
entry = create_tree('entry',[name,string])
dictionary = create_tree('dictionary',[entry])
stream_data = create_leaf('data',"A"*100)
stream = create_tree('stream',[dictionary,stream_data])
indirect = create_tree('indirect_object', [stream], obj=(1,0))
array = create_tree('array', [create_leaf('number', i) for i in range(0,10)])
xml=indirect
print etree.tostring(xml), xml.value
import code
code.interact(local=locals())
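# Sketch (added): the demo objects above also support keyed access via
# PDFDictionary.__getitem__, e.g.:
# print dictionary['Name'].value # -> 'Felipe'
# print array.value # -> [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]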
| 37.332374
| 172
| 0.582248
| 3,230
| 25,946
| 4.57678
| 0.141486
| 0.015558
| 0.010417
| 0.014206
| 0.342826
| 0.282081
| 0.235406
| 0.207536
| 0.178584
| 0.154908
| 0
| 0.012456
| 0.279041
| 25,946
| 694
| 173
| 37.386167
| 0.777825
| 0.04413
| 0
| 0.206287
| 0
| 0.009823
| 0.13935
| 0.029108
| 0
| 0
| 0.00258
| 0.001441
| 0.060904
| 0
| null | null | 0.017682
| 0.017682
| null | null | 0.039293
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
86f0c522be62919400b4d5f2f8a78d4b4a38dcb9
| 399
|
py
|
Python
|
scripts/make_gene_table.py
|
lmdu/bioinfo
|
4542b0718410d15f3956c6545d9824a16608e02b
|
[
"MIT"
] | null | null | null |
scripts/make_gene_table.py
|
lmdu/bioinfo
|
4542b0718410d15f3956c6545d9824a16608e02b
|
[
"MIT"
] | null | null | null |
scripts/make_gene_table.py
|
lmdu/bioinfo
|
4542b0718410d15f3956c6545d9824a16608e02b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
descripts = {}
with open('macaca_genes.txt') as fh:
fh.readline()
for line in fh:
cols = line.strip('\n').split('\t')
if cols[1]:
descripts[cols[0]] = cols[1].split('[')[0].strip()
else:
descripts[cols[0]] = cols[1]
with open('gene_info.txt') as fh:
for line in fh:
cols = line.strip().split('\t')
cols.append(descripts[cols[1]])
print "\t".join(cols)
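# Worked example (added, hypothetical rows): a macaca_genes.txt line
# "ENSMMUG00000000001\tTRAF3 [Source:HGNC Symbol]" is stored as
# descripts["ENSMMUG00000000001"] = "TRAF3", and every gene_info.txt row
# whose second column is that ID gets "TRAF3" appended as a final column.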
| 19.95
| 53
| 0.611529
| 66
| 399
| 3.666667
| 0.454545
| 0.082645
| 0.057851
| 0.090909
| 0.355372
| 0.198347
| 0.198347
| 0
| 0
| 0
| 0
| 0.020958
| 0.162907
| 399
| 19
| 54
| 21
| 0.703593
| 0.050125
| 0
| 0.142857
| 0
| 0
| 0.100529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
86f7b299e6e411fb0020928642f34720d9448cf2
| 301
|
py
|
Python
|
python_for_everybody/py2_p4i_old/6.5findslicestringextract.py
|
timothyyu/p4e-prac
|
f978b71ce147b6e9058372929f2666c2e67d0741
|
[
"BSD-3-Clause"
] | null | null | null |
python_for_everybody/py2_p4i_old/6.5findslicestringextract.py
|
timothyyu/p4e-prac
|
f978b71ce147b6e9058372929f2666c2e67d0741
|
[
"BSD-3-Clause"
] | null | null | null |
python_for_everybody/py2_p4i_old/6.5findslicestringextract.py
|
timothyyu/p4e-prac
|
f978b71ce147b6e9058372929f2666c2e67d0741
|
[
"BSD-3-Clause"
] | 1
|
2020-04-18T16:09:04.000Z
|
2020-04-18T16:09:04.000Z
|
# 6.5 Write code using find() and string slicing (see section 6.10) to extract
# the number at the end of the line below.
# Convert the extracted value to a floating point number and print it out.
text = "X-DSPAM-Confidence: 0.8475";
pos = text.find(':')
text = float(text[pos+1:])
print text
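# Expected output: 0.8475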
| 27.363636
| 79
| 0.697674
| 53
| 301
| 3.962264
| 0.735849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045267
| 0.192691
| 301
| 11
| 80
| 27.363636
| 0.81893
| 0.637874
| 0
| 0
| 0
| 0
| 0.283019
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8101825b7fae5806f4a1d2d670c101bc508918db
| 5,681
|
py
|
Python
|
modules/documents.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | 4
|
2015-03-22T20:23:36.000Z
|
2015-12-09T14:31:34.000Z
|
modules/documents.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | 4
|
2015-03-24T10:42:00.000Z
|
2016-06-21T08:44:01.000Z
|
modules/documents.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module contains classes for documents, and lists of documents.
Documents are defined by the document rules in settings.py
A file can contain one or more document. However, a document can
not be constructed from more than one file. This is a limitation,
obvious in cases like Gotlands kommun, where meeting minutes are
split up in a large number of files.
"""
import settings
from modules.utils import make_unicode, last_index
from modules.extractors.documentBase import ExtractionNotAllowed
document_headers = {
"Content-Type": "text/plain",
"Content-Disposition": "attachment",
"Cache-Control": "public"
}
class DocumentList(object):
"""Contains a list of documents, extracted from a file.
"""
def __init__(self, extractor):
"""Create a list of documents, using `extractor`
"""
self._documents = []
page_types_and_dates = []
"""Keep track of documents by type and date, to be able to merge
documents depending on `settings.document_type_settings`
"""
# Loop through pages, and add pages of the same type and date together
last_page_type = None
last_page_date = None
documents = []
try:
for page in extractor.get_next_page():
temp_doc = Document(page, extractor)
if (len(documents) > 0 and
temp_doc.type_ == last_page_type and
temp_doc.date == last_page_date):
documents[-1].merge_with(temp_doc)
else:
documents.append(temp_doc)
page_types_and_dates.append((temp_doc.type_, temp_doc.date))
last_page_type = temp_doc.type_
last_page_date = temp_doc.date
except ExtractionNotAllowed:
raise ExtractionNotAllowed
# merge documents, if disallow_infixes == True
doc_settings = settings.document_type_settings
disallow_infixes = [d for d in doc_settings
if doc_settings[d]["disallow_infixes"] is True]
"""Document types that disallow holes"""
num_docs = len(page_types_and_dates)
i = 0
while i < num_docs:
(type_, date) = page_types_and_dates[i]
last_match = last_index(page_types_and_dates, (type_, date))
if type_ in disallow_infixes and last_match > i:
num_docs_to_merge = last_match - i + 1
new_doc = documents.pop(0)
for j in range(i, last_match):
new_doc.merge_with(documents.pop(0))
self._documents.append(new_doc)
i += num_docs_to_merge
else:
doc_to_merge = documents.pop(0)
self._documents.append(doc_to_merge)
i += 1
def get_next_document(self):
for document in self._documents:
yield document
def __len__(self):
"""len is the number of documents"""
return len(self._documents)
class Document(object):
"""Represents a single document
"""
text = ""
header = ""
date = None
type_ = None
def __init__(self, page, extractor):
"""Create a document stub from a page. Use add_page
to keep extending this document.
"""
self.text = page.get_text()
self.header = page.get_header() or extractor.get_header()
self.date = page.get_date() or extractor.get_date()
self.type_ = self.get_document_type()
def append_page(self, page):
"""Append content from a page to this document.
"""
pass
def append_text(self, text):
"""Append content to this document.
"""
self.text += text
def merge_with(self, document):
"""Merge this document with another one"""
try:
self.text += document.text
except UnicodeDecodeError:
self.text = make_unicode(self.text) + make_unicode(document.text)
def __len__(self):
"""len is the length of the total plaintext"""
return len(self.text)
def get_document_type(self):
"""
Return the first matching document type, based on this
header text.
"""
for document_type in settings.document_rules:
if self.parse_rules(document_type[1], self.header):
return document_type[0]
return None
def parse_rules(self, tuple_, header):
"""Parse document rules. See settings.py for syntax"""
rule_key = tuple_[0].upper()
rule_val = tuple_[1]
header = header.upper()
# --------- Logical separators --------
if rule_key == "AND":
hit = True
for rule in rule_val:
hit = hit and self.parse_rules(rule, header)
return hit
elif rule_key == "OR":
hit = False
for rule in rule_val:
hit = hit or self.parse_rules(rule, header)
return hit
elif rule_key == "NOT":
hit = not self.parse_rules(rule_val, header)
return hit
# -------------- Rules ----------------
elif rule_key == "HEADER_CONTAINS":
try:
pos = make_unicode(header).find(rule_val.upper())
except UnicodeDecodeError:
pos = -1
return pos > -1
if __name__ == "__main__":
print "This module is only intended to be called from other scripts."
import sys
sys.exit()
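# Sketch (added) of a rule tuple that parse_rules() accepts; the values
# are hypothetical -- the real rules live in settings.document_rules:
# ("protokoll", ("AND", [("HEADER_CONTAINS", "PROTOKOLL"),
#                        ("NOT", ("HEADER_CONTAINS", "BILAGA"))]))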
| 33.417647
| 80
| 0.582996
| 686
| 5,681
| 4.603499
| 0.246356
| 0.019949
| 0.018999
| 0.026916
| 0.141862
| 0.096897
| 0.065231
| 0.051298
| 0.051298
| 0.027866
| 0
| 0.003908
| 0.324415
| 5,681
| 169
| 81
| 33.615385
| 0.818916
| 0.037141
| 0
| 0.149533
| 0
| 0
| 0.041942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.009346
| 0.037383
| null | null | 0.009346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
811898bc6c0124ca8489662af03fc5f7195a1876
| 5,191
|
py
|
Python
|
octopart/scrape_octopart.py
|
nicholaschiang/dl-datasheets
|
1c5ab2545a85c1ea7643fc655005259544617d90
|
[
"MIT"
] | null | null | null |
octopart/scrape_octopart.py
|
nicholaschiang/dl-datasheets
|
1c5ab2545a85c1ea7643fc655005259544617d90
|
[
"MIT"
] | null | null | null |
octopart/scrape_octopart.py
|
nicholaschiang/dl-datasheets
|
1c5ab2545a85c1ea7643fc655005259544617d90
|
[
"MIT"
] | 1
|
2019-12-07T20:13:06.000Z
|
2019-12-07T20:13:06.000Z
|
#! /usr/bin/env python
import sys
import json
import urllib
import urllib2
import time
import argparse
import re
# Category ID for Discrete Semiconductors > Transistors > BJTs
TRANSISTOR_ID = "b814751e89ff63d3" # quoted: the bare hex ID is not valid Python
def find_total_hits(search_query):
"""
Function: find_total_hits
--------------------
Returns the number of hits that correspond to the search query.
"""
url = "http://octopart.com/api/v3/parts/search" # same search endpoint as download_datasheets uses
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=09b32c6c"
args = [
('q', search_query),
('start', 0),
('limit', 1), #change to increase number of datasheets
('include[]','datasheets')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# return number of hits
return search_response['hits']
def download_datasheets(search_query):
"""
Function: download_datasheets
--------------------
Uses the OctoPart API to download all datasheets associated with a given
set of search keywords.
"""
MAX_RESULTS = 100
counter = 0
total_hits = find_total_hits(search_query)
# print number of hits
print "[info] Search Response Hits: %s" % (total_hits)
# Calculate how many multiples of 100s of hits there are
num_hundreds = total_hits / MAX_RESULTS
print "[info] Performing %s iterations of %s results." % (num_hundreds, MAX_RESULTS)
for i in range(num_hundreds+1):
url = "http://octopart.com/api/v3/parts/search"
# NOTE: Use your API key here (https://octopart.com/api/register)
url += "?apikey=09b32c6c"
args = [
('q', search_query),
('start', (i * MAX_RESULTS)),
('limit', MAX_RESULTS), # change to edit number of datasheets
('include[]','datasheets')
# ('include[]','specs'),
# ('include[]','descriptions')
]
url += '&' + urllib.urlencode(args)
data = urllib.urlopen(url).read() # perform a SearchRequest
search_response = json.loads(data) # Grab the SearchResponse
# Iterate through the SearchResults in the SearchResponse
if not search_response.get('results'):
print "[error] no results returned in outer loop: " + str(i)
continue
for result in search_response['results']:
part = result['item'] # Grab the Part in the SearchResult
print ("[info] %s_%s..." % (part['brand']['name'].replace(" ", ""), part['mpn'])),
sys.stdout.flush()
# Iterate through list of datasheets for the given part
for datasheet in part['datasheets']:
# Grab the Datasheet URL
pdflink = datasheet['url']
if pdflink is not None:
# Download the PDF
try:
response = urllib2.urlopen(pdflink)
except urllib2.HTTPError, err:
if err.code == 404:
print "[error] Page not found!...",
elif err.code == 403:
print "[error] Access Denied!...",
else:
print "[error] HTTP Error code ", err.code,
continue; # advance to next datasheet rather than crashing
try:
filename = re.search('([^/]*)\.[^.]*$', datasheet['url']).group(1)
except AttributeError:
continue; # skip to next datasheet rather than crashing
file = open("../datasheets/%s.pdf" % filename, 'w')
file.write(response.read())
file.close()
counter += 1 # Increment the counter of files downloaded
# NOTE: Not sure if this is necessary. Just a precaution.
time.sleep(0.4) # Limit ourselves to 3 HTTP Requests/second
print("DONE")
print("[info] %s Parts Completed." % MAX_RESULTS)
print("[info] COMPLETED: %s datasheets for the query were downloaded." % counter)
def parse_args():
"""
Function: parse_args
--------------------
Parse the arguments for the Octopart Datasheet Scraper
"""
# Define what commandline arguments can be accepted
parser = argparse.ArgumentParser()
parser.add_argument('query',metavar="\"SEARCH_KEYWORDS\"",
help="keywords to query in quotes (required)")
parser.add_argument('--version', action='version', version='%(prog)s 0.1.0')
args = parser.parse_args()
return args.query
# Main Function
if __name__ == "__main__":
reload(sys)
sys.setdefaultencoding('utf-8')
search_query = parse_args() # Parse commandline arguments
start_time = time.time()
print "[info] Download datasheets for %s" % search_query
download_datasheets(search_query)
finish_time = time.time()
print '[info] Took', finish_time - start_time, 'sec total.'
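# Example invocation (the quoted query is the one required argument):
# python scrape_octopart.py "2N3904 BJT"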
| 38.169118
| 97
| 0.571374
| 569
| 5,191
| 5.115993
| 0.363796
| 0.034009
| 0.019237
| 0.013054
| 0.231536
| 0.176572
| 0.138097
| 0.138097
| 0.138097
| 0.138097
| 0
| 0.014171
| 0.306685
| 5,191
| 135
| 98
| 38.451852
| 0.794665
| 0.199961
| 0
| 0.204545
| 0
| 0
| 0.191288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.079545
| null | null | 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
811ce2660d66f66cb91158b2b6a72ae00e0a02c5
| 3,904
|
py
|
Python
|
multidoc_mnb.py
|
dropofwill/author-attr-experiments
|
a90e2743591358a6253f3b3664f5e398517f84bc
|
[
"Unlicense"
] | 2
|
2015-01-06T12:53:39.000Z
|
2018-02-01T13:57:09.000Z
|
multidoc_mnb.py
|
dropofwill/author-attr-experiments
|
a90e2743591358a6253f3b3664f5e398517f84bc
|
[
"Unlicense"
] | null | null | null |
multidoc_mnb.py
|
dropofwill/author-attr-experiments
|
a90e2743591358a6253f3b3664f5e398517f84bc
|
[
"Unlicense"
] | null | null | null |
from sklearn import datasets
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.cross_validation import ShuffleSplit
from sklearn.cross_validation import Bootstrap
from sklearn.naive_bayes import MultinomialNB
from sklearn.grid_search import GridSearchCV
from scipy.stats import sem
from pprint import pprint
import numpy as np
import pylab as pl
import string
import matplotlib.pyplot as plt
# Format the mean of the scores with the standard error of the mean (SEM)
def mean_sem(scores):
return ("Mean score: {0:.3f} (+/-{1:.3f})").format(np.mean(scores), sem(scores))
def test_docs(dir):
# Load documents
docs = datasets.load_files(container_path="../../sklearn_data/"+dir)
X, y = docs.data, docs.target
baseline = 1/float(len(list(np.unique(y))))
# Select Features via Bag of Words approach without stop words
#X = CountVectorizer(charset_error='ignore', stop_words='english', strip_accents='unicode', ).fit_transform(X)
X = TfidfVectorizer(charset_error='ignore', stop_words='english', analyzer='char', ngram_range=(2,4), strip_accents='unicode', sublinear_tf=True, max_df=0.5).fit_transform(X)
n_samples, n_features = X.shape
# sklearn's grid search
parameters = { 'alpha': np.logspace(-100,0,10)}
bv = Bootstrap(n_samples, n_iter=10, test_size=0.3, random_state=42)
mnb_gv = GridSearchCV(MultinomialNB(), parameters, cv=bv,)
#scores = cross_val_score(mnb_gv, X, y, cv=bv)
mnb_gv.fit(X, y)
mnb_gv_best_params = mnb_gv.best_params_.values()[0]
print mnb_gv.best_score_
print mnb_gv_best_params
# CV with Bootstrap
mnb = MultinomialNB(alpha=mnb_gv_best_params)
boot_scores = cross_val_score(mnb, X, y, cv=bv)
print mean_sem(boot_scores)
improvement = (mnb_gv.best_score_ - baseline) / baseline
rand_baseline.append(baseline)
test_results.append([mnb_gv.best_score_])
com_results.append(improvement)
sem_results.append(sem(boot_scores))
def graph(base_list, results_list, com_list, arange):
N=arange
base=np.array(base_list)
res=np.array(results_list)
com = np.array(com_list)
ind = np.arange(N) # the x locations for the groups
width = 0.3 # the width of the bars: can also be len(x) sequence
#fig, ax = plt.subplots()
p1 = plt.bar(ind, base, width, color='r')
p2 = plt.bar(ind+0.3, res, width, color='y')
p3 = plt.bar(ind+0.6, com, width, color='b')
plt.rcParams['figure.figsize'] = 10, 7.5
plt.rcParams['axes.grid'] = True
plt.gray()
plt.ylabel('Accuracy')
plt.title('AAAC Problem Accuracy')
plt.yticks(np.arange(0,3,30))
plt.xticks(np.arange(0,13,13))
#plt.set_xticks(('A','B','C','D','E','F','G','H','I','J','K','L','M'))
plt.legend( (p1[0], p2[0], p3[0]), ('Baseline', 'Algorithm', 'Improvement'))
plt.show()
rand_baseline = list()
test_results = list()
sem_results = list()
com_results = list()
#test_docs("problemA")
for i in string.uppercase[:13]:
test_docs("problem"+i)
#graph(rand_baseline,test_results,com_results,13)
import os
import time as tm
sub_dir = "Results/"
location = "multiDoc" + tm.strftime("%Y%m%d-%H%M%S") + ".txt"
with open(os.path.join(sub_dir, location), 'w') as myFile:
myFile.write(str(rand_baseline))
myFile.write("\n")
myFile.write(str(test_results))
myFile.write("\n")
myFile.write(str(sem_results))
myFile.write("\n")
myFile.write(str(com_results))
# CV with ShuffleSplit
'''
cv = ShuffleSplit(n_samples, n_iter=100, test_size=0.2, random_state=0)
test_scores = cross_val_score(mnb, X, y, cv=cv)
print np.mean(test_scores)
'''
# Single run through
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
print X_train.shape
print y_train.shape
print X_test.shape
print y_test.shape
mnb = MultinomialNB().fit(X_train, y_train)
print mnb.score(X_test, y_test)
'''
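The script above targets a pre-0.18 sklearn API: sklearn.cross_validation, sklearn.grid_search and the Bootstrap iterator were all removed in later releases. A minimal sketch of the same grid search against the modern sklearn.model_selection API, with ShuffleSplit standing in for the removed Bootstrap; the function name and parameter values here are illustrative, not from the original:

from sklearn.model_selection import GridSearchCV, ShuffleSplit
from sklearn.naive_bayes import MultinomialNB
import numpy as np

def grid_search_mnb(X, y):
    # ShuffleSplit replaces the removed Bootstrap resampling iterator
    cv = ShuffleSplit(n_splits=10, test_size=0.3, random_state=42)
    gv = GridSearchCV(MultinomialNB(), {'alpha': np.logspace(-10, 0, 10)}, cv=cv)
    gv.fit(X, y)
    return gv.best_score_, gv.best_params_['alpha']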
| 27.111111
| 175
| 0.733863
| 636
| 3,904
| 4.323899
| 0.319182
| 0.018182
| 0.022909
| 0.037818
| 0.159273
| 0.104727
| 0.042909
| 0.018909
| 0
| 0
| 0
| 0.017748
| 0.119621
| 3,904
| 144
| 176
| 27.111111
| 0.782368
| 0.157018
| 0
| 0.04
| 0
| 0
| 0.072816
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.226667
| null | null | 0.053333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
81338229b9f75f52ae6ffcf7ef860588b32f5b97
| 3,915
|
py
|
Python
|
Harpe-website/website/contrib/communication/utils.py
|
Krozark/Harpe-Website
|
1038a8550d08273806c9ec244cb8157ef9e9101e
|
[
"BSD-2-Clause"
] | null | null | null |
Harpe-website/website/contrib/communication/utils.py
|
Krozark/Harpe-Website
|
1038a8550d08273806c9ec244cb8157ef9e9101e
|
[
"BSD-2-Clause"
] | null | null | null |
Harpe-website/website/contrib/communication/utils.py
|
Krozark/Harpe-Website
|
1038a8550d08273806c9ec244cb8157ef9e9101e
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import socket as csocket
from struct import pack,unpack
from website.contrib.communication.models import *
def enum(**enums):
return type('Enum', (), enums)
class Socket:
Dommaine = enum(IP=csocket.AF_INET,LOCAL=csocket.AF_UNIX)
Type = enum(TCP=csocket.SOCK_STREAM, UDP=csocket.SOCK_DGRAM)
Down = enum(SEND=0,RECIVE=1,BOTH=2)
NTW_WELCOM_MSG = "hello!\0"
NTW_ERROR_NO = 0
def __init__ (self,dommaine,type,protocole=0):
self.sock = csocket.socket(dommaine,type,protocole)
self.buffer = b""
self.status = 0
def connect(self,host,port):
self.sock.connect((host,port))
def verify_connexion(self):
code = 404
if self.receive() > 0:
msg = self._unpack_str()
if msg == self.NTW_WELCOM_MSG and self.status == self.NTW_ERROR_NO:
print "verify_connexion <%d : %s>" % (self.status,msg)
else:
print "verify_connexion <%d : %s>" % (self.status,msg)
self.clear()
return self.status
def _unpack_str(self):
i = 0
while self.buffer[i]!= '\0':
i+=1
i+=1
res = self.buffer[:i]
self.buffer = self.buffer[i:]
return res
def send(self):
size = len(self.buffer)
_size = pack('!Ih',size,self.status)
data = _size + self.buffer
sent = self.sock.send(data)
if sent == 0:
print "Connexion lost"
return False
return True
def receive(self):
recv = b''
recv = self.sock.recv(6)
if recv == b'':
print "Connexion lost"
return None
size,self.status = unpack('!Ih',recv)
self.buffer = self.sock.recv(size)
return len(recv) + len(self.buffer)
#Format C Type Python type Standard size
#x pad byte no value
#c char string of length 1
#b signed char integer 1
#B unsigned char integer 1
#? _Bool bool 1
#h short integer 2
#H unsigned short integer 2
#i int integer 4
#I unsigned int integer 4
#l long integer 4
#L unsigned long integer 4
#q long long integer 8
#Q unsigned long long integer 8
#f float float 4
#d double float 8
#s char[] string
#p char[] string
#P void * integer
def add(self,typ,*args):
self.buffer +=pack('!'+typ,*args)
def clear(self):
self.buffer = b""
self.status = 0
def call(self,ret_type,func_id,types="",*args):
if len(types) < len(args):
print "Wrong number of args/type"
return 0
self.clear()
self.add("i",func_id)
if types:
self.add(types,*args)
self.send()
size = self.receive()
if size:
if self.status != 0:
print "recive error code : %d" % self.status
else:
return unpack("!"+ret_type,self.buffer)[0]
return 0
def create_socket():
sock = Socket(Socket.Dommaine.IP,Socket.Type.TCP)
ser = HarpeServer.objects.filter(is_active=True)[:1]
if not ser:
return False
ser = ser[0]
sock.connect(ser.ip,ser.port)
if sock.verify_connexion() != sock.NTW_ERROR_NO:
print "An error occur"
return None
return sock
def send_AnalyseMgf_to_calc(analyseMfg):
sock = create_socket()
if not sock:
return False
data = analyseMfg.mgf.read() + '\0'
return sock.call("i",HarpeServer.FUNCTION_ID.ANALYSE,"i%ds" % (analyseMfg.mgf.size+1) ,analyseMfg.pk,data)
| 29.659091
| 110
| 0.527458
| 490
| 3,915
| 4.132653
| 0.283673
| 0.059259
| 0.014815
| 0.014815
| 0.059259
| 0.059259
| 0.059259
| 0.034568
| 0
| 0
| 0
| 0.016453
| 0.363474
| 3,915
| 131
| 111
| 29.885496
| 0.796148
| 0.203321
| 0
| 0.230769
| 0
| 0
| 0.055215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.032967
| null | null | 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
81368cbcf7560067152788c0a732e279491b5a68
| 7,884
|
py
|
Python
|
pydeap/feature_extraction/_time_domain_features.py
|
Wlgls/pyDEAP
|
b7cec369cedd4a69ea82bc49a2fb8376260e4ad2
|
[
"Apache-2.0"
] | null | null | null |
pydeap/feature_extraction/_time_domain_features.py
|
Wlgls/pyDEAP
|
b7cec369cedd4a69ea82bc49a2fb8376260e4ad2
|
[
"Apache-2.0"
] | null | null | null |
pydeap/feature_extraction/_time_domain_features.py
|
Wlgls/pyDEAP
|
b7cec369cedd4a69ea82bc49a2fb8376260e4ad2
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
'''
@File :_time_domain_features.py
@Time :2021/04/16 20:02:55
@Author :wlgls
@Version :1.0
'''
import numpy as np
def statistics(data, combined=True):
"""Statistical features, include Power, Mean, Std, 1st differece, Normalized 1st difference, 2nd difference, Normalized 2nd difference.
Parameters
----------
data array
Input data; for the DEAP dataset its shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature; its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [13]: d.shape, l.shape
Out[13]: ((40, 32, 8064), (40, 1))
In [14]: statistics_feature(d).shape
Out[14]: (40, 32, 7)
"""
# Power
power = np.mean(data**2, axis=-1)
# Mean
ave = np.mean(data, axis=-1)
# Standard Deviation
std = np.std(data, axis=-1)
# mean of the absolute values of the 1st difference
diff_1st = np.mean(np.abs(np.diff(data,n=1, axis=-1)), axis=-1)
# the mean of the absolute values of Normalized 1st difference
normal_diff_1st = diff_1st / std
# mean of the absolute values of the 2nd difference
diff_2nd = np.mean(np.abs(data[..., 2:] - data[..., :-2]), axis=-1)
# the mean of the absolute values of Normalized 2nd difference
normal_diff_2nd = diff_2nd / std
# Features.append(np.concatenate((Power, Mean, Std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=2))
f = np.stack((power, ave, std, diff_1st, normal_diff_1st, diff_2nd, normal_diff_2nd), axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # merge the channel and feature axes; the original reshape dropped them
return f
def hjorth(data, combined=True):
"""Solving Hjorth features, include activity, mobility, complexity
Parameters
----------
data array
Input data; for the DEAP dataset its shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature; its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [15]: d.shape, l.shape
Out[15]: ((40, 32, 8064), (40, 1))
In [16]: hjorth_features(d).shape
Out[16]: (40, 32, 3)
"""
data = np.array(data)
ave = np.mean(data, axis=-1)[..., np.newaxis]
diff_1st = np.diff(data, n=1, axis=-1)
# print(diff_1st.shape)
diff_2nd = data[..., 2:] - data[..., :-2]
# Activity
activity = np.mean((data-ave)**2, axis=-1)
# print(Activity.shape)
# Mobility
varfdiff = np.var(diff_1st, axis=-1)
# print(varfdiff.shape)
mobility = np.sqrt(varfdiff / activity)
# Complexity
varsdiff = np.var(diff_2nd, axis=-1)
complexity = np.sqrt(varsdiff/varfdiff) / mobility
f = np.stack((activity, mobility, complexity), axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # merge the channel and feature axes
return f
def higher_order_crossing(data, k=10, combined=True):
"""Solving the feature of hoc. Hoc is a high order zero crossing quantity.
Parameters
----------
data : array
Input data; for the DEAP dataset its shape may be (n_trials, n_channels, points)
k : int, optional
Order, by default 10
Return
----------
nzc:
Solved feature; its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [4]: d, l = load_deap(path, 0)
In [5]: hoc(d, k=10).shape
Out[5]: (40, 32, 10)
In [6]: hoc(d, k=5).shape
Out[6]: (40, 32, 5)
"""
nzc = []
for i in range(k):
curr_diff = np.diff(data, n=i)
x_t = curr_diff >= 0
x_t = np.diff(x_t)
x_t = np.abs(x_t)
count = np.count_nonzero(x_t, axis=-1)
nzc.append(count)
f = np.stack(nzc, axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # merge the channel and feature axes
return f
def sevcik_fd(data, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The Sevick method is used here because it is easier to implement
Parameters
----------
data array
Input data; for the DEAP dataset its shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature; its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: sevcik_fd(d).shape
Out[8]: (40, 32, 1)
"""
points = data.shape[-1]
x = np.arange(1, points+1)
x_ = x / np.max(x)
miny = np.expand_dims(np.min(data, axis=-1), axis=-1)
maxy = np.expand_dims(np.max(data, axis=-1), axis=-1)
y_ = (data-miny) / (maxy-miny)
L = np.expand_dims(np.sum(np.sqrt(np.diff(y_, axis=-1)**2 + np.diff(x_)**2), axis=-1), axis=-1)
f = 1 + np.log(L) / np.log(2 * (points-1))
# print(FD.shape)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # merge the channel and feature axes
return f
def calc_L(X, k, m):
"""
Return Lm(k) as the length of the curve.
"""
N = X.shape[-1]
n = np.floor((N-m)/k).astype(np.int64)
norm = (N-1) / (n*k)
ss = np.sum(np.abs(np.diff(X[..., m::k], n=1)), axis=-1)
Lm = (ss*norm) / k
return Lm
def calc_L_average(X, k):
"""
Return <L(k)> as the average value over k sets of Lm(k).
"""
calc_L_series = np.frompyfunc(lambda m: calc_L(X, k, m), 1, 1)
L_average = np.average(calc_L_series(np.arange(1, k+1)))
return L_average
def higuchi_fd(data, k_max, combined=True):
"""Fractal dimension feature is solved, which is used to describe the shape information of EEG time series data. It seems that this feature can be used to judge the electrooculogram and EEG.The calculation methods include Sevcik, fractal Brownian motion, box counting, Higuchi and so on.
Sevcik method: fast calculation and robust analysis of noise
Higuchi: closer to the theoretical value than box counting
The higuchi method is used here because it is easier to implement
Parameters
----------
data array
Input data; for the DEAP dataset its shape may be (n_trials, n_channels, points)
Return
----------
f:
Solved feature; its shape is similar to the shape of your input data.
e.g. for input.shape is (n_trials, n_channels, points), the f.shape is (n_trials, n_channels, n_features)
Example
----------
In [7]: d.shape, l.shape
Out[7]: ((40, 32, 8064), (40, 1))
In [8]: higuchi_fd(d, 10).shape
Out[8]: (40, 32, 1)
"""
calc_L_average_series = np.frompyfunc(lambda k: calc_L_average(data, k), 1, 1)
k = np.arange(1, k_max+1)
L = calc_L_average_series(k)
L = np.stack(L, axis=-1)
fd = np.zeros(data.shape[:-1])
for ind in np.argwhere(L[..., 0]):
tmp = L[ind[0], ind[1], ind[2]]
D, _= np.polyfit(np.log2(k), np.log2(tmp), 1)
fd[ind[0], ind[1], ind[2]] = -D
f = np.expand_dims(fd, axis=-1)
if combined:
f = f.reshape((*f.shape[:-2], -1))  # merge the channel and feature axes
return f
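A usage sketch for the feature extractors above (not part of the original module). The shapes assume DEAP-like input of (n_trials, n_channels, points) with random data standing in for real EEG; combined=False keeps the per-channel feature axis:

import numpy as np

data = np.random.randn(40, 32, 512)                      # fake EEG, DEAP-like layout
stats = statistics(data, combined=False)                 # -> (40, 32, 7)
hj = hjorth(data, combined=False)                        # -> (40, 32, 3)
hoc = higher_order_crossing(data, k=5, combined=False)   # -> (40, 32, 5)
print(stats.shape, hj.shape, hoc.shape)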
| 29.977186
| 291
| 0.597793
| 1,240
| 7,884
| 3.712097
| 0.164516
| 0.027156
| 0.02607
| 0.05214
| 0.560721
| 0.544645
| 0.525092
| 0.511623
| 0.511623
| 0.504454
| 0
| 0.038592
| 0.250634
| 7,884
| 262
| 292
| 30.091603
| 0.740521
| 0.064688
| 0
| 0.189873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.012658
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
8143df98ebce82100584c4d53ea2d04b4dccafa6
| 3,351
|
py
|
Python
|
experiments/rpi/gertboard/dtoa.py
|
willingc/pingo
|
0890bf5ed763e9061320093fc3fb5f7543c5cc2c
|
[
"MIT"
] | null | null | null |
experiments/rpi/gertboard/dtoa.py
|
willingc/pingo
|
0890bf5ed763e9061320093fc3fb5f7543c5cc2c
|
[
"MIT"
] | 1
|
2021-03-20T05:17:03.000Z
|
2021-03-20T05:17:03.000Z
|
experiments/rpi/gertboard/dtoa.py
|
willingc/pingo
|
0890bf5ed763e9061320093fc3fb5f7543c5cc2c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python2.7
# Python 2.7 version by Alex Eames of http://RasPi.TV
# functionally equivalent to the Gertboard dtoa test by Gert Jan van Loo & Myra VanInwegen
# Use at your own risk - I'm pretty sure the code is harmless, but check it yourself.
# This will not work unless you have installed py-spidev as in the README.txt file
# spi must also be enabled on your system
import spidev
import sys
from time import sleep
board_type = sys.argv[-1]
# reload spi drivers to prevent spi failures
import subprocess
unload_spi = subprocess.Popen('sudo rmmod spi_bcm2708', shell=True, stdout=subprocess.PIPE)
start_spi = subprocess.Popen('sudo modprobe spi_bcm2708', shell=True, stdout=subprocess.PIPE)
sleep(3)
def which_channel():
channel = raw_input("Which channel do you want to test? Type 0 or 1.\n") # User inputs channel number
while not channel.isdigit(): # Check valid user input
channel = raw_input("Try again - just numbers 0 or 1 please!\n") # Make them do it again if wrong
return channel
spi = spidev.SpiDev()
spi.open(0,1) # The Gertboard DAC is on SPI channel 1 (CE1 - aka GPIO7)
channel = 3 # set initial value to force user selection
common = [0,0,0,160,240] # 2nd byte common to both channels
voltages = [0.0,0.5,1.02,1.36,2.04] # voltages for display
while not (channel == 1 or channel == 0): # channel is set by user input
channel = int(which_channel()) # continue asking until answer 0 or 1 given
if channel == 1: # once proper answer given, carry on
num_list = [176,180,184,186,191] # set correct channel-dependent list for byte 1
else:
num_list = [48,52,56,58,63]
print "These are the connections for the digital to analogue test:"
if board_type == "m":
print "jumper connecting GPIO 7 to CSB"
print "Multimeter connections (set your meter to read V DC):"
print " connect black probe to GND"
print " connect red probe to DA%d on D/A header" % channel
else:
print "jumper connecting GP11 to SCLK"
print "jumper connecting GP10 to MOSI"
print "jumper connecting GP9 to MISO"
print "jumper connecting GP7 to CSnB"
print "Multimeter connections (set your meter to read V DC):"
print " connect black probe to GND"
print " connect red probe to DA%d on J29" % channel
raw_input("When ready hit enter.\n")
for i in range(5):
r = spi.xfer2([num_list[i],common[i]]) #write the two bytes to the DAC
print "Your meter should read about %.2fV" % voltages[i]
raw_input("When ready hit enter.\n")
r = spi.xfer2([16,0]) # switch off channel A = 00010000 00000000 [16,0]
r = spi.xfer2([144,0]) # switch off channel B = 10010000 00000000 [144,0]
# The DAC is controlled by writing 2 bytes (16 bits) to it.
# So we need to write a 16 bit word to DAC
# bit 15 = channel, bit 14 = ignored, bit 13 =gain, bit 12 = shutdown, bits 11-4 data, bits 3-0 ignored
# You feed spidev a decimal number and it converts it to 8 bit binary
# each argument is a byte (8 bits), so we need two arguments, which together make 16 bits.
# that's what spidev sends to the DAC. If you need to delve further, have a look at the datasheet. :)
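The byte pairs in num_list and common follow directly from the bit layout described in the closing comments; a sketch (not in the original script) that reassembles them:

def dac_word(channel, value, gain_1x=True, active=True):
    # bit 15 = channel select, bit 13 = gain, bit 12 = active/shutdown,
    # bits 11-4 = 8-bit data; bits 14 and 3-0 are ignored by the DAC
    word = (channel & 1) << 15
    word |= int(gain_1x) << 13
    word |= int(active) << 12
    word |= (value & 0xFF) << 4
    return [word >> 8, word & 0xFF]      # the two bytes fed to spi.xfer2()

# dac_word(1, 0xAA) == [186, 160]   (num_list[3] and common[3] for channel 1)
# dac_word(0, 0, gain_1x=False) == [16, 0], the "switch off channel A" word,
# which actually leaves the DAC active with a zero output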
| 45.90411
| 110
| 0.664279
| 545
| 3,351
| 4.056881
| 0.46789
| 0.024876
| 0.04749
| 0.019901
| 0.150158
| 0.150158
| 0.150158
| 0.091361
| 0.091361
| 0.091361
| 0
| 0.063821
| 0.251865
| 3,351
| 72
| 111
| 46.541667
| 0.818109
| 0.411817
| 0
| 0.177778
| 0
| 0
| 0.341225
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.088889
| null | null | 0.288889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4a0dbe903b46f2ac15b321d70b46c5431fada6b
| 4,932
|
py
|
Python
|
scripts/H5toXMF.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
scripts/H5toXMF.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
scripts/H5toXMF.py
|
robertsawko/proteus
|
6f1e4c2ca1af85a906b35a5162430006f0343861
|
[
"NASA-1.3"
] | null | null | null |
#import numpy
#import os
#from xml.etree.ElementTree import *
import tables
#from Xdmf import *
def H5toXMF(basename,size,start,finaltime,stride):
# Open XMF files
for step in range(start,finaltime+1,stride):
XMFfile = open(basename+"."+str(step)+".xmf","w")
XMFfile.write(r"""<?xml version="1.0" ?>
<!DOCTYPE Xdmf SYSTEM "Xdmf.dtd" []>
<Xdmf Version="2.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<Domain>"""+"\n")
XMFfile.write(r' <Grid GridType="Collection" CollectionType="Spatial">'+"\n")
for proc in range(0,size):
filename="solution.p"+str(proc)+"."+str(step)+".h5"
print filename
f1 = tables.openFile(filename)
XMFfile.write (r'<Grid GridType="Uniform">'+"\n")
XMFfile.write(r' <Time Value="'+str(step)+'" />'+"\n")
for tmp in f1.root:
if tmp.name == "elements":
XMFfile.write (r'<Topology NumberOfElements="' +str(len(tmp[:]))+ '" Type="Tetrahedron">'+"\n")
XMFfile.write (r' <DataItem DataType="Int" Dimensions="' +str(len(tmp[:]))+ ' 4" Format="HDF">' + filename + ':/elements</DataItem>'+"\n")
XMFfile.write (r'</Topology>'+"\n")
if tmp.name == "nodes":
XMFfile.write (r'<Geometry Type="XYZ">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ ' 3" Format="HDF" Precision="8">' + filename + ':/nodes</DataItem>'+"\n")
XMFfile.write (r'</Geometry>'+"\n")
if tmp.name == "u":
XMFfile.write (r'<Attribute AttributeType="Scalar" Center="Node" Name="u">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ '" Format="HDF" Precision="8">' + filename + ':/u</DataItem>'+"\n")
XMFfile.write (r'</Attribute>'+"\n")
if tmp.name == "v":
XMFfile.write (r'<Attribute AttributeType="Scalar" Center="Node" Name="v">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ '" Format="HDF" Precision="8">' + filename + ':/v</DataItem>'+"\n")
XMFfile.write (r'</Attribute>'+"\n")
if tmp.name == "w":
XMFfile.write (r'<Attribute AttributeType="Scalar" Center="Node" Name="w">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ '" Format="HDF" Precision="8">' + filename + ':/w</DataItem>'+"\n")
XMFfile.write (r'</Attribute>'+"\n")
if tmp.name == "p":
XMFfile.write (r'<Attribute AttributeType="Scalar" Center="Node" Name="p">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ '" Format="HDF" Precision="8">' + filename + ':/p</DataItem>'+"\n")
XMFfile.write (r'</Attribute>'+"\n")
if tmp.name == "phid":
XMFfile.write (r'<Attribute AttributeType="Scalar" Center="Node" Name="phid">'+"\n")
XMFfile.write (r' <DataItem DataType="Float" Dimensions="' +str(len(tmp[:]))+ '" Format="HDF" Precision="8">' + filename + ':/phid</DataItem>'+"\n")
XMFfile.write (r'</Attribute>'+"\n")
f1.close()
XMFfile.write(' </Grid>'+"\n")
XMFfile.write(' </Grid>'+"\n")
XMFfile.write(' </Domain>'+"\n")
XMFfile.write(' </Xdmf>'+"\n")
XMFfile.close()
if __name__ == '__main__':
from optparse import OptionParser
usage = ""
parser = OptionParser(usage=usage)
parser.add_option("-n","--size",
help="number of processors for run",
action="store",
type="int",
dest="size",
default=1)
parser.add_option("-s","--stride",
help="stride for solution output",
action="store",
type="int",
dest="stride",
default=0)
parser.add_option("-t","--finaltime",
help="finaltime",
action="store",
type="int",
dest="finaltime",
default=1000)
parser.add_option("-f","--filebase_flow",
help="base name for storage files",
action="store",
type="string",
dest="filebase",
default="solution")
(opts,args) = parser.parse_args()
start = 0
if opts.stride == 0 :
start = opts.finaltime
opts.stride = 1
H5toXMF(opts.filebase,opts.size,start,opts.finaltime,opts.stride)
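A usage sketch (assumed, not from the original file): convert steps 0, 10, ..., 100 of an 8-process run whose HDF5 files are named solution.p<proc>.<step>.h5 in the working directory:

H5toXMF("solution", 8, 0, 100, 10)
# equivalent CLI: python H5toXMF.py -f solution -n 8 -t 100 -s 10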
| 42.153846
| 172
| 0.491484
| 507
| 4,932
| 4.753452
| 0.242604
| 0.144398
| 0.134855
| 0.092946
| 0.518257
| 0.417427
| 0.385892
| 0.372614
| 0.372614
| 0.258506
| 0
| 0.010098
| 0.317315
| 4,932
| 116
| 173
| 42.517241
| 0.705673
| 0.018045
| 0
| 0.166667
| 0
| 0.011905
| 0.336435
| 0.036601
| 0.202381
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.02381
| null | null | 0.011905
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4b1cf0c1cabef461b1902ca1dbcbf5165c73bc9
| 45,496
|
py
|
Python
|
rpython/memory/test/test_transformed_gc.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | 1
|
2019-11-25T10:52:01.000Z
|
2019-11-25T10:52:01.000Z
|
rpython/memory/test/test_transformed_gc.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | null | null | null |
rpython/memory/test/test_transformed_gc.py
|
jptomo/pypy-lang-scheme
|
55edb2cec69d78f86793282a4566fcbc1ef9fcac
|
[
"MIT"
] | null | null | null |
import py
import inspect
from rpython.rlib.objectmodel import compute_hash, compute_identity_hash
from rpython.translator.c import gc
from rpython.annotator import model as annmodel
from rpython.rtyper.llannotation import SomePtr
from rpython.rtyper.lltypesystem import lltype, llmemory, rffi, llgroup
from rpython.memory.gctransform import framework, shadowstack
from rpython.rtyper.lltypesystem.lloperation import llop, void
from rpython.rlib.objectmodel import compute_unique_id, we_are_translated
from rpython.rlib.debug import ll_assert
from rpython.rlib import rgc
from rpython.conftest import option
from rpython.rlib.rstring import StringBuilder
from rpython.rlib.rarithmetic import LONG_BIT
WORD = LONG_BIT // 8
def rtype(func, inputtypes, specialize=True, gcname='ref',
backendopt=False, **extraconfigopts):
from rpython.translator.translator import TranslationContext
t = TranslationContext()
# XXX XXX XXX mess
t.config.translation.gc = gcname
t.config.translation.gcremovetypeptr = True
t.config.set(**extraconfigopts)
ann = t.buildannotator()
ann.build_types(func, inputtypes)
if specialize:
t.buildrtyper().specialize()
if backendopt:
from rpython.translator.backendopt.all import backend_optimizations
backend_optimizations(t)
if option.view:
t.viewcg()
return t
ARGS = lltype.FixedSizeArray(lltype.Signed, 3)
class GCTest(object):
gcpolicy = None
GC_CAN_MOVE = False
taggedpointers = False
def setup_class(cls):
cls.marker = lltype.malloc(rffi.CArray(lltype.Signed), 1,
flavor='raw', zero=True)
funcs0 = []
funcs2 = []
cleanups = []
name_to_func = {}
mixlevelstuff = []
for fullname in dir(cls):
if not fullname.startswith('define'):
continue
definefunc = getattr(cls, fullname)
_, name = fullname.split('_', 1)
func_fixup = definefunc.im_func(cls)
cleanup = None
if isinstance(func_fixup, tuple):
func, cleanup, fixup = func_fixup
mixlevelstuff.append(fixup)
else:
func = func_fixup
func.func_name = "f_%s" % name
if cleanup:
cleanup.func_name = "clean_%s" % name
nargs = len(inspect.getargspec(func)[0])
name_to_func[name] = len(funcs0)
if nargs == 2:
funcs2.append(func)
funcs0.append(None)
elif nargs == 0:
funcs0.append(func)
funcs2.append(None)
else:
raise NotImplementedError(
"defined test functions should have 0/2 arguments")
# used to let test cleanup static root pointing to runtime
# allocated stuff
cleanups.append(cleanup)
def entrypoint(args):
num = args[0]
func = funcs0[num]
if func:
res = func()
else:
func = funcs2[num]
res = func(args[1], args[2])
cleanup = cleanups[num]
if cleanup:
cleanup()
return res
from rpython.translator.c.genc import CStandaloneBuilder
s_args = SomePtr(lltype.Ptr(ARGS))
t = rtype(entrypoint, [s_args], gcname=cls.gcname,
taggedpointers=cls.taggedpointers)
for fixup in mixlevelstuff:
if fixup:
fixup(t)
cbuild = CStandaloneBuilder(t, entrypoint, config=t.config,
gcpolicy=cls.gcpolicy)
db = cbuild.generate_graphs_for_llinterp()
entrypointptr = cbuild.getentrypointptr()
entrygraph = entrypointptr._obj.graph
if option.view:
t.viewcg()
cls.name_to_func = name_to_func
cls.entrygraph = entrygraph
cls.rtyper = t.rtyper
cls.db = db
def runner(self, name, transformer=False):
db = self.db
name_to_func = self.name_to_func
entrygraph = self.entrygraph
from rpython.rtyper.llinterp import LLInterpreter
llinterp = LLInterpreter(self.rtyper)
gct = db.gctransformer
if self.__class__.__dict__.get('_used', False):
teardowngraph = gct.frameworkgc__teardown_ptr.value._obj.graph
llinterp.eval_graph(teardowngraph, [])
self.__class__._used = True
# FIIIIISH
setupgraph = gct.frameworkgc_setup_ptr.value._obj.graph
# setup => resets the gc
llinterp.eval_graph(setupgraph, [])
def run(args):
ll_args = lltype.malloc(ARGS, immortal=True)
ll_args[0] = name_to_func[name]
for i in range(len(args)):
ll_args[1+i] = args[i]
res = llinterp.eval_graph(entrygraph, [ll_args])
return res
if transformer:
return run, gct
else:
return run
class GenericGCTests(GCTest):
GC_CAN_SHRINK_ARRAY = False
def define_instances(cls):
class A(object):
pass
class B(A):
def __init__(self, something):
self.something = something
def malloc_a_lot():
i = 0
first = None
while i < 10:
i += 1
a = somea = A()
a.last = first
first = a
j = 0
while j < 30:
b = B(somea)
b.last = first
j += 1
return 0
return malloc_a_lot
def test_instances(self):
run = self.runner("instances")
run([])
def define_llinterp_lists(cls):
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = [1] * 10
j = 0
while j < 30:
j += 1
a.append(j)
return 0
return malloc_a_lot
def test_llinterp_lists(self):
run = self.runner("llinterp_lists")
run([])
def define_llinterp_tuples(cls):
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = (1, 2, i)
b = [a] * 10
j = 0
while j < 20:
j += 1
b.append((1, j, i))
return 0
return malloc_a_lot
def test_llinterp_tuples(self):
run = self.runner("llinterp_tuples")
run([])
def define_llinterp_dict(self):
class A(object):
pass
def malloc_a_lot():
i = 0
while i < 10:
i += 1
a = (1, 2, i)
b = {a: A()}
j = 0
while j < 20:
j += 1
b[1, j, i] = A()
return 0
return malloc_a_lot
def test_llinterp_dict(self):
run = self.runner("llinterp_dict")
run([])
def skipdefine_global_list(cls):
gl = []
class Box:
def __init__(self):
self.lst = gl
box = Box()
def append_to_list(i, j):
box.lst.append([i] * 50)
llop.gc__collect(lltype.Void)
return box.lst[j][0]
return append_to_list, None, None
def test_global_list(self):
py.test.skip("doesn't fit in the model, tested elsewhere too")
run = self.runner("global_list")
res = run([0, 0])
assert res == 0
for i in range(1, 5):
res = run([i, i - 1])
assert res == i - 1 # crashes if constants are not considered roots
def define_string_concatenation(cls):
def concat(j, dummy):
lst = []
for i in range(j):
lst.append(str(i))
return len("".join(lst))
return concat
def test_string_concatenation(self):
run = self.runner("string_concatenation")
res = run([100, 0])
assert res == len(''.join([str(x) for x in range(100)]))
def define_nongc_static_root(cls):
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
def f():
t1 = lltype.malloc(T1)
t1.x = 42
static.p = t1
llop.gc__collect(lltype.Void)
return static.p.x
def cleanup():
static.p = lltype.nullptr(T1)
return f, cleanup, None
def test_nongc_static_root(self):
run = self.runner("nongc_static_root")
res = run([])
assert res == 42
def define_finalizer(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
def f(x, y):
a = A()
i = 0
while i < x:
i += 1
a = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
return f
def test_finalizer(self):
run = self.runner("finalizer")
res = run([5, 42]) #XXX pure lazyness here too
assert res == 6
def define_finalizer_calls_malloc(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class AAA(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
C()
class C(AAA):
def __del__(self):
b.num_deleted += 1
def f(x, y):
a = AAA()
i = 0
while i < x:
i += 1
a = AAA()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted
return f
def test_finalizer_calls_malloc(self):
run = self.runner("finalizer_calls_malloc")
res = run([5, 42]) #XXX pure lazyness here too
assert res == 12
def define_finalizer_resurrects(cls):
class B(object):
pass
b = B()
b.nextid = 0
b.num_deleted = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
b.num_deleted += 1
b.a = self
def f(x, y):
a = A()
i = 0
while i < x:
i += 1
a = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
aid = b.a.id
b.a = None
# check that __del__ is not called again
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
return b.num_deleted * 10 + aid + 100 * (b.a is None)
return f
def test_finalizer_resurrects(self):
run = self.runner("finalizer_resurrects")
res = run([5, 42]) #XXX pure lazyness here too
assert 160 <= res <= 165
def define_custom_trace(cls):
#
S = lltype.GcStruct('S', ('x', llmemory.Address))
T = lltype.GcStruct('T', ('z', lltype.Signed))
offset_of_x = llmemory.offsetof(S, 'x')
def customtrace(gc, obj, callback, arg):
gc._trace_callback(callback, arg, obj + offset_of_x)
lambda_customtrace = lambda: customtrace
#
def setup():
rgc.register_custom_trace_hook(S, lambda_customtrace)
tx = lltype.malloc(T)
tx.z = 4243
s1 = lltype.malloc(S)
s1.x = llmemory.cast_ptr_to_adr(tx)
return s1
def f():
s1 = setup()
llop.gc__collect(lltype.Void)
return llmemory.cast_adr_to_ptr(s1.x, lltype.Ptr(T)).z
return f
def test_custom_trace(self):
run = self.runner("custom_trace")
res = run([])
assert res == 4243
def define_weakref(cls):
import weakref, gc
class A(object):
pass
def g():
a = A()
return weakref.ref(a)
def f():
a = A()
ref = weakref.ref(a)
result = ref() is a
ref = g()
llop.gc__collect(lltype.Void)
result = result and (ref() is None)
# check that a further collection is fine
llop.gc__collect(lltype.Void)
result = result and (ref() is None)
return result
return f
def test_weakref(self):
run = self.runner("weakref")
res = run([])
assert res
def define_weakref_to_object_with_finalizer(cls):
import weakref, gc
class A(object):
count = 0
a = A()
class B(object):
def __del__(self):
a.count += 1
def g():
b = B()
return weakref.ref(b)
def f():
ref = g()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
result = a.count == 1 and (ref() is None)
return result
return f
def test_weakref_to_object_with_finalizer(self):
run = self.runner("weakref_to_object_with_finalizer")
res = run([])
assert res
def define_collect_during_collect(cls):
class B(object):
pass
b = B()
b.nextid = 1
b.num_deleted = 0
b.num_deleted_c = 0
class A(object):
def __init__(self):
self.id = b.nextid
b.nextid += 1
def __del__(self):
llop.gc__collect(lltype.Void)
b.num_deleted += 1
C()
C()
class C(A):
def __del__(self):
b.num_deleted += 1
b.num_deleted_c += 1
def f(x, y):
persistent_a1 = A()
persistent_a2 = A()
i = 0
while i < x:
i += 1
a = A()
persistent_a3 = A()
persistent_a4 = A()
llop.gc__collect(lltype.Void)
llop.gc__collect(lltype.Void)
b.bla = persistent_a1.id + persistent_a2.id + persistent_a3.id + persistent_a4.id
# NB print would create a static root!
llop.debug_print(lltype.Void, b.num_deleted_c)
return b.num_deleted
return f
def test_collect_during_collect(self):
run = self.runner("collect_during_collect")
# runs collect recursively 4 times
res = run([4, 42]) #XXX pure lazyness here too
assert res == 12
def define_collect_0(cls):
def concat(j, dummy):
lst = []
for i in range(j):
lst.append(str(i))
result = len("".join(lst))
if we_are_translated():
llop.gc__collect(lltype.Void, 0)
return result
return concat
def test_collect_0(self):
run = self.runner("collect_0")
res = run([100, 0])
assert res == len(''.join([str(x) for x in range(100)]))
def define_interior_ptrs(cls):
from rpython.rtyper.lltypesystem.lltype import Struct, GcStruct, GcArray
from rpython.rtyper.lltypesystem.lltype import Array, Signed, malloc
S1 = Struct("S1", ('x', Signed))
T1 = GcStruct("T1", ('s', S1))
def f1():
t = malloc(T1)
t.s.x = 1
return t.s.x
S2 = Struct("S2", ('x', Signed))
T2 = GcArray(S2)
def f2():
t = malloc(T2, 1)
t[0].x = 1
return t[0].x
S3 = Struct("S3", ('x', Signed))
T3 = GcStruct("T3", ('items', Array(S3)))
def f3():
t = malloc(T3, 1)
t.items[0].x = 1
return t.items[0].x
S4 = Struct("S4", ('x', Signed))
T4 = Struct("T4", ('s', S4))
U4 = GcArray(T4)
def f4():
u = malloc(U4, 1)
u[0].s.x = 1
return u[0].s.x
S5 = Struct("S5", ('x', Signed))
T5 = GcStruct("T5", ('items', Array(S5)))
def f5():
t = malloc(T5, 1)
return len(t.items)
T6 = GcStruct("T6", ('s', Array(Signed)))
def f6():
t = malloc(T6, 1)
t.s[0] = 1
return t.s[0]
def func():
return (f1() * 100000 +
f2() * 10000 +
f3() * 1000 +
f4() * 100 +
f5() * 10 +
f6())
assert func() == 111111
return func
def test_interior_ptrs(self):
run = self.runner("interior_ptrs")
res = run([])
assert res == 111111
def define_id(cls):
class A(object):
pass
a1 = A()
def func():
a2 = A()
a3 = A()
id1 = compute_unique_id(a1)
id2 = compute_unique_id(a2)
id3 = compute_unique_id(a3)
llop.gc__collect(lltype.Void)
error = 0
if id1 != compute_unique_id(a1): error += 1
if id2 != compute_unique_id(a2): error += 2
if id3 != compute_unique_id(a3): error += 4
return error
return func
def test_id(self):
run = self.runner("id")
res = run([])
assert res == 0
def define_can_move(cls):
TP = lltype.GcArray(lltype.Float)
def func():
return rgc.can_move(lltype.malloc(TP, 1))
return func
def test_can_move(self):
run = self.runner("can_move")
res = run([])
assert res == self.GC_CAN_MOVE
def define_shrink_array(cls):
from rpython.rtyper.lltypesystem.rstr import STR
def f():
ptr = lltype.malloc(STR, 3)
ptr.hash = 0x62
ptr.chars[0] = '0'
ptr.chars[1] = 'B'
ptr.chars[2] = 'C'
ptr2 = rgc.ll_shrink_array(ptr, 2)
return ((ptr == ptr2) +
ord(ptr2.chars[0]) +
(ord(ptr2.chars[1]) << 8) +
(len(ptr2.chars) << 16) +
(ptr2.hash << 24))
return f
def test_shrink_array(self):
run = self.runner("shrink_array")
if self.GC_CAN_SHRINK_ARRAY:
expected = 0x62024231
else:
expected = 0x62024230
assert run([]) == expected
def define_string_builder_over_allocation(cls):
import gc
def fn():
s = StringBuilder(4)
s.append("abcd")
s.append("defg")
s.append("rty")
s.append_multiple_char('y', 1000)
gc.collect()
s.append_multiple_char('y', 1000)
res = s.build()[1000]
gc.collect()
return ord(res)
return fn
def test_string_builder_over_allocation(self):
fn = self.runner("string_builder_over_allocation")
res = fn([])
assert res == ord('y')
class GenericMovingGCTests(GenericGCTests):
GC_CAN_MOVE = True
GC_CAN_TEST_ID = False
def define_many_ids(cls):
class A(object):
pass
def f():
from rpython.rtyper.lltypesystem import rffi
alist = [A() for i in range(50)]
idarray = lltype.malloc(rffi.SIGNEDP.TO, len(alist), flavor='raw')
# Compute the id of all the elements of the list. The goal is
# to not allocate memory, so that if the GC needs memory to
# remember the ids, it will trigger some collections itself
i = 0
while i < len(alist):
idarray[i] = compute_unique_id(alist[i])
i += 1
j = 0
while j < 2:
if j == 1: # allocate some stuff between the two iterations
[A() for i in range(20)]
i = 0
while i < len(alist):
assert idarray[i] == compute_unique_id(alist[i])
i += 1
j += 1
lltype.free(idarray, flavor='raw')
return 0
return f
def test_many_ids(self):
if not self.GC_CAN_TEST_ID:
py.test.skip("fails for bad reasons in lltype.py :-(")
run = self.runner("many_ids")
run([])
@classmethod
def ensure_layoutbuilder(cls, translator):
jit2gc = getattr(translator, '_jit2gc', None)
if jit2gc:
assert 'invoke_after_minor_collection' in jit2gc
return jit2gc['layoutbuilder']
marker = cls.marker
GCClass = cls.gcpolicy.transformerclass.GCClass
layoutbuilder = framework.TransformerLayoutBuilder(translator, GCClass)
layoutbuilder.delay_encoding()
def seeme():
marker[0] += 1
translator._jit2gc = {
'layoutbuilder': layoutbuilder,
'invoke_after_minor_collection': seeme,
}
return layoutbuilder
def define_do_malloc_operations(cls):
P = lltype.GcStruct('P', ('x', lltype.Signed))
def g():
r = lltype.malloc(P)
r.x = 1
p = llop.do_malloc_fixedsize(llmemory.GCREF) # placeholder
p = lltype.cast_opaque_ptr(lltype.Ptr(P), p)
p.x = r.x
return p.x
def f():
i = 0
while i < 40:
g()
i += 1
return 0
if cls.gcname == 'incminimark':
marker = cls.marker
def cleanup():
assert marker[0] > 0
marker[0] = 0
else:
cleanup = None
def fix_graph_of_g(translator):
from rpython.translator.translator import graphof
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import rffi
layoutbuilder = cls.ensure_layoutbuilder(translator)
type_id = layoutbuilder.get_type_id(P)
#
# now fix the do_malloc_fixedsize in the graph of g
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
assert 0, "oups, not found"
return f, cleanup, fix_graph_of_g
def test_do_malloc_operations(self):
run = self.runner("do_malloc_operations")
run([])
def define_do_malloc_operations_in_call(cls):
P = lltype.GcStruct('P', ('x', lltype.Signed))
def g():
llop.do_malloc_fixedsize(llmemory.GCREF) # placeholder
def f():
q = lltype.malloc(P)
q.x = 1
i = 0
while i < 40:
g()
i += q.x
return 0
def fix_graph_of_g(translator):
from rpython.translator.translator import graphof
from rpython.flowspace.model import Constant
from rpython.rtyper.lltypesystem import rffi
layoutbuilder = cls.ensure_layoutbuilder(translator)
type_id = layoutbuilder.get_type_id(P)
#
# now fix the do_malloc_fixedsize in the graph of g
graph = graphof(translator, g)
for op in graph.startblock.operations:
if op.opname == 'do_malloc_fixedsize':
op.args = [Constant(type_id, llgroup.HALFWORD),
Constant(llmemory.sizeof(P), lltype.Signed),
Constant(False, lltype.Bool), # has_finalizer
Constant(False, lltype.Bool), # is_finalizer_light
Constant(False, lltype.Bool)] # contains_weakptr
break
else:
assert 0, "oups, not found"
return f, None, fix_graph_of_g
def test_do_malloc_operations_in_call(self):
run = self.runner("do_malloc_operations_in_call")
run([])
def define_gc_heap_stats(cls):
S = lltype.GcStruct('S', ('x', lltype.Signed))
l1 = []
l2 = []
l3 = []
l4 = []
def f():
for i in range(10):
s = lltype.malloc(S)
l1.append(s)
l2.append(s)
if i < 3:
l3.append(s)
l4.append(s)
# We cheat here and only read the table which we later on
# process ourselves, otherwise this test takes ages
llop.gc__collect(lltype.Void)
tb = rgc._heap_stats()
a = 0
nr = 0
b = 0
c = 0
d = 0
e = 0
for i in range(len(tb)):
if tb[i].count == 10:
a += 1
nr = i
if tb[i].count > 50:
d += 1
for i in range(len(tb)):
if tb[i].count == 4:
b += 1
c += tb[i].links[nr]
e += tb[i].size
return d * 1000 + c * 100 + b * 10 + a
return f
def test_gc_heap_stats(self):
py.test.skip("this test makes the following test crash. Investigate.")
run = self.runner("gc_heap_stats")
res = run([])
assert res % 10000 == 2611
totsize = (res / 10000)
size_of_int = rffi.sizeof(lltype.Signed)
assert (totsize - 26 * size_of_int) % 4 == 0
# ^^^ a crude assumption that totsize - varsize would be dividable by 4
# (and give fixedsize)
def define_writebarrier_before_copy(cls):
S = lltype.GcStruct('S', ('x', lltype.Char))
TP = lltype.GcArray(lltype.Ptr(S))
def fn():
l = lltype.malloc(TP, 100)
l2 = lltype.malloc(TP, 100)
for i in range(100):
l[i] = lltype.malloc(S)
rgc.ll_arraycopy(l, l2, 50, 0, 50)
# force nursery collect
x = []
for i in range(20):
x.append((1, lltype.malloc(S)))
for i in range(50):
assert l2[i] == l[50 + i]
return 0
return fn
def test_writebarrier_before_copy(self):
run = self.runner("writebarrier_before_copy")
run([])
# ________________________________________________________________
class TestSemiSpaceGC(GenericMovingGCTests):
gcname = "semispace"
GC_CAN_SHRINK_ARRAY = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.semispace import SemiSpaceGC as GCClass
GC_PARAMS = {'space_size': 512*WORD,
'translated_to_c': False}
root_stack_depth = 200
class TestGenerationGC(GenericMovingGCTests):
gcname = "generation"
GC_CAN_SHRINK_ARRAY = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC as \
GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_weakref_across_minor_collection(cls):
import weakref
class A:
pass
def f():
x = 20 # for GenerationGC, enough for a minor collection
a = A()
a.foo = x
ref = weakref.ref(a)
all = [None] * x
i = 0
while i < x:
all[i] = [i] * i
i += 1
assert ref() is a
llop.gc__collect(lltype.Void)
assert ref() is a
return a.foo + len(all)
return f
def test_weakref_across_minor_collection(self):
run = self.runner("weakref_across_minor_collection")
res = run([])
assert res == 20 + 20
def define_nongc_static_root_minor_collect(cls):
T1 = lltype.GcStruct("C", ('x', lltype.Signed))
T2 = lltype.Struct("C", ('p', lltype.Ptr(T1)))
static = lltype.malloc(T2, immortal=True)
def f():
t1 = lltype.malloc(T1)
t1.x = 42
static.p = t1
x = 20
all = [None] * x
i = 0
while i < x: # enough to cause a minor collect
all[i] = [i] * i
i += 1
i = static.p.x
llop.gc__collect(lltype.Void)
return static.p.x + i
def cleanup():
static.p = lltype.nullptr(T1)
return f, cleanup, None
def test_nongc_static_root_minor_collect(self):
run = self.runner("nongc_static_root_minor_collect")
res = run([])
assert res == 84
def define_static_root_minor_collect(cls):
class A:
pass
class B:
pass
static = A()
static.p = None
def f():
t1 = B()
t1.x = 42
static.p = t1
x = 20
all = [None] * x
i = 0
while i < x: # enough to cause a minor collect
all[i] = [i] * i
i += 1
i = static.p.x
llop.gc__collect(lltype.Void)
return static.p.x + i
def cleanup():
static.p = None
return f, cleanup, None
def test_static_root_minor_collect(self):
run = self.runner("static_root_minor_collect")
res = run([])
assert res == 84
def define_many_weakrefs(cls):
# test for the case where allocating the weakref itself triggers
# a collection
import weakref
class A:
pass
def f():
a = A()
i = 0
while i < 17:
ref = weakref.ref(a)
assert ref() is a
i += 1
return 0
return f
def test_many_weakrefs(self):
run = self.runner("many_weakrefs")
run([])
def define_immutable_to_old_promotion(cls):
T_CHILD = lltype.Ptr(lltype.GcStruct('Child', ('field', lltype.Signed)))
T_PARENT = lltype.Ptr(lltype.GcStruct('Parent', ('sub', T_CHILD)))
child = lltype.malloc(T_CHILD.TO)
child2 = lltype.malloc(T_CHILD.TO)
parent = lltype.malloc(T_PARENT.TO)
parent2 = lltype.malloc(T_PARENT.TO)
parent.sub = child
child.field = 3
parent2.sub = child2
child2.field = 8
T_ALL = lltype.Ptr(lltype.GcArray(T_PARENT))
all = lltype.malloc(T_ALL.TO, 2)
all[0] = parent
all[1] = parent2
def f(x, y):
res = all[x]
#all[x] = lltype.nullptr(T_PARENT.TO)
return res.sub.field
return f
def test_immutable_to_old_promotion(self):
run, transformer = self.runner("immutable_to_old_promotion", transformer=True)
run([1, 4])
if not transformer.GCClass.prebuilt_gc_objects_are_static_roots:
assert len(transformer.layoutbuilder.addresses_of_static_ptrs) == 0
else:
assert len(transformer.layoutbuilder.addresses_of_static_ptrs) >= 4
# NB. Remember that the number above does not count
# the number of prebuilt GC objects, but the number of locations
# within prebuilt GC objects that are of type Ptr(Gc).
# At the moment we get additional_roots_sources == 6:
# * all[0]
# * all[1]
# * parent.sub
# * parent2.sub
# * the GcArray pointer from gc.wr_to_objects_with_id
# * the GcArray pointer from gc.object_id_dict.
def define_adr_of_nursery(cls):
class A(object):
pass
def f():
# we need at least 1 obj to allocate a nursery
a = A()
nf_a = llop.gc_adr_of_nursery_free(llmemory.Address)
nt_a = llop.gc_adr_of_nursery_top(llmemory.Address)
nf0 = nf_a.address[0]
nt0 = nt_a.address[0]
a0 = A()
a1 = A()
nf1 = nf_a.address[0]
nt1 = nt_a.address[0]
assert nf1 > nf0
assert nt1 > nf1
assert nt1 == nt0
return 0
return f
def test_adr_of_nursery(self):
run = self.runner("adr_of_nursery")
res = run([])
class TestGenerationalNoFullCollectGC(GCTest):
# test that nursery is doing its job and that no full collection
# is needed when most allocated objects die quickly
gcname = "generation"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC
class GCClass(GenerationGC):
__ready = False
def setup(self):
from rpython.memory.gc.generation import GenerationGC
GenerationGC.setup(self)
self.__ready = True
def semispace_collect(self, size_changing=False):
ll_assert(not self.__ready,
"no full collect should occur in this test")
def _teardown(self):
self.__ready = False # collecting here is expected
GenerationGC._teardown(self)
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 128*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_working_nursery(cls):
def f():
total = 0
i = 0
while i < 40:
lst = []
j = 0
while j < 5:
lst.append(i*j)
j += 1
total += len(lst)
i += 1
return total
return f
def test_working_nursery(self):
run = self.runner("working_nursery")
res = run([])
assert res == 40 * 5
class TestHybridGC(TestGenerationGC):
gcname = "hybrid"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.hybrid import HybridGC as GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'large_object': 8*WORD,
'translated_to_c': False}
root_stack_depth = 200
def define_ref_from_rawmalloced_to_regular(cls):
import gc
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcStruct('A', ('p', lltype.Ptr(S)),
('a', lltype.Array(lltype.Char)))
def setup(j):
p = lltype.malloc(S)
p.x = j*2
lst = lltype.malloc(A, j)
# the following line generates a write_barrier call at the moment,
# which is important because the 'lst' can be allocated directly
# in generation 2. This can only occur with varsized mallocs.
lst.p = p
return lst
def f(i, j):
lst = setup(j)
gc.collect()
return lst.p.x
return f
def test_ref_from_rawmalloced_to_regular(self):
run = self.runner("ref_from_rawmalloced_to_regular")
res = run([100, 100])
assert res == 200
def define_write_barrier_direct(cls):
from rpython.rlib import rgc
S = lltype.GcForwardReference()
S.become(lltype.GcStruct('S',
('x', lltype.Signed),
('prev', lltype.Ptr(S)),
('next', lltype.Ptr(S))))
s0 = lltype.malloc(S, immortal=True)
def f():
s = lltype.malloc(S)
s.x = 42
llop.bare_setfield(lltype.Void, s0, void('next'), s)
llop.gc_writebarrier(lltype.Void, llmemory.cast_ptr_to_adr(s0))
rgc.collect(0)
return s0.next.x
def cleanup():
s0.next = lltype.nullptr(S)
return f, cleanup, None
def test_write_barrier_direct(self):
run = self.runner("write_barrier_direct")
res = run([])
assert res == 42
class TestMiniMarkGC(TestHybridGC):
gcname = "minimark"
GC_CAN_TEST_ID = True
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.minimark import MiniMarkGC as GCClass
GC_PARAMS = {'nursery_size': 32*WORD,
'page_size': 16*WORD,
'arena_size': 64*WORD,
'small_request_threshold': 5*WORD,
'large_object': 8*WORD,
'card_page_indices': 4,
'translated_to_c': False,
}
root_stack_depth = 200
def define_no_clean_setarrayitems(cls):
# The optimization find_clean_setarrayitems() in
# gctransformer/framework.py does not work with card marking.
# Check that it is turned off.
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcArray(lltype.Ptr(S))
def sub(lst):
lst[15] = lltype.malloc(S) # 'lst' is set the single mark "12-15"
lst[15].x = 123
lst[0] = lst[15] # that would be a "clean_setarrayitem"
def f():
lst = lltype.malloc(A, 16) # 16 > 10
rgc.collect()
sub(lst)
null = lltype.nullptr(S)
lst[15] = null # clear, so that A() is only visible via lst[0]
rgc.collect() # -> crash
return lst[0].x
return f
def test_no_clean_setarrayitems(self):
run = self.runner("no_clean_setarrayitems")
res = run([])
assert res == 123
def define_nursery_hash_base(cls):
class A:
pass
def fn():
objects = []
hashes = []
for i in range(200):
rgc.collect(0) # nursery-only collection, if possible
obj = A()
objects.append(obj)
hashes.append(compute_identity_hash(obj))
unique = {}
for i in range(len(objects)):
assert compute_identity_hash(objects[i]) == hashes[i]
unique[hashes[i]] = None
return len(unique)
return fn
def test_nursery_hash_base(self):
res = self.runner('nursery_hash_base')
assert res([]) >= 195
def define_instantiate_nonmovable(cls):
from rpython.rlib import objectmodel
from rpython.rtyper import annlowlevel
class A:
pass
def fn():
a1 = A()
a = objectmodel.instantiate(A, nonmovable=True)
a.next = a1 # 'a' is known young here, so no write barrier emitted
res = rgc.can_move(annlowlevel.cast_instance_to_base_ptr(a))
rgc.collect()
objectmodel.keepalive_until_here(a)
return res
return fn
def test_instantiate_nonmovable(self):
res = self.runner('instantiate_nonmovable')
assert res([]) == 0
class TestIncrementalMiniMarkGC(TestMiniMarkGC):
gcname = "incminimark"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.incminimark import IncrementalMiniMarkGC \
as GCClass
GC_PARAMS = {'nursery_size': 32*WORD,
'page_size': 16*WORD,
'arena_size': 64*WORD,
'small_request_threshold': 5*WORD,
'large_object': 8*WORD,
'card_page_indices': 4,
'translated_to_c': False,
}
root_stack_depth = 200
def define_malloc_array_of_gcptr(self):
S = lltype.GcStruct('S', ('x', lltype.Signed))
A = lltype.GcArray(lltype.Ptr(S))
def f():
lst = lltype.malloc(A, 5)
return (lst[0] == lltype.nullptr(S)
and lst[1] == lltype.nullptr(S)
and lst[2] == lltype.nullptr(S)
and lst[3] == lltype.nullptr(S)
and lst[4] == lltype.nullptr(S))
return f
def test_malloc_array_of_gcptr(self):
run = self.runner('malloc_array_of_gcptr')
res = run([])
assert res
def define_malloc_struct_of_gcptr(cls):
S1 = lltype.GcStruct('S', ('x', lltype.Signed))
S = lltype.GcStruct('S',
('x', lltype.Signed),
('filed1', lltype.Ptr(S1)),
('filed2', lltype.Ptr(S1)))
s0 = lltype.malloc(S)
def f():
return (s0.filed1 == lltype.nullptr(S1) and s0.filed2 == lltype.nullptr(S1))
return f
def test_malloc_struct_of_gcptr(self):
run = self.runner("malloc_struct_of_gcptr")
res = run([])
assert res
# ________________________________________________________________
# tagged pointers
class TaggedPointerGCTests(GCTest):
taggedpointers = True
def define_tagged_simple(cls):
class Unrelated(object):
pass
u = Unrelated()
u.x = UnboxedObject(47)
def fn(n):
rgc.collect() # check that a prebuilt tagged pointer doesn't explode
if n > 0:
x = BoxedObject(n)
else:
x = UnboxedObject(n)
u.x = x # invoke write barrier
rgc.collect()
return x.meth(100)
def func():
return fn(1000) + fn(-1000)
assert func() == 205
return func
def test_tagged_simple(self):
func = self.runner("tagged_simple")
res = func([])
assert res == 205
def define_tagged_prebuilt(cls):
class F:
pass
f = F()
f.l = [UnboxedObject(10)]
def fn(n):
if n > 0:
x = BoxedObject(n)
else:
x = UnboxedObject(n)
f.l.append(x)
rgc.collect()
return f.l[-1].meth(100)
def func():
return fn(1000) ^ fn(-1000)
assert func() == -1999
return func
def test_tagged_prebuilt(self):
func = self.runner("tagged_prebuilt")
res = func([])
assert res == -1999
def define_gettypeid(cls):
class A(object):
pass
def fn():
a = A()
return rgc.get_typeid(a)
return fn
def test_gettypeid(self):
func = self.runner("gettypeid")
res = func([])
print res
from rpython.rlib.objectmodel import UnboxedValue
class TaggedBase(object):
__slots__ = ()
def meth(self, x):
raise NotImplementedError
class BoxedObject(TaggedBase):
attrvalue = 66
def __init__(self, normalint):
self.normalint = normalint
def meth(self, x):
return self.normalint + x + 2
class UnboxedObject(TaggedBase, UnboxedValue):
__slots__ = 'smallint'
def meth(self, x):
return self.smallint + x + 3
class TestHybridTaggedPointerGC(TaggedPointerGCTests):
gcname = "hybrid"
class gcpolicy(gc.BasicFrameworkGcPolicy):
class transformerclass(shadowstack.ShadowStackFrameworkGCTransformer):
from rpython.memory.gc.generation import GenerationGC as \
GCClass
GC_PARAMS = {'space_size': 512*WORD,
'nursery_size': 32*WORD,
'translated_to_c': False}
root_stack_depth = 200
def test_gettypeid(self):
py.test.skip("fails for obscure reasons")
| 31.904628
| 93
| 0.512638
| 5,174
| 45,496
| 4.327406
| 0.123695
| 0.013444
| 0.020322
| 0.024297
| 0.429879
| 0.340107
| 0.302099
| 0.273828
| 0.256856
| 0.242296
| 0
| 0.027162
| 0.389858
| 45,496
| 1,425
| 94
| 31.927018
| 0.779423
| 0.058027
| 0
| 0.459504
| 0
| 0
| 0.041585
| 0.011524
| 0
| 0
| 0.000561
| 0
| 0.042975
| 0
| null | null | 0.015702
| 0.038843
| null | null | 0.001653
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4b5c94f17a9cee798f64b657926900668bb67f6
| 5,431
|
py
|
Python
|
classify_images.py
|
rmsare/cs231a-project
|
91776ada3512d3805de0e66940c9f1c5b3c4c641
|
[
"MIT"
] | 2
|
2017-11-06T10:23:16.000Z
|
2019-11-09T15:11:19.000Z
|
classify_images.py
|
rmsare/cs231a-project
|
91776ada3512d3805de0e66940c9f1c5b3c4c641
|
[
"MIT"
] | null | null | null |
classify_images.py
|
rmsare/cs231a-project
|
91776ada3512d3805de0e66940c9f1c5b3c4c641
|
[
"MIT"
] | null | null | null |
"""
Classification of pixels in images using color and other features.
General pipeline usage:
1. Load and segment images (img_utils.py)
2. Prepare training data (label_image.py)
3. Train classifier or cluster data (sklearn KMeans, MeanShift, SVC, etc.)
4. Predict labels on new image or directory (classify_directory())
5. Apply classification to 3D points and estimate ground plane orientation (process_pointcloud.py)
Project uses the following directory structure:
images/ - contains binary files of numpy arrays corresponding to survey images and segmentations
labelled/ - contains labelled ground truth images or training data
results/ - contains results of classification
I store randomly split training and testing images in test/ and train/ directories.
Author: Robert Sare
E-mail: rmsare@stanford.edu
Date: 8 June 2017
"""
import numpy as np
import matplotlib.pyplot as plt
import skimage.color, skimage.io
from skimage.segmentation import mark_boundaries
from sklearn.svm import SVC
from sklearn.cluster import KMeans, MeanShift
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
import os, fnmatch
def classify_directory(classifier, test_dir, train_dir='train/'):
"""
Classify all images in a directory using an arbitrary sklearn classifier.
Saves results to results/ directory.
"""
# XXX: This is here if the classifier needs to be trained from scratch
#print("Preparing training data...")
#n_samples = 1000
#train_data, train_labels = load_training_images(train_dir, n_samples)
#
#print("Training classifier...")
#classifier = ImageSVC()
#classifier.fit(train_data, train_labels)
files = os.listdir(test_dir)
for f in files:
image = skimage.io.imread(os.path.join(test_dir, f))
height, width, depth = image.shape
print("Predicting labels for " + f.strip('.JPG') + ".jpg")
features = compute_colorxy_features(image)
features /= features.max(axis=0)
pred_labels = classifier.predict(features)
print("Saving predictions for " + f.strip('.JPG') + ".jpg")
plt.figure()
plt.imshow(image)
plt.imshow(pred_labels.reshape((height, width)), alpha=0.5, vmin=0, vmax=2)
plt.show(block=False)
plt.savefig('results/' + os.path.splitext(f)[0] + '_svm_pred.png')
plt.close()
np.save('results/' + os.path.splitext(f)[0] + 'svm.npy', pred_labels.reshape((height,width)))
def compute_colorxy_features(image):
"""
Extract and normalize color and pixel location features from image data
"""
    height, width, depth = image.shape
    colors = skimage.color.rgb2lab(image.reshape((height*width, depth)))
    # indexing='ij' keeps the grid in row-major pixel order, matching the
    # row-major reshape of the color array above
    X, Y = np.meshgrid(np.arange(height), np.arange(width), indexing='ij')
    xy = np.hstack([X.reshape((height*width, 1)), Y.reshape((height*width, 1))])
    colorxy = np.hstack([xy, colors])
    colorxy /= colorxy.max(axis=0)
return colorxy
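# Example (hedged sketch, not part of the original file): for a synthetic
# 4x3 RGB image, compute_colorxy_features returns one row per pixel with
# [x, y, L, a, b] columns:
#
#   img = (np.random.rand(4, 3, 3) * 255).astype(np.uint8)
#   compute_colorxy_features(img).shape   # -> (12, 5)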
def load_ground_truth(filename):
"""
Load ground truth or training image array and redefine labelling for nice
default colors
"""
truth = np.load(filename)
# Change labels for nice default colorscale when plotted
truth = truth - 1
truth[truth == -1] = 0
truth[truth == 0] = 5
truth[truth == 2] = 0
truth[truth == 5] = 2
return truth
def load_image_labels(name):
"""
Load image and labels from previous labelling session
"""
fname = 'images/' + name + '_image.npy'
image = np.load(fname)
fname = 'labelled/' + name + '_labels.npy'
labels = np.load(fname)
return image, labels
def plot_class_image(image, segments, labels):
"""
Display image with segments and class label overlay
"""
plt.figure()
plt.subplot(1,2,1)
plt.imshow(mark_boundaries(image, segments, color=(1,0,0), mode='thick'))
plt.title('segmented image')
plt.subplot(1,2,2)
plt.imshow(image)
plt.imshow(labels, alpha=0.75)
cb = plt.colorbar(orientation='horizontal', shrink=0.5)
plt.title('predicted class labels')
plt.show(block=False)
def load_training_images(train_dir, n_samples=1000, n_features=3):
"""
Load training images from directory and subsample for training or validation
"""
train_data = np.empty((0, n_features))
train_labels = np.empty(0)
files = os.listdir(train_dir)
for f in files:
name = parse_filename(f)
image, labels = load_image_labels(name)
ht, wid, depth = image.shape
        # compute_color_features is not defined in this file; use the
        # colorxy features defined above (5 columns, hence n_features=5)
        train_data = np.append(train_data,
                               compute_colorxy_features(image), axis=0)
train_labels = np.append(train_labels,
labels.reshape(wid*ht, 1).ravel())
train_data, train_labels = shuffle(train_data, train_labels,
random_state=0, n_samples=n_samples)
return train_data, train_labels
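# NOTE: parse_filename is called by load_training_images above but is not
# defined anywhere in this file. A minimal sketch follows, assuming the
# naming convention used by load_image_labels ('<name>_image.npy' /
# '<name>_labels.npy'); this helper is not part of the original file.
def parse_filename(f):
    base = os.path.basename(f)
    for suffix in ('_image.npy', '_labels.npy'):
        if base.endswith(suffix):
            return base[:-len(suffix)]
    return os.path.splitext(base)[0]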
def save_prediction(name, pred_labels):
"""
Save predicted class labels
"""
np.save('results/' + name + '_pred', pred_labels)
if __name__ == "__main__":
# Load training data
train_dir = 'train/'
test_dir = 'test/'
    train_data, train_labels = load_training_images(train_dir)
# Train classifier
clf = SVC()
clf.fit(train_data, train_labels)
# Predict labels for test images
classify_directory(clf, test_dir)
| 30.857955
| 104
| 0.662861
| 712
| 5,431
| 4.933989
| 0.308989
| 0.025619
| 0.027896
| 0.039852
| 0.131512
| 0.047253
| 0.019357
| 0
| 0
| 0
| 0
| 0.01364
| 0.230528
| 5,431
| 175
| 105
| 31.034286
| 0.826992
| 0
| 0
| 0.120482
| 0
| 0
| 0.065334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.108434
| null | null | 0.024096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4cf41c3907f30d0f8d4b3c715caa3ef127581dc
| 5,353
|
py
|
Python
|
backend/services/apns_util.py
|
xuantan/viewfinder
|
992209086d01be0ef6506f325cf89b84d374f969
|
[
"Apache-2.0"
] | 645
|
2015-01-03T02:03:59.000Z
|
2021-12-03T08:43:16.000Z
|
backend/services/apns_util.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | null | null | null |
backend/services/apns_util.py
|
hoowang/viewfinder
|
9caf4e75faa8070d85f605c91d4cfb52c4674588
|
[
"Apache-2.0"
] | 222
|
2015-01-07T05:00:52.000Z
|
2021-12-06T09:54:26.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.
"""Apple Push Notification service utilities.
Original copyright for this code: https://github.com/jayridge/apnstornado
TokenToBinary(): converts a base64-encoded token into a binary value
CreateMessage(): formats a binary APNs message from parameters
ParseResponse(): parses APNs binary response for status & identifier
ErrorStatusToString(): converts error status to error message
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
import base64
import json
import struct
import time
from tornado import escape
_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""
_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""
def TokenToBinary(token):
return base64.b64decode(token)
def TokenFromBinary(bin_token):
return base64.b64encode(bin_token)
def CreateMessage(token, alert=None, badge=None, sound=None,
identifier=0, expiry=None, extra=None, allow_truncate=True):
token = TokenToBinary(token)
if len(token) != 32:
raise ValueError, u'Token must be a 32-byte binary string.'
if (alert is not None) and (not isinstance(alert, (basestring, dict))):
raise ValueError, u'Alert message must be a string or a dictionary.'
if expiry is None:
expiry = long(time.time() + 365 * 86400)
# Start by determining the length of the UTF-8 encoded JSON with no alert text. This allows us to
# determine how much space is left for the message.
# 'content-available': 1 is necessary to trigger iOS 7's background download processing.
aps = { 'alert' : '', 'content-available': 1 }
if badge is not None:
aps['badge'] = badge
if sound is not None:
aps['sound'] = sound
data = { 'aps' : aps }
if extra is not None:
data.update(extra)
# Create compact JSON representation with no extra space and no escaping of non-ascii chars (i.e. use
# direct UTF-8 representation rather than "\u1234" escaping). This maximizes the amount of space that's
# left for the alert text.
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
bytes_left = _MAX_PAYLOAD_BYTES - len(encoded)
if allow_truncate and isinstance(alert, basestring):
alert = _TruncateAlert(alert, bytes_left)
elif alert and len(escape.utf8(alert)) > bytes_left:
raise ValueError, u'max payload(%d) exceeded: %d' % (_MAX_PAYLOAD_BYTES, len(escape.utf8(alert)))
# Now re-encode including the alert text.
aps['alert'] = alert
encoded = escape.utf8(json.dumps(escape.recursive_unicode(data), separators=(',', ':'), ensure_ascii=False))
length = len(encoded)
assert length <= _MAX_PAYLOAD_BYTES, (encoded, length)
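  # Binary layout of the packed message (enhanced notification format):
  #   1-byte command (1) | 4-byte identifier | 4-byte expiry |
  #   2-byte token length (32) | 32-byte token |
  #   2-byte payload length | payload bytes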
return struct.pack('!bIIH32sH%(length)ds' % { 'length' : length },
1, identifier, expiry,
32, token, length, encoded)
def ParseResponse(bytes):
if len(bytes) != 6:
raise ValueError, u'response must be a 6-byte binary string.'
command, status, identifier = struct.unpack_from('!bbI', bytes, 0)
if command != 8:
raise ValueError, u'response command must equal 8.'
return status, identifier, ErrorStatusToString(status)
def ErrorStatusToString(status):
  # 'is' compares object identity and only happens to work for small cached
  # ints; compare by value via a lookup table instead.
  _ERROR_STATUS_MESSAGES = {
    0: 'No errors encountered',
    1: 'Processing error',
    2: 'Missing device token',
    3: 'Missing topic',
    4: 'Missing payload',
    5: 'Invalid token size',
    6: 'Invalid topic size',
    7: 'Invalid payload size',
    8: 'Invalid token',
    255: 'None (unknown)',
  }
  return _ERROR_STATUS_MESSAGES.get(status, '')
def _TruncateAlert(alert, max_bytes):
"""Converts the alert text to UTF-8 encoded JSON format, which is how
the alert will be stored in the APNS payload. If the number of
resulting bytes exceeds "max_bytes", then truncates the alert text
at a Unicode character boundary, taking care not to split JSON
escape sequences. Returns the truncated UTF-8 encoded alert text,
including a trailing ellipsis character.
"""
alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))
# Strip quotes added by JSON.
alert_json = alert_json[1:-1]
# Check if alert fits with no truncation.
if len(alert_json) <= max_bytes:
return escape.utf8(alert)
# Make room for an appended ellipsis.
assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
max_bytes -= len(_ELLIPSIS_BYTES)
# Truncate the JSON UTF8 string at a Unicode character boundary.
truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')
# If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
# chopping trailing characters until the truncated string is valid JSON. It may take several
# tries, such as in the case where a "\u1234" sequence has been split.
while True:
try:
alert = json.loads(u'"%s"' % truncated)
break
except Exception:
truncated = truncated[:-1]
# Return the UTF-8 encoding of the alert with the ellipsis appended to it.
return escape.utf8(alert) + _ELLIPSIS_BYTES
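# Example usage (hedged sketch, not part of the original file; the token and
# socket are made-up placeholders):
#
#   msg = CreateMessage(base64.b64encode('\x00' * 32),  # fake 32-byte token
#                       alert=u'Hello', badge=1, sound='default')
#   apns_socket.send(msg)
#   status, identifier, err = ParseResponse(apns_socket.recv(6))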
| 34.75974
| 110
| 0.713992
| 759
| 5,353
| 4.968379
| 0.322793
| 0.021215
| 0.02864
| 0.015115
| 0.09467
| 0.052241
| 0.052241
| 0.041368
| 0.041368
| 0.041368
| 0
| 0.019155
| 0.190547
| 5,353
| 153
| 111
| 34.986928
| 0.85045
| 0.198767
| 0
| 0.022989
| 0
| 0
| 0.151417
| 0.007379
| 0
| 0
| 0
| 0
| 0.022989
| 0
| null | null | 0
| 0.057471
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4d48c8aa150de0f108ac0a0655e92b6976fd528
| 41,579
|
py
|
Python
|
megaboat.py
|
xros/megaboat
|
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
|
[
"MIT"
] | 4
|
2015-06-07T18:44:02.000Z
|
2021-04-03T02:53:01.000Z
|
megaboat.py
|
xros/megaboat
|
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
|
[
"MIT"
] | null | null | null |
megaboat.py
|
xros/megaboat
|
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
|
[
"MIT"
] | 2
|
2015-03-27T04:24:55.000Z
|
2016-06-26T11:02:47.000Z
|
# -*- coding: utf-8 -*-
# Copyright to Alexander Liu.
# Any distribution of this copy should inform its author. For commercial use, please contact the author for authorization. Apr 2014
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from lxml import etree
import time
import json
import urllib
import urllib2
# For media posting
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
class ParsingContainer(object):
"""Parsing Wechat messages for whose types are of : 'text', 'image', 'voice', 'video', 'location', 'link'
After making a new instance of the class, need to declare the 'MsgType'
For example,
$~ python
>>> holder = ParsingContainer()
>>> hasattr(holder, "_Content")
>>> True
>>> holder.initType(MsgType='video')
>>> hasattr(holder, "_PicUrl")
>>> True
    >>> holder.initType(MsgType='text') # Or we can simply omit this call, since it is 'text' by default
>>> hasattr(holder, "_PicUrl")
>>> False
>>> hasattr(holder, "_Content")
>>> True
>>> holder.getElementByTag('Content')
>>> ''
"""
# By default, MsgType is set as 'text'
MsgType = 'text'
    # Unique tags across all the mapping relationships
#
# For those tags in-common of normal message
global commonTag
commonTag = ['ToUserName', 'FromUserName', 'CreateTime', 'MsgId', 'MsgType']
# For normal message mapping
global normalMapping
normalMapping = {
'text':['Content'],
'image':['PicUrl', 'MediaId'],
'voice':['MediaId','Format'],
        'video':['MediaId','ThumbMediaId'],
'location':['Location_X','Location_Y','Scale', 'Label'],
'link':['Title','Description','Url'],
}
# For event message mapping
global eventMapping
eventMapping = {
# The list presents the combined tag set of the event message
'event':['Event','EventKey','Ticket','Latitude','Longitude','Precision' ],
}
# For recognition message mapping
global recognitionMapping
recognitionMapping = {
'voice':['MediaId','Format','Recognition'],
}
def __init__(self, incomingMessage='<xml></xml>'):
# pre-set some common variables
root = etree.fromstring(incomingMessage)
# The 5 ones in common
if root.find('ToUserName') is not None:
self._ToUserName = root.find('ToUserName').text
else:
self._ToUserName = ''
if root.find('FromUserName') is not None:
self._FromUserName = root.find('FromUserName').text
else:
self._FromUserName = ''
if root.find('CreateTime') is not None:
self._CreateTime = root.find('CreateTime').text
else:
self._CreateTime = '1000000000'
if root.find('MsgType') is not None:
self._MsgType = root.find('MsgType').text
else:
self._MsgType = ''
if root.find('MsgId') is not None:
self._MsgId = root.find('MsgId').text
else:
self._MsgId = ''
# Store the XML incomingMessage if has
# For text message only
if self.MsgType == 'text':
if root.find('Content') is not None:
self._Content = root.find('Content').text
else:
self._Content = ''
# For image message only
elif self.MsgType == 'image':
if root.find('PicUrl') is not None:
self._PicUrl = root.find('PicUrl').text
else:
self._PicUrl = ''
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
# For voice message only
elif self.MsgType == 'voice':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('Format') is not None:
self._Format = root.find('Format').text
else:
self._Format = ''
# For video message only
elif self.MsgType == 'video':
if root.find('MediaId') is not None:
self._MediaId = root.find('MediaId').text
else:
self._MediaId = ''
if root.find('ThumbMediaId') is not None:
self._ThumbMediaId = root.find('ThumbMediaId').text
else:
self._ThumbMediaId = ''
# For location message only
elif self.MsgType == 'location':
if root.find('Location_X') is not None:
self._Location_X = root.find('Location_X').text
else:
self._Location_X = ''
if root.find('Location_Y') is not None:
self._Location_Y = root.find('Location_Y').text
else:
self._Location_Y = ''
if root.find('Scale') is not None:
self._Scale = root.find('Scale').text
else:
self._Scale = ''
if root.find('Label') is not None:
self._Label = root.find('Label').text
else:
self._Label = ''
# For link message only
elif self.MsgType == 'link':
if root.find('Title') is not None:
self._Title = root.find('Title').text
else:
self._Title = ''
if root.find('Description') is not None:
self._Description = root.find('Description').text
else:
self._Description = ''
if root.find('Url') is not None:
self._Url = root.find('Url').text
else:
self._Url = ''
# For event message only
elif self.MsgType == 'event':
# It has to have a ```self._Event``` for event message certainly
if root.find('Event') is not None:
self._Event = root.find('Event').text
else:
self._Event = ''
if root.find('EventKey') is not None:
self._EventKey = root.find('EventKey').text
if root.find('Ticket') is not None:
self._Ticket = root.find('Ticket').text
if root.find('Latitude') is not None:
self._Latitude = root.find('Latitude').text
if root.find('Longitude') is not None:
self._Longitude = root.find('Longitude').text
if root.find('Precision') is not None:
self._Precision = root.find('Precision').text
def initType(self, MsgType='text', incomingMessage='<xml></xml>'):
''' To initialize message type
'''
MsgType_list = ['text', 'image', 'voice', 'video', 'location', 'link', 'event']
if MsgType not in MsgType_list:
raise ValueError, "MsgType '%s' not valid " % MsgType
for i in MsgType_list:
if MsgType == i:
self.MsgType = i
break
# Delete the common tags
for c in commonTag:
try:
delattr(self, '_' + c)
except:
pass
# Delete the unuseful elements in normalMapping
for k in normalMapping:
if k !=self.MsgType:
for m in normalMapping[k]:
try:
delattr(self, '_' + m)
except:
pass
# Delete the unuseful elements in eventMapping
for k in eventMapping:
for e in eventMapping[k]:
try:
delattr(self, '_' + e)
except:
pass
self.__init__(incomingMessage)
# releasing method
def __del__(self):
pass
#@property
def getElementByTag(self, tag):
'''To get element from the tag
'''
try:
gotten = getattr(self, "_" + tag)
except:
return None
##raise ValueError
#tmp = "Instance has no attribute _%s" % tag
#raise AttributeError, tmp
else:
return gotten
def digest(self, incomingMessage):
        '''Digest the XML message passed from the wechat server.
        The 'incomingMessage' argument is an XML string.
        According to its content this will assign values to ```self.MsgType``` and friends.
        The logic is as follows:
        1) check the parent message type: "MsgType"
        2) check the subclass message type: "Voice Recognition", "Event" or "Normal"
        3) check the children class message type
        '''
root = etree.fromstring(incomingMessage)
msgType = root.find("MsgType").text
# Get message type based from the ```incomingMessage``` variable
if msgType in ['text', 'image', 'voice', 'video', 'location', 'link', 'event']:
# Check if the incomingMessage has tag 'Recognition' then, it is a voice recognition message
if root.find("Recognition") is not None:
self.type = 'recognition'
# Check if the incomingMessage has tag 'Event' then, it is a voice event message
elif root.find("Event") is not None:
self.type = 'event'
# After all then 'normal' message
else:
self.type = 'normal'
# For normal messages
if self.type == 'normal':
if msgType == 'text':
self.initType('text', incomingMessage)
elif msgType == 'image':
self.initType('image', incomingMessage)
elif msgType == 'voice':
self.initType('voice', incomingMessage)
elif msgType == 'video':
self.initType('video', incomingMessage)
elif msgType == 'location':
self.initType('location', incomingMessage)
elif msgType == 'link':
self.initType('link', incomingMessage)
elif msgType == 'image':
self.initType('image', incomingMessage)
# TODO
        # For voice recognition messages
if self.type == 'recognition':
self.initType('voice', incomingMessage)
            # Add ```self._Recognition```: the only field a recognition message carries beyond a normal 'voice' message
self._Recognition = root.find("Recognition").text
        # For event messages
if self.type == 'event':
self.initType('event', incomingMessage)
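# Example (hedged sketch, not part of the original file): digest an incoming
# text message and read a field back:
#
#   pc = ParsingContainer()
#   pc.digest('<xml><ToUserName>a</ToUserName><FromUserName>b</FromUserName>'
#             '<CreateTime>1397808770</CreateTime><MsgType>text</MsgType>'
#             '<MsgId>1</MsgId><Content>hi</Content></xml>')
#   pc.getElementByTag('Content')   # -> 'hi'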
class RespondingContainer(object):
"""Package XML to reponse to determained wechat message
For more information please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
Usage:
>>> rc = RespondingContainer()
    >>> rc.initType('text') # Or we can omit this since it is 'text' by default
>>> # Notice we don't need to set the 'CreateTime' since it has been generated automatically :)
>>> rc.setElementByTag(FromUserName='the_server', ToUserName='the_wechat_client',Content='Hello dude!')
>>> tpl_out = rc.dumpXML()
>>> tpl_out
>>><xml>
<ToUserName>the_wechat_client</ToUserName>
<FromUserName>the_server</FromUserName>
<CreateTime>1397808770</CreateTime>
<MsgType>text</MsgType>
<Content>Hello dude!</Content>
</xml>
>>>
"""
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set root as the 'text' XML format
the_tpl = globals()['tpl_' + self._MsgType].encode('utf-8').decode('utf-8')
self.root = etree.fromstring(the_tpl)
#print self.root.find("FromUserName").text
#print type(self.root.find("FromUserName").text)
def initType(self, MsgType='text'):
tpl_list = ['text', 'image', 'voice', 'video', 'music', 'news']
if MsgType not in tpl_list:
raise ValueError, "Invalid responsing message MsgType '%s'" % MsgType
else:
## Load the template
#for i in tpl_list:
# if MsgType == i:
# self._MsgType = MsgType
# ## the the template
# the_xml = globals()['tpl_'+i]
# self.root = etree.fromstring( the_xml )
# break
## Set the default tag value
### Get all the tags
#child_list = []
#for child in self.root.getchildren():
# child_list += [str(child)]
### Attach 'tag' object to class to make something as : 'self._FromUserName'
#for i in child_list:
# if i == 'CreateTime':
# setattr(self,"_"+i, str(int(time.time())))
# else:
# setattr(self,"_"+i, '')
self.__init__(MsgType)
#def setElementByTag(self, tag):
def setElementByTag(self, **kwargs):
""" To package XML message into an object
Usage:
>>> setElementByTag(FromUserName='the_wechat_server',ToUserName='the_wechat_client',Content='Hello dude!')
# In this way we can then use ```dumpXML()``` to get the XML we need to reponse to wechat clients! :)
"""
## assign the basic time
self.root.find('CreateTime').text = str(int(time.time()))
#print "-----"
#print self._MsgType
## For text message only
if self._MsgType == 'text':
# To set attribute value to such as: 'self._FromUsername'
for k, v in kwargs.items():
try:
## assign value to the object
#getattr(self, "_"+k) = v
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
#raise AttributeError, "Message type '%s' has no attribute/tag '%s'" % (self._MsgType, k)
## For image message only
elif self._MsgType == 'image':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Image').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Voice').find('MediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
# To set attribute value of the XML special for image
for k, v in kwargs.items():
if k == 'MediaId':
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
        ## For article message only
        ## NOTE: this branch appears to be an unfinished copy of the 'video'
        ## branch above (see the TODO below); 'article' is also absent from
        ## tpl_list in initType, so it is effectively unreachable as written.
        elif self._MsgType == 'article':
            # To set attribute value of the XML special for article
for k, v in kwargs.items():
if k == 'ArticleCount':
self.root.find(k).text = v
if k == 'Articles':
# TODO to generate articles as
#print v
#print etree.tostring(self.root)
self.root.find('Video').find('MediaId').text = v
elif k == 'Title':
self.root.find('Video').find('Title').text = v
elif k == 'Description':
self.root.find('Video').find('Description').text = v
elif k == 'MusicUrl':
self.root.find('Video').find('MusicUrl').text = v
elif k == 'HQMusicUrl':
self.root.find('Video').find('HQMusicUrl').text = v
elif k == 'ThumbMediaId':
self.root.find('Video').find('ThumbMediaId').text = v
else:
try:
## assign/update value to the new XML object
self.root.find(k).text = v
except Exception as e:
print e
raise e
def dumpXML(self):
# To dump the XML we need
# the ```self.root``` has been assigned already
return etree.tostring(self.root, encoding='utf-8',method='xml',pretty_print=True)
# Below are the templates of all the responding messages valid for wechat
# For more information, please visit : http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF
global tpl_text
global tpl_image
global tpl_voice
global tpl_video
global tpl_music
global tpl_news
tpl_text = u'''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[你好]]></Content>
</xml>'''
tpl_image = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<Image>
<MediaId><![CDATA[media_id]]></MediaId>
</Image>
</xml>'''
tpl_voice = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<Voice>
<MediaId><![CDATA[media_id]]></MediaId>
</Voice>
</xml>'''
tpl_video = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<Video>
<MediaId><![CDATA[media_id]]></MediaId>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
</Video>
</xml>'''
tpl_music = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[music]]></MsgType>
<Music>
<Title><![CDATA[TITLE]]></Title>
<Description><![CDATA[DESCRIPTION]]></Description>
<MusicUrl><![CDATA[MUSIC_Url]]></MusicUrl>
<HQMusicUrl><![CDATA[HQ_MUSIC_Url]]></HQMusicUrl>
<ThumbMediaId><![CDATA[media_id]]></ThumbMediaId>
</Music>
</xml>'''
tpl_news = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[news]]></MsgType>
<ArticleCount>2</ArticleCount>
<Articles>
<item>
<Title><![CDATA[title1]]></Title>
<Description><![CDATA[description1]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
<item>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
</Articles>
</xml>'''
# Positive response
class PositiveRespondingContainer(object):
'''Using wechat custom service API to pass 6 types of messages to those wechat clients \n
who sent messages to the public wechat service. Those 6 types of messages include:
text, image, voice, video, music, news
    The dumped result is of dict format.
    We need to json.dumps(the_dict_object) if we want to pass the right response back.
'''
def __init__(self, MsgType='text'):
self._MsgType = MsgType
# By default set the ```self.the_dict``` as from the 'text' JSON format
the_json_tpl = globals()['json_' + self._MsgType].encode('utf-8').decode('utf-8')
self.the_dict = json.loads(the_json_tpl)
if MsgType == 'text':
pass
def initType(self, MsgType='text'):
if MsgType not in ['text', 'image', 'voice', 'video', 'music', 'news']:
raise ValueError, "It has no message type: '%s'" % MsgType
else:
# pass the message type to have ```self.the_dict```
self.__init__(MsgType)
def setElementByKey(self, **kwargs):
        '''Set the ```self.the_dict``` fields according to the message type chosen via ```initType(MsgType='text')```
        Notice: all the kwargs keys in this function should be lower case. The official wechat API defines them that way, so don't complain.'''
## For text message only
if self._MsgType == 'text':
for k, v in kwargs.items():
try:
if k == 'content':
self.the_dict['text'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For image message only
elif self._MsgType == 'image':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['image'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For voice message only
elif self._MsgType == 'voice':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['voice'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For video message only
elif self._MsgType == 'video':
for k, v in kwargs.items():
try:
if k == 'media_id':
self.the_dict['video'][k] = v
elif k == 'title':
self.the_dict['video'][k] = v
elif k == 'description':
self.the_dict['video'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For music message only
elif self._MsgType == 'music':
for k, v in kwargs.items():
try:
if k == 'musicurl':
self.the_dict['music'][k] = v
elif k == 'title':
self.the_dict['music'][k] = v
elif k == 'description':
self.the_dict['music'][k] = v
elif k == 'hqmusicurl':
self.the_dict['music'][k] = v
elif k == 'thumb_media_id':
self.the_dict['music'][k] = v
else:
self.the_dict[k] = v
except Exception as e:
print e
raise e
## For news message only
elif self._MsgType == 'news':
for k, v in kwargs.items():
try:
                    # here we just check whether ```v``` is of type list; ```v``` should already be packaged in a list
                    # if it is a list, then it is the element of the key ```articles``` for the news message
'''
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
'''
if k == 'articles':
if type(v) == list:
self.the_dict['news'][k] = v
else:
raise ValueError, "The value of the key 'articles' should be of type list"
elif k == 'touser':
self.the_dict['touser'] = v
elif k == 'msgtype':
self.the_dict['msgtype'] = 'news'
except Exception as e:
print e
raise e
    # package article
    @staticmethod
    def packageArticle(title="default title", description="default description", url="http://www.baidu.com", picurl="http://www.baidu.com/img/bdlogo.gif"):
        '''Return an article as a list containing a single dict, for use while
        constructing the JSON payload.
        This is used with the function ```setElementByKey(touser='someone', msgtype='news', articles=packageArticle())```
        (declared @staticmethod here since it takes no ``self`` argument)
        '''
        return [{"title": title, "description": description, "url": url, "picurl": picurl}]
    # dump the dict for later JSON serialization
def dumpDict(self):
return self.the_dict
json_text = '''{
"touser":"OPENID",
"msgtype":"text",
"text":
{
"content":"Hello World"
}
}'''
json_image = '''{
"touser":"OPENID",
"msgtype":"image",
"image":
{
"media_id":"MEDIA_ID"
}
}'''
json_voice = '''{
"touser":"OPENID",
"msgtype":"voice",
"voice":
{
"media_id":"MEDIA_ID"
}
}'''
json_video = '''{
"touser":"OPENID",
"msgtype":"video",
"video":
{
"media_id":"MEDIA_ID",
"title":"TITLE",
"description":"DESCRIPTION"
}
}'''
json_music = '''{
"touser":"OPENID",
"msgtype":"music",
"music":
{
"title":"MUSIC_TITLE",
"description":"MUSIC_DESCRIPTION",
"musicurl":"MUSIC_URL",
"hqmusicurl":"HQ_MUSIC_URL",
"thumb_media_id":"THUMB_MEDIA_ID"
}
}'''
json_news = '''{
"touser":"OPENID",
"msgtype":"news",
"news":{
"articles": [
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
},
{
"title":"Happy Day",
"description":"Is Really A Happy Day",
"url":"URL",
"picurl":"PIC_URL"
}
]
}
}'''
class SubscriberManager(object):
'''To manage the subscriber groups, profile, location, list.
Usage:
>>> sm = SubscriberManager()
>>> sm.loadToken('abcdefg1234567')
>>> hisprofile = sm.getSubscriberProfile(openid='his_open_id', lang='zh_CN')
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Firstly load the access token, then use the functions below'''
self._token = token
def getSubscriberProfile(self, openid='', lang='zh_CN'):
        '''The open_id parameter is unique per wechat public service account.
        This function will return a dict if ```token``` and ```open_id``` are valid.
        If they do not exist or are not valid it will return None.
        Besides 'zh_CN', the lang parameter also accepts: 'zh_TW', 'en'.
        For more information: please visit, http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E7%94%A8%E6%88%B7%E5%9F%BA%E6%9C%AC%E4%BF%A1%E6%81%AF'''
url = "https://api.weixin.qq.com/cgi-bin/user/info?access_token=" + self._token + "&openid=" + openid + "&lang=" + lang
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
# means wrong appid or secret
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def createGroup(self, name=''):
        '''Create a group with the given name.
        If created, it will return the new group id of type 'int'.
        If not, it will return None.
        '''
url = "https://api.weixin.qq.com/cgi-bin/groups/create?access_token=" + self._token
postData = '{"group": {"name": "%s"} }' % name
request = urllib2.Request(url,data=postData)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['group']['id']
def getAllgroups(self):
''' A dict will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E6.89.80.E6.9C.89.E5.88.86.E7.BB.84
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/get?access_token=" + self._token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getHisGroupID(self, openid=''):
'''Get a subscriber's group ID. The ID is of type 'int'.
If openid wrong or token invalid, 'None' will be returned.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E6.9F.A5.E8.AF.A2.E7.94.A8.E6.88.B7.E6.89.80.E5.9C.A8.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/getid?access_token="+ self._token
postData = '{"openid":"%s"}' % openid
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
return None
else:
return a_dict['groupid']
def updateGroupName(self, groupid='', new_name=''):
        '''Update the given group id with new_name.
        Returns True if updated, False otherwise.
For more information, please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E4.BF.AE.E6.94.B9.E5.88.86.E7.BB.84.E5.90.8D
'''
url = "https://api.weixin.qq.com/cgi-bin/groups/update?access_token=" + self._token
postData = '{"group":{"id":%s,"name":"%s"}}' % (groupid, new_name)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def moveHimToGroup(self, openid='', groupid=''):
        '''Move a subscriber to another group.
        Returns True if moved, False otherwise.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E5%88%86%E7%BB%84%E7%AE%A1%E7%90%86%E6%8E%A5%E5%8F%A3#.E7.A7.BB.E5.8A.A8.E7.94.A8.E6.88.B7.E5.88.86.E7.BB.84'''
url = "https://api.weixin.qq.com/cgi-bin/groups/members/update?access_token=" + self._token
postData = '{"openid":"%s","to_groupid":%s}' % (openid, groupid)
request = urllib2.Request(url,data=postData)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
def getSubscriberList(self, next_openid=''):
        '''Get the subscriber list.
        If ```token``` and ```next_openid``` are valid, a dict will be returned.
        If the ```next_openid``` does not exist, the official wechat server treats it as '' by default.
        If not valid, None will be returned.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%8E%B7%E5%8F%96%E5%85%B3%E6%B3%A8%E8%80%85%E5%88%97%E8%A1%A8
'''
url = "https://api.weixin.qq.com/cgi-bin/user/get?access_token=" + self._token + "&next_openid=" + next_openid
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
a_dict = json.loads(response.read())
#print a_dict
if a_dict.has_key('errcode'):
return None
else:
return a_dict
def getAPIToken(appid='', appsecret=''):
    '''Get the wechat API token for customer service or other uses.
    If ```appid``` and ```appsecret``` are correct, the token string will be returned.
    If not, None is returned. '''
default_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&'
url = default_url + 'appid=' + appid + '&secret=' + appsecret
try:
a = urllib2.urlopen(url)
except Exception as e:
print e
return None
else:
gotten = a.read()
a_dict = json.loads(gotten)
if a_dict.has_key('access_token'):
return a_dict['access_token']
# means wrong appid or secret
else:
return None
def postMessage2API(token='',messageString=''):
    '''Using the token, post the message to the specified user.
    Returns a Boolean value.'''
url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + token
request = urllib2.Request(url, messageString)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
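# Example (hedged sketch, not part of the original file; appid/secret/openid
# are made-up placeholders):
#
#   token = getAPIToken(appid='YOUR_APPID', appsecret='YOUR_SECRET')
#   prc = PositiveRespondingContainer('text')
#   prc.setElementByKey(touser='his_open_id', msgtype='text', content='Hello dude!')
#   postMessage2API(token, json.dumps(prc.dumpDict(), ensure_ascii=False))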
class MenuManager(object):
'''To manage the bottom menu of the wechat service
Usage:
>>> mm = MenuManager()
>>> mm.loadToken('something_the_api_token')
>>> flag = mm.createMenu('the_menu_format_constructed_from_a_JSON_as_a_string')
>>> flag
True
>>> menu_got = mm.getMenu()
>>> menu_got
{u'menu': {u'button': [{u'type': u'click', u'name': u'\u7b2c\u4e00\u94ae', u'key': u'V1001_TODAY_MUSIC', u'sub_button': []}, {u'type': u'click', u'name': u'\u7b2c\u4e8c\u94ae', u'key': u'V1001_TODAY_SINGER', u'sub_button': []}, {u'name': u'\u7b2c\u4e09\u94ae', u'sub_button': [{u'url': u'http://www.soso.com/', u'type': u'view', u'name': u'\u641c\u641c', u'sub_button': []}, {u'url': u'http://v.qq.com/', u'type': u'view', u'name': u'\u770b\u7535\u5f71', u'sub_button': []}, {u'type': u'click', u'name': u'\u5938\u6211\u5e05', u'key': u'V1001_GOOD', u'sub_button': []}]}]}}
>>> flag2 = mm.deleteMenu()
>>> flag2
True
>>> mm.getMenu()
>>> # nothing gotten: it means no menu at all
'''
def __init__(self, token=''):
self._token = token
def loadToken(self, token=''):
'''Load the token before using other functions'''
self._token = token
def createMenu(self, menu_format=''):
'''Create menu, it needs a token and the menu format.
The ```menu_format``` is of type string.
But ```menu_format``` is constructed from a JSON.
For more information please visit:
http://mp.weixin.qq.com/wiki/index.php?title=%E8%87%AA%E5%AE%9A%E4%B9%89%E8%8F%9C%E5%8D%95%E5%88%9B%E5%BB%BA%E6%8E%A5%E5%8F%A3
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/create?access_token=" + token
request = urllib2.Request(url, menu_format)
request.get_method = lambda : 'POST'
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return False
else:
j = json.loads(response.read())
# The above works
#print j
# to check if the message was accepted
if j['errcode'] == 0:
return True
else:
return False
def getMenu(self):
'''Get the menu format from the API.
If there be, then a dict would be returned.
If not, 'None' will be returned.
'''
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/get?access_token="+ token
try:
response = urllib2.urlopen(url)
except Exception as e:
# its better to raise something here if the wechat remote server is down
print e
return None
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] != 0:
return None
else:
return a_dict
else:
return a_dict
def deleteMenu(self):
token = self._token
url = "https://api.weixin.qq.com/cgi-bin/menu/delete?access_token=" + token
try:
response = urllib2.urlopen(url)
except Exception as e:
print e
return False
else:
a_dict = json.loads(response.read())
if a_dict.has_key('errcode'):
if a_dict['errcode'] == 0:
return True
else:
return False
else:
return False
class MediaManager(object):
    '''There are four types of media supported by wechat:
image, voice, video, thumb
Post the file to the offical wechat server and get the response.
'''
def __init__(self, media_type='image', token = ''):
self._media_type = media_type
self._token = token
def loadToken(self, token = ''):
self._token = token
def uploadMedia(self, media_type='image', media_path=''):
        '''Post the given media file to the official URL.
        If the media is valid, a dict will be returned.
        If not, None will be returned.
        For more information, please visit: http://mp.weixin.qq.com/wiki/index.php?title=%E4%B8%8A%E4%BC%A0%E4%B8%8B%E8%BD%BD%E5%A4%9A%E5%AA%92%E4%BD%93%E6%96%87%E4%BB%B6'''
if media_type not in ['image', 'voice', 'video', 'thumb']:
raise ValueError, "Media type: '%s' not valid" % media_type
else:
self._media_type = media_type
url = "http://file.api.weixin.qq.com/cgi-bin/media/upload?access_token=" + self._token + "&type=" + self._media_type
register_openers()
try:
datagen, headers = multipart_encode({"image1": open(media_path,"rb")})
except Exception as e:
#print e
return None
#raise e
else:
request = urllib2.Request(url,data=datagen,headers=headers)
try:
response = urllib2.urlopen(request)
except Exception as e:
print e
return None
| 37.391187
| 577
| 0.527237
| 4,799
| 41,579
| 4.487602
| 0.113982
| 0.028603
| 0.011283
| 0.016298
| 0.474043
| 0.434389
| 0.4095
| 0.384751
| 0.333488
| 0.320347
| 0
| 0.018928
| 0.343082
| 41,579
| 1,111
| 578
| 37.424842
| 0.769532
| 0.102143
| 0
| 0.572592
| 0
| 0.004071
| 0.216631
| 0.070952
| 0
| 0
| 0
| 0.0018
| 0
| 0
| null | null | 0.006784
| 0.010855
| null | null | 0.033921
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
d4f0759288304875f2de20fc2b91d86d509cb718
| 3,820
|
py
|
Python
|
examples/add_compensation_to_sample.py
|
whitews/ReFlowRESTClient
|
69369bbea501382291b71facea7a511ab8f7848b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/add_compensation_to_sample.py
|
whitews/ReFlowRESTClient
|
69369bbea501382291b71facea7a511ab8f7848b
|
[
"BSD-3-Clause"
] | null | null | null |
examples/add_compensation_to_sample.py
|
whitews/ReFlowRESTClient
|
69369bbea501382291b71facea7a511ab8f7848b
|
[
"BSD-3-Clause"
] | null | null | null |
import getpass
import sys
import json
from reflowrestclient.utils import *
host = raw_input('Host: ')
username = raw_input('Username: ')
password = getpass.getpass('Password: ')
token = get_token(host, username, password)
if token:
print "Authentication successful"
print '=' * 40
else:
print "No token for you!!!"
sys.exit()
def start():
# Projects
project_list = get_projects(host, token)
for i, result in enumerate(project_list['data']):
print i, ':', result['project_name']
project_choice = raw_input('Choose Project:')
project = project_list['data'][int(project_choice)]
# Subjects
subject_list = get_subjects(host, token, project_pk=project['id'])
for i, result in enumerate(subject_list['data']):
print i, ':', result['subject_id']
subject_choice = raw_input('Choose Subject (leave blank for all subjects): ')
subject = None
if subject_choice:
subject = subject_list['data'][int(subject_choice)]
# Sites
site_list = get_sites(host, token, project_pk=project['id'])
if not site_list:
sys.exit('There are no sites')
for i, result in enumerate(site_list['data']):
print i, ':', result['site_name']
site_choice = raw_input('Choose Site (required): ')
site = site_list['data'][int(site_choice)]
# Samples
sample_args = [host, token]
sample_kwargs = {'site_pk': site['id']}
if subject:
sample_kwargs['subject_pk'] = subject['id']
sample_list = get_samples(*sample_args, **sample_kwargs)
if not sample_list:
sys.exit('There are no samples')
for i, result in enumerate(sample_list['data']):
print i, ':', result['original_filename']
sample_choice = raw_input('Choose Sample (leave blank for all samples): ')
sample = None
if sample_choice:
sample = sample_list['data'][int(sample_choice)]
# Compensation
compensation_list = get_compensations(host, token, site_pk=site['id'], project_pk=project['id'])
if not compensation_list:
sys.exit('There are no compensations')
for i, result in enumerate(compensation_list['data']):
print i, ':', result['original_filename']
compensation_choice = raw_input('Choose Compensation (required): ')
compensation = compensation_list['data'][int(compensation_choice)]
# Now have user verify information
print '=' * 40
print 'You chose to add this compensation to these samples:'
    print '\tCompensation: %s' % compensation['original_filename']
print 'Samples:'
if sample:
print '\t%s' % sample['original_filename']
else:
for s in sample_list['data']:
print '\t%s' % s['original_filename']
print '=' * 40
apply_choice = None
while apply_choice not in ['continue', 'exit']:
        apply_choice = raw_input("Type 'continue' to upload, 'exit' to abort: ")
if apply_choice == 'exit':
sys.exit()
print 'continue'
if sample:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id'])
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
else:
for sample in sample_list['data']:
response_dict = add_compensation_to_sample(
host,
token,
sample_pk=str(sample['id']),
compensation_pk=str(compensation['id']),
)
print "Response: ", response_dict['status'], response_dict['reason']
print 'Data: '
print json.dumps(response_dict['data'], indent=4)
while True:
start()
| 28.939394
| 100
| 0.625654
| 458
| 3,820
| 5.028384
| 0.189956
| 0.041685
| 0.033869
| 0.026053
| 0.330004
| 0.258359
| 0.195397
| 0.164134
| 0.164134
| 0.164134
| 0
| 0.002776
| 0.24555
| 3,820
| 132
| 101
| 28.939394
| 0.796322
| 0.020157
| 0
| 0.27957
| 0
| 0
| 0.192668
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.032258
| 0.043011
| null | null | 0.236559
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
be05301485051b024d0504eecb5189daad437a58
| 3,242
|
py
|
Python
|
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
|
marioluan/mit-opencourseware-cs
|
5de013f8e321fed2ff3b7a13e8929a44805db78b
|
[
"MIT"
] | null | null | null |
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
|
marioluan/mit-opencourseware-cs
|
5de013f8e321fed2ff3b7a13e8929a44805db78b
|
[
"MIT"
] | null | null | null |
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
|
marioluan/mit-opencourseware-cs
|
5de013f8e321fed2ff3b7a13e8929a44805db78b
|
[
"MIT"
] | 1
|
2020-05-19T13:29:18.000Z
|
2020-05-19T13:29:18.000Z
|
# 6.00 Problem Set 2
#
# Hangman
# Name : Solutions
# Collaborators : <your collaborators>
# Time spent : <total time>
# -----------------------------------
# Helper code
# You don't need to understand this helper code,
# but you will have to know how to use the functions
import random
import string
WORDLIST_FILENAME = "words.txt"
def load_words():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# line: string
line = inFile.readline()
# wordlist: list of strings
wordlist = string.split(line)
print " ", len(wordlist), "words loaded."
return wordlist
def choose_word(wordlist):
"""
wordlist (list): list of words (strings)
Returns a word from wordlist at random
"""
return random.choice(wordlist)
# end of helper code
# -----------------------------------
# load the list of words into the wordlist variable
# so that it can be accessed from anywhere in the program
wordlist = load_words()
def partial_word(secret_word, guessed_letters):
"""
Return the secret_word in user-visible format, with underscores used
to replace characters that have not yet been guessed.
"""
result = ''
for letter in secret_word:
if letter in guessed_letters:
result = result + letter
else:
result = result + '_'
return result
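# Example: partial_word('apple', 'ap') returns 'app__'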
def hangman():
"""
Runs the hangman game.
"""
print 'Welcome to the game, Hangman!'
secret_word = choose_word(wordlist)
print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.'
num_guesses = 8
word_guessed = False
guessed_letters = ''
    available_letters = list(string.ascii_lowercase)
# Letter-guessing loop. Ask the user to guess a letter and respond to the
# user based on whether the word has yet been correctly guessed.
while num_guesses > 0 and not word_guessed:
print '-------------'
print 'You have ' + str(num_guesses) + ' guesses left.'
print 'Available letters: ' + ''.join(available_letters)
guess = raw_input('Please guess a letter:')
if guess not in available_letters:
print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters)
elif guess not in secret_word:
num_guesses -= 1
available_letters.remove(guess)
print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters)
else:
available_letters.remove(guess)
guessed_letters += guess
print 'Good guess: ' + partial_word(secret_word, guessed_letters)
if secret_word == partial_word(secret_word, guessed_letters):
word_guessed = True
if word_guessed:
print 'Congratulations, you won!'
else:
print 'Game over.'
| 32.42
| 108
| 0.604874
| 415
| 3,242
| 4.616867
| 0.404819
| 0.057411
| 0.044363
| 0.054802
| 0.095511
| 0.095511
| 0.04071
| 0
| 0
| 0
| 0
| 0.003383
| 0.270512
| 3,242
| 99
| 109
| 32.747475
| 0.806765
| 0.189081
| 0
| 0.096154
| 0
| 0
| 0.1527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.038462
| null | null | 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 1
|
0779ab4524c7785b80eb2c94fee42447c65c7dbc
| 8,824
|
py
|
Python
|
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | 1
|
2020-11-17T16:14:06.000Z
|
2020-11-17T16:14:06.000Z
|
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | null | null | null |
utils.py
|
g4idrijs/CardiacUltrasoundPhaseEstimation
|
6bd2e157240133b6e306a7ca931d3d3b96647b88
|
[
"Apache-2.0"
] | 1
|
2020-06-28T09:19:02.000Z
|
2020-06-28T09:19:02.000Z
|
import os, time
import numpy as np
import scipy.signal
import scipy.misc
import scipy.ndimage.filters
import matplotlib.pyplot as plt
import PIL
from PIL import ImageDraw
import angles
import cv2
import SimpleITK as sitk
def cvShowImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
imDisp = cv2.cvtColor(imDisp, cv2.COLOR_GRAY2RGB)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def cvShowColorImage(imDisp, strName, strAnnotation='', textColor=(0, 0, 255),
resizeAmount=None):
if resizeAmount is not None:
imDisp = cv2.resize(imDisp.copy(), None, fx=resizeAmount,
fy=resizeAmount)
if len(strAnnotation) > 0:
cv2.putText(imDisp, strAnnotation, (10, 20), cv2.FONT_HERSHEY_PLAIN,
2.0, textColor, thickness=2)
cv2.imshow(strName, imDisp)
def mplotShowImage(imInput):
plt.imshow(imInput, cmap=plt.cm.gray)
plt.grid(False)
plt.xticks(())
plt.yticks(())
def normalizeArray(a):
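    # NOTE: normalizeArray is redefined with a 'bounds' parameter later in
    # this module; that later definition shadows this one at import time.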
return np.single(0.0 + a - a.min()) / (a.max() - a.min())
def AddTextOnImage(imInput, strText, loc=(2, 2), color=255):
imInputPIL = PIL.Image.fromarray(imInput)
d = ImageDraw.Draw(imInputPIL)
d.text(loc, strText, fill=color)
return np.asarray(imInputPIL)
def AddTextOnVideo(imVideo, strText, loc=(2, 2)):
imVideoOut = np.zeros_like(imVideo)
for i in range(imVideo.shape[2]):
imVideoOut[:, :, i] = AddTextOnImage(imVideo[:, :, i], strText, loc)
return imVideoOut
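# Example (hedged sketch, not part of the original file): stamp a caption on
# every frame of a small random video:
#
#   vid = (np.random.rand(64, 64, 10) * 255).astype('uint8')
#   labelled = AddTextOnVideo(vid, 'frame label')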
def cvShowVideo(imVideo, strWindowName, waitTime=30, resizeAmount=None):
if not isinstance(imVideo, list):
imVideo = [imVideo]
strWindowName = [strWindowName]
# find max number of frames
maxFrames = 0
for vid in range(len(imVideo)):
if imVideo[vid].shape[-1] > maxFrames:
maxFrames = imVideo[vid].shape[2]
# display video
blnLoop = True
fid = 0
while True:
for vid in range(len(imVideo)):
curVideoFid = fid % imVideo[vid].shape[2]
imCur = imVideo[vid][:, :, curVideoFid]
# resize image if requested
if resizeAmount:
imCur = scipy.misc.imresize(imCur, resizeAmount)
# show image
cvShowImage(imCur, strWindowName[vid], '%d' % (curVideoFid + 1))
# look for "esc" key
k = cv2.waitKey(waitTime) & 0xff
if blnLoop:
if k == 27:
break
elif k == ord(' '):
blnLoop = False
else:
fid = (fid + 1) % maxFrames
else:
if k == 27: # escape
break
elif k == ord(' '): # space
blnLoop = True
elif k == 81: # left arrow
fid = (fid - 1) % maxFrames
elif k == 83: # right arrow
fid = (fid + 1) % maxFrames
for vid in range(len(imVideo)):
cv2.destroyWindow(strWindowName[vid])
def normalizeArray(a, bounds=None):
if bounds is None:
return (0.0 + a - a.min()) / (a.max() - a.min())
else:
b = (0.0 + a - bounds[0]) / (bounds[1] - bounds[0])
b[b < 0] = bounds[0]
b[b > bounds[1]] = bounds[1]
return b
def loadVideoFromFile(dataFilePath, sigmaSmooth=None, resizeAmount=None):
vidseq = cv2.VideoCapture(dataFilePath)
print vidseq, vidseq.isOpened()
# print metadata
metadata = {}
numFrames = vidseq.get(cv2.CAP_PROP_FRAME_COUNT)
print '\tFRAME_COUNT = ', numFrames
metadata['FRAME_COUNT'] = numFrames
frameHeight = vidseq.get(cv2.CAP_PROP_FRAME_HEIGHT)
if frameHeight > 0:
print '\tFRAME HEIGHT = ', frameHeight
metadata['FRAME_HEIGHT'] = frameHeight
frameWidth = vidseq.get(cv2.CAP_PROP_FRAME_WIDTH)
if frameWidth > 0:
print '\tFRAME WIDTH = ', frameWidth
metadata['FRAME_WIDTH'] = frameWidth
fps = vidseq.get(cv2.CAP_PROP_FPS)
if fps > 0:
print '\tFPS = ', fps
metadata['FPS'] = fps
fmt = vidseq.get(cv2.CAP_PROP_FORMAT)
if fmt > 0:
        print '\tFORMAT = ', fmt
metadata['FORMAT'] = fmt
vmode = vidseq.get(cv2.CAP_PROP_MODE)
if vmode > 0:
        print '\tMODE = ', vmode
        metadata['MODE'] = vmode
# smooth if wanted
if sigmaSmooth:
wSmooth = 4 * sigmaSmooth + 1
print metadata
# read video frames
imInput = []
fid = 0
prevPercent = 0
print '\n'
while True:
valid_object, frame = vidseq.read()
if not valid_object:
break
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if resizeAmount:
frame = scipy.misc.imresize(frame, resizeAmount)
if sigmaSmooth:
frame = cv2.GaussianBlur(frame, (wSmooth, wSmooth), 0)
imInput.append(frame)
# update progress
fid += 1
curPercent = np.floor(100.0 * fid / numFrames)
if curPercent > prevPercent:
prevPercent = curPercent
print '%.2d%%' % curPercent,
print '\n'
imInput = np.dstack(imInput)
vidseq.release()
return (imInput, metadata)
def writeVideoToFile(imVideo, filename, codec='DIVX', fps=30, isColor=False):
# start timer
tStart = time.time()
# write video
# fourcc = cv2.FOURCC(*list(codec)) # opencv 2.4
fourcc = cv2.VideoWriter_fourcc(*list(codec))
height, width = imVideo.shape[:2]
writer = cv2.VideoWriter(filename, fourcc, fps=fps,
frameSize=(width, height), isColor=isColor)
print writer.isOpened()
numFrames = imVideo.shape[-1]
for fid in range(numFrames):
if isColor:
writer.write(imVideo[:, :, :, fid].astype('uint8'))
else:
writer.write(imVideo[:, :, fid].astype('uint8'))
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(filename, tEnd - tStart)
# release
writer.release()
def writeVideoAsTiffStack(imVideo, strFilePrefix):
# start timer
tStart = time.time()
for fid in range(imVideo.shape[2]):
plt.imsave(strFilePrefix + '.%.3d.tif' % (fid + 1), imVideo[:, :, fid])
# end timer
tEnd = time.time()
print 'Writing video {} took {} seconds'.format(strFilePrefix,
tEnd - tStart)
def mplotShowMIP(im, axis, xlabel=None, ylabel=None, title=None):
plt.imshow(im.max(axis))
if title:
plt.title(title)
if xlabel:
plt.xlabel(xlabel)
if ylabel:
plt.ylabel(ylabel)
def convertFromRFtoBMode(imInputRF):
return np.abs(scipy.signal.hilbert(imInputRF, axis=0))
def normalizeAngles(angleList, angle_range):
return np.array(
[angles.normalize(i, angle_range[0], angle_range[1]) for i in
angleList])
def SaveFigToDisk(saveDir, fileName, saveext=('.png', '.eps'), **kwargs):
for ext in saveext:
plt.savefig(os.path.join(saveDir, fileName + ext), **kwargs)
def SaveImageToDisk(im, saveDir, fileName, saveext=('.png',)):
for ext in saveext:
plt.imsave(os.path.join(saveDir, fileName + ext), im)
def generateGatedVideoUsingSplineInterp(imInput, numOutFrames, minFrame,
maxFrame, splineOrder):
tZoom = np.float(numOutFrames) / (maxFrame - minFrame + 1)
return scipy.ndimage.interpolation.zoom(
imInput[:, :, minFrame:maxFrame + 1], (1, 1, tZoom), order=splineOrder)
def ncorr(imA, imB):
imA = (imA - imA.mean()) / imA.std()
imB = (imB - imB.mean()) / imB.std()
return np.mean(imA * imB)
def vis_checkerboard(im1, im2):
im_chk = sitk.CheckerBoard(sitk.GetImageFromArray(im1),
sitk.GetImageFromArray(im2))
return sitk.GetArrayFromImage(im_chk)
def fig2data(fig):
"""
@brief Convert a Matplotlib figure to a 4D numpy array with
RGBA channels and return it
@param fig a matplotlib figure
@return a numpy 3D array of RGBA values
"""
# draw the renderer
fig.canvas.draw()
# Get the RGBA buffer from the figure
w, h = fig.canvas.get_width_height()
buf = np.fromstring(fig.canvas.tostring_argb(), dtype=np.uint8)
buf.shape = (w, h, 4)
# canvas.tostring_argb give pixmap in ARGB mode.
# Roll the ALPHA channel to have it in RGBA mode
buf = np.roll(buf, 3, axis=2)
return buf
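# Example (hedged sketch, not part of the original file):
#
#   fig = plt.figure()
#   plt.plot([0, 1], [0, 1])
#   rgba = fig2data(fig)
#   print rgba.shape, rgba.dtype   # 4-channel uint8 buffer per the recipe above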
| 24.241758 | 79 | 0.592248 | 1,031 | 8,824 | 5.031038 | 0.275461 | 0.008097 | 0.013881 | 0.017351 | 0.217467 | 0.174475 | 0.124157 | 0.124157 | 0.124157 | 0.118373 | 0 | 0.022293 | 0.288305 | 8,824 | 364 | 80 | 24.241758 | 0.803662 | 0.051904 | 0 | 0.241546 | 0 | 0 | 0.028841 | 0 | 0 | 0 | 0.000491 | 0 | 0 | 0 | null | null | 0 | 0.05314 | null | null | 0.067633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
078810f30530e12e24a60251c7822cc072db8c3d | 1,142 | py | Python | typogrify/templatetags/typogrify_tags.py | tylerbutler/typogrify | 7b7a67348a2d51400fd38c0b61e30e34ca98994e | ["BSD-3-Clause"] | null | null | null | typogrify/templatetags/typogrify_tags.py | tylerbutler/typogrify | 7b7a67348a2d51400fd38c0b61e30e34ca98994e | ["BSD-3-Clause"] | null | null | null | typogrify/templatetags/typogrify_tags.py | tylerbutler/typogrify | 7b7a67348a2d51400fd38c0b61e30e34ca98994e | ["BSD-3-Clause"] | null | null | null |
from typogrify.filters import amp, caps, initial_quotes, smartypants, titlecase, typogrify, widont, TypogrifyError
from functools import wraps
from django.conf import settings
from django import template
from django.utils.safestring import mark_safe
from django.utils.encoding import force_unicode
register = template.Library()
def make_safe(f):
"""
A function wrapper to make typogrify play nice with django's
unicode support.
"""
@wraps(f)
def wrapper(text):
text = force_unicode(text)
f.is_safe = True
out = text
try:
out = f(text)
except TypogrifyError, e:
if settings.DEBUG:
raise e
return text
return mark_safe(out)
wrapper.is_safe = True
return wrapper
register.filter('amp', make_safe(amp))
register.filter('caps', make_safe(caps))
register.filter('initial_quotes', make_safe(initial_quotes))
register.filter('smartypants', make_safe(smartypants))
register.filter('titlecase', make_safe(titlecase))
register.filter('typogrify', make_safe(typogrify))
register.filter('widont', make_safe(widont))
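# A usage sketch (hypothetical template and context variables): with this
# module on an app's templatetags path, the filters apply in Django templates:
#   {% load typogrify_tags %}
#   <h1>{{ post.title|typogrify }}</h1>
#   <p>{{ post.body|widont }}</p>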
| 27.853659 | 114 | 0.69965 | 143 | 1,142 | 5.468531 | 0.356643 | 0.081841 | 0.038363 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.205779 | 1,142 | 40 | 115 | 28.55 | 0.862183 | 0 | 0 | 0 | 0 | 0 | 0.05364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.206897 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
07a0beb6aad78f79be93a859fb255e52020dee2b | 1,931 | py | Python | geoist/cattools/Smoothing.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | ["MIT"] | 1 | 2020-06-04T01:09:24.000Z | 2020-06-04T01:09:24.000Z | geoist/cattools/Smoothing.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | ["MIT"] | null | null | null | geoist/cattools/Smoothing.py | wqqpp007/geoist | 116b674eae3da4ee706902ce7f5feae1f61f43a5 | ["MIT"] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import numpy as np
from . import Selection as Sel
from . import Exploration as Exp
from . import CatUtils as CU
#-----------------------------------------------------------------------------------------
def GaussWin (Dis, Sig):
return np.exp(-(Dis**2)/(Sig**2.))
#-----------------------------------------------------------------------------------------
def SmoothMFD (Db, a, Wkt, Window=GaussWin, Par=50.,
Delta=0.1, SphereGrid=False,
Box=[], Buffer=[], Grid=[],
Threshold=-100, Unwrap=False,
ZeroRates=False):
if Par <= 0:
Par = np.inf
# Catalogue selection
DbS = Sel.AreaSelect(Db, Wkt, Owrite=0, Buffer=Buffer, Unwrap=Unwrap)
x,y,z = Exp.GetHypocenter(DbS)
# Creating the mesh grid
P = CU.Polygon()
P.Load(Wkt)
# Unwrapping coordinates
if Unwrap:
x = [i if i > 0. else i+360. for i in x]
P.Unwrap()
if Grid:
XY = [G for G in Grid if P.IsInside(G[0], G[1])]
else:
if SphereGrid:
XY = P.SphereGrid(Delta=Delta, Unwrap=Unwrap)
else:
XY = P.CartGrid(Dx=Delta, Dy=Delta, Bounds=Box)
Win = []
for xyP in XY:
Win.append(0)
for xyE in zip(x,y):
Dis = CU.WgsDistance(xyP[1], xyP[0], xyE[1], xyE[0])
Win[-1] += Window(Dis, Par)
# Scaling and normalising the rates
Norm = np.sum(Win)
A = []; X = []; Y = []
for I,W in enumerate(Win):
aT = -np.inf
if Norm > 0. and W > 0.:
aT = a + np.log10(W/Norm)
if aT < Threshold:
# Filter below threshold
aT = -np.inf
if ZeroRates:
A.append(aT)
X.append(XY[I][0])
Y.append(XY[I][1])
else:
if aT > -np.inf:
A.append(aT)
X.append(XY[I][0])
Y.append(XY[I][1])
if Unwrap:
# Wrap back longitudes
X = [x if x < 180. else x-360. for x in X]
return X, Y, A
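#-----------------------------------------------------------------------------------------
# A small sanity check of the Gaussian window above (hypothetical distances,
# sigma of 50 matching the SmoothMFD default Par): the weight falls to
# exp(-1) ~ 0.37 at Dis == Sig.
if __name__ == '__main__':
    for d in (0., 25., 50., 100.):
        print(GaussWin(d, 50.))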
| 23.26506 | 90 | 0.483169 | 271 | 1,931 | 3.442804 | 0.346863 | 0.021436 | 0.038585 | 0.019293 | 0.066452 | 0.066452 | 0.066452 | 0.066452 | 0.066452 | 0.066452 | 0 | 0.027496 | 0.284309 | 1,931 | 83 | 91 | 23.26506 | 0.647612 | 0.188503 | 0 | 0.245283 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.075472 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
07a6ea2e95247eb4360055919661bfab2c787424 | 442 | py | Python | audio.py | fernandoq/quiz-show | 6e130db7923d14cf1976e1c522c58f848e48f2af | ["MIT"] | null | null | null | audio.py | fernandoq/quiz-show | 6e130db7923d14cf1976e1c522c58f848e48f2af | ["MIT"] | null | null | null | audio.py | fernandoq/quiz-show | 6e130db7923d14cf1976e1c522c58f848e48f2af | ["MIT"] | null | null | null |
import time
import subprocess
import os
print os.uname()
if not os.uname()[0].startswith("Darw"):
import pygame
pygame.mixer.init()
# Plays a song
def playSong(filename):
print "play song"
if not os.uname()[0].startswith("Darw"):
pygame.mixer.music.fadeout(1000) #fadeout current music over 1 sec.
pygame.mixer.music.load("music/" + filename)
pygame.mixer.music.play()
else:
subprocess.call(["afplay", "music/" + filename])
| 24.555556 | 69 | 0.708145 | 64 | 442 | 4.890625 | 0.5 | 0.140575 | 0.153355 | 0.076677 | 0.172524 | 0.172524 | 0.172524 | 0 | 0 | 0 | 0 | 0.018325 | 0.135747 | 442 | 18 | 70 | 24.555556 | 0.801047 | 0.11086 | 0 | 0.133333 | 0 | 0 | 0.089514 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.266667 | null | null | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
07b58dc361c480dc7628924d4fba99b729151138 | 687 | py | Python | client/modules/Wikipedia.py | devagul93/Jarvis-System | 8d1865b19bb8530831c868147c3b27a1c3bad59b | ["MIT"] | null | null | null | client/modules/Wikipedia.py | devagul93/Jarvis-System | 8d1865b19bb8530831c868147c3b27a1c3bad59b | ["MIT"] | null | null | null | client/modules/Wikipedia.py | devagul93/Jarvis-System | 8d1865b19bb8530831c868147c3b27a1c3bad59b | ["MIT"] | null | null | null |
import wikipedia
import re
import TCPclient as client
WORDS = ["WIKIPEDIA","SEARCH","INFORMATION"]
def handle(text,mic,profile):
# SEARCH ON WIKIPEDIA
# ny = wikipedia.summary("New York",sentences=3);
# mic.say("%s"% ny)
#mic.say("What you want to search about")
#text = mic.activeListen()
print "entering wiki term"
text = client.grab_input()
while text.upper()=="WIKIPEDIA":
print "entering while"
text = client.grab_input()
print text
answer = wikipedia.summary(text,sentences=3)
answer +="\n"
print answer
client.send_out(answer)
#mic.say(answer)
def isValid(text):
return bool(re.search(r'\bwikipedia\b',text, re.IGNORECASE))
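# A minimal check of the trigger matcher above (hypothetical phrases); note
# that handle() additionally needs the TCPclient connection to be up.
if __name__ == '__main__':
    print(isValid("Jarvis, search Wikipedia for Alan Turing"))  # True
    print(isValid("what is the weather like"))                  # False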
| 19.083333 | 61 | 0.679767 | 93 | 687 | 4.989247 | 0.516129 | 0.038793 | 0.060345 | 0.081897 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003534 | 0.176128 | 687 | 35 | 62 | 19.628571 | 0.816254 | 0.24163 | 0 | 0.117647 | 0 | 0 | 0.160156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.176471 | null | null | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
07d274563189ebc57a38c1571e12c09ed638080d | 18,828 | py | Python | scanlogger.py | pythonhacker/pyscanlogd | 64d6ad38127243e5c422be7f899ecfa802e1ad21 | ["BSD-3-Clause"] | 1 | 2021-04-03T22:15:06.000Z | 2021-04-03T22:15:06.000Z | scanlogger.py | pythonhacker/pyscanlogd | 64d6ad38127243e5c422be7f899ecfa802e1ad21 | ["BSD-3-Clause"] | null | null | null | scanlogger.py | pythonhacker/pyscanlogd | 64d6ad38127243e5c422be7f899ecfa802e1ad21 | ["BSD-3-Clause"] | 2 | 2020-12-18T20:06:21.000Z | 2021-04-08T02:47:40.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyscanlogger: Port scan detector/logger tool, inspired
by scanlogd {http://www.openwall.com/scanlogd} but with
added ability to log slow port-scans.
Features
1. Detects all stealth (half-open) and full-connect scans.
2. Detects Idle scan and logs it correctly using correlation!
3. Detects SCTP scan.
4. Detects slow port-scans also.
Modification History
Mar 17 2010 - Cleaned up code to publish to google.
Apr 8 2010 - Better detection of TCP full-connect scan without
spurious and incorrect logging. Better logging
functions.
Licensed under GNU GPL v3.0.
"""
import sys, os
import dpkt, pcap
import struct
import socket
import time
import threading
import optparse
import entry
import timerlist
__author__ = "pythonhacker"
__maintainer__ = "pythonhacker"
__version__ = '0.5.1'
__modified__ = 'Thu Apr 8 19:21:11 IST 2010'
# UDP - in progress...
SCAN_TIMEOUT = 5
WEIGHT_THRESHOLD = 25
PIDFILE="/var/run/pyscanlogger.pid"
# TCP flag constants
TH_URG=dpkt.tcp.TH_URG
TH_ACK=dpkt.tcp.TH_ACK
TH_PSH=dpkt.tcp.TH_PUSH
TH_RST=dpkt.tcp.TH_RST
TH_SYN=dpkt.tcp.TH_SYN
TH_FIN=dpkt.tcp.TH_FIN
# Protocols
TCP=dpkt.tcp.TCP
UDP=dpkt.udp.UDP
SCTP=dpkt.sctp.SCTP
get_timestamp = lambda : time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
ip2quad = lambda x: socket.inet_ntoa(struct.pack('I', x))
scan_ip2quad = lambda scan: map(ip2quad, [scan.src, scan.dst])
class ScanLogger(object):
""" Port scan detector/logger """
# TCP flags to scan type mapping
scan_types = {0: 'TCP null',
TH_FIN: 'TCP fin',
TH_SYN: 'TCP syn', TH_SYN|TH_RST: 'TCP syn',
TH_ACK: 'TCP ack',
TH_URG|TH_PSH|TH_FIN: 'TCP x-mas',
TH_URG|TH_PSH|TH_FIN|TH_ACK: 'TCP x-mas',
TH_SYN|TH_FIN: 'TCP syn/fin',
TH_FIN|TH_ACK: 'TCP fin/ack',
TH_SYN|TH_ACK: 'TCP full-connect',
TH_URG|TH_PSH|TH_ACK|TH_RST|TH_SYN|TH_FIN: 'TCP all-flags',
TH_SYN|TH_ACK|TH_RST: 'TCP full-connect',
# Not a scan
TH_RST|TH_ACK: 'reply'}
def __init__(self, timeout, threshold, maxsize, daemon=True, logfile='/var/log/scanlog'):
self.scans = entry.EntryLog(maxsize)
self.long_scans = entry.EntryLog(maxsize)
# Port scan weight threshold
self.threshold = threshold
# Timeout for scan entries
self.timeout = timeout
# Long-period scan timeouts
self.timeout_l = 3600
# Long-period scan threshold
self.threshold_l = self.threshold/2
# Daemonize ?
self.daemon = daemon
# Log file
try:
self.scanlog = open(logfile,'a')
print >> sys.stderr, 'Scan logs will be saved to %s' % logfile
except (IOError, OSError), (errno, strerror):
print >> sys.stderr, "Error opening scan log file %s => %s" % (logfile, strerror)
self.scanlog = None
# Recent scans - this list allows to keep scan information
# upto last 'n' seconds, so as to not call duplicate scans
# in the same time-period. 'n' is 60 sec by default.
# Since entries time out in 60 seconds, max size is equal
# to maximum such entries possible in 60 sec - assuming
# a scan occurs at most every 5 seconds, this would be 12.
self.recent_scans = timerlist.TimerList(12, 60.0)
def hash_func(self, addr):
""" Hash a host address """
value = addr
h = 0
while value:
# print value
h ^= value
value = value >> 9
return h & (8192-1)
def mix(self, a, b, c):
a -= b; a -= c; a ^= (c>>13)
b -= c; b -= a; b ^= (a<<8)
c -= a; c -= b; c ^= (b>>13)
a -= b; a -= c; a ^= (c>>12)
b -= c; b -= a; b ^= (a<<16)
c -= a; c -= b; c ^= (b>>5)
a -= b; a -= c; a ^= (c>>3)
b -= c; b -= a; b ^= (a<<10)
c -= a; c -= b; c ^= (b>>15)
return abs(c)
def host_hash(self, src, dst):
""" Hash mix two host addresses """
return self.hash_func(self.mix(src, dst, 0xffffff))
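    # For illustration (hypothetical packed addresses): host_hash folds the
    # two endpoints into one of 8192 buckets, since hash_func masks with
    # (8192 - 1), e.g. self.host_hash(0x0a000001, 0x0a000002) -> range(8192).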
def log(self, msg):
""" Log a message to console and/or log file """
line = '[%s]: %s' % (get_timestamp(), msg)
if self.scanlog:
self.scanlog.write(line + '\n')
self.scanlog.flush()
if not self.daemon:
print >> sys.stderr, line
def log_scan(self, scan, continuation=False, slow_scan=False, unsure=False):
""" Log the scan to file and/or console """
srcip, dstip = scan_ip2quad(scan)
ports = ','.join([str(port) for port in scan.ports])
if not continuation:
tup = [scan.type,scan.flags_or,srcip,dstip, ports]
if not slow_scan:
if scan.type != 'Idle':
line = '%s scan (flags:%d) from %s to %s (ports:%s)'
else:
tup.append(ip2quad(scan.zombie))
line = '%s scan (flags: %d) from %s to %s (ports: %s) using zombie host %s'
else:
tup.append(scan.time_avg)
if unsure:
line = 'Possible slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs'
else:
line = 'Slow %s scan (flags:%d) from %s to %s (ports:%s), average timediff %.2fs'
else:
tup = [scan.type, srcip,dstip, ports]
if not slow_scan:
if scan.type != 'Idle':
line = 'Continuation of %s scan from %s to %s (ports:%s)'
else:
tup.append(ip2quad(scan.zombie))
line = 'Continuation of %s scan from %s to %s (ports: %s) using zombie host %s'
else:
tup.append(scan.time_avg)
line = 'Continuation of slow %s scan from %s to %s (ports:%s), average timediff %.2fs'
msg = line % tuple(tup)
self.log(msg)
def update_ports(self, scan, dport, flags):
scan.flags_or |= flags
if dport in scan.ports:
return
# Add weight for port
if dport < 1024:
scan.weight += 3
else:
scan.weight += 1
scan.ports.append(dport)
def inspect_scan(self, scan, slow_scan=False):
# Sure scan
is_scan = ((slow_scan and scan.weight >= self.threshold_l) or (not slow_scan and scan.weight >= self.threshold))
# Possible scan
maybe_scan = (slow_scan and len(scan.ports)>=3 and len(scan.timediffs)>=4 and (scan.weight < self.threshold_l))
not_scan = False
if is_scan or maybe_scan:
scan.logged = True
if scan.proto==TCP:
idle_scan = False
if scan.flags_or==TH_RST:
# None does scan using RST, however this could be
# return packets from a zombie host to the scanning
# host when a scanning host is doing an idle scan.
# Basically
# A -scanning host
# B - zombie host
# C - target host
# If A does an idle scan on C with B as zombie,
# it will appear to C as if B is syn scanning it
# and later we could get an apparent RST "scan"
# from B to A
# Correlation: If 'RST scan' detected from X to Y
# See if there was a SYN scan recently from host
# X to host Z. Then actually Y is idle scanning
# Z
dummy_scans, idle_ports = [], []
for item in reversed(self.recent_scans):
rscan = item[1]
if rscan.src==scan.src and rscan.flags_or==TH_SYN and ((rscan.timestamp - scan.timestamp)<30):
idle_scan = True
idle_ports.append(rscan.ports)
dummy_scans.append(item)
if idle_scan:
scan.src = scan.dst
scan.dst = rscan.dst
scan.zombie = rscan.src
scan.type = 'Idle'
scan.ports = idle_ports
# for d in dummy_scans:
# self.recent_scans.remove(d)
else:
# Remove entry
if slow_scan:
del self.long_scans[scan.hash]
else:
del self.scans[scan.hash]
return False
else:
scan.type = self.scan_types.get(scan.flags_or,'unknown')
if scan.type in ('', 'reply'):
not_scan = True
# If we see scan flags 22 from A->B, make sure that
# there was no recent full-connect scan from B->A, if
# so this is spurious and should be ignored.
if scan.flags_or == (TH_SYN|TH_ACK|TH_RST) and len(self.recent_scans):
recent1 = self.recent_scans[-1:-2:-1]
for recent in recent1:
# Was not a scan, skip
if not recent.is_scan: continue
if recent.type == 'TCP full-connect' and ((scan.src == recent.dst) and (scan.dst == recent.src)):
# Spurious
self.log("Ignoring spurious TCP full-connect scan from %s" % ' to '.join(scan_ip2quad(scan)))
not_scan = True
break
# If this is a syn scan, see if there was a recent idle scan
# with this as zombie, then ignore it...
elif scan.flags_or == TH_SYN and len(self.recent_scans):
# Try last 1 scans
recent1 = self.recent_scans[-1:-2:-1]
for recent in recent1:
if recent.type=='Idle' and scan.src==recent.zombie:
self.log('Ignoring mis-interpreted syn scan from zombie host %s' % ' to '.join(scan_ip2quad(scan)))
break
# Reply from B->A for full-connect scan from A->B
elif (recent.type == 'reply' and ((scan.src == recent.dst) and (scan.dst == recent.src))):
scan.type = 'TCP full-connect'
break
elif scan.proto==UDP:
scan.type = 'UDP'
# Reset flags for UDP scan
scan.flags_or = 0
elif scan.proto==SCTP:
if scan.chunk_type==1:
scan.type = 'SCTP Init'
elif scan.chunk_type==10:
scan.type = 'SCTP COOKIE_ECHO'
# See if this was logged recently
scanentry = entry.RecentScanEntry(scan, not not_scan)
if scanentry not in self.recent_scans:
continuation=False
self.recent_scans.append(scanentry)
else:
continuation=True
if not not_scan:
self.log_scan(scan, continuation=continuation, slow_scan=slow_scan, unsure=maybe_scan)
# Remove entry
if slow_scan:
del self.long_scans[scan.hash]
else:
del self.scans[scan.hash]
return True
else:
return False
def process(self, pkt):
if not hasattr(pkt, 'ip'):
return
ip = pkt.ip
# Ignore non-tcp, non-udp packets
if type(ip.data) not in (TCP, UDP, SCTP):
return
pload = ip.data
src,dst,dport,flags = int(struct.unpack('I',ip.src)[0]),int(struct.unpack('I', ip.dst)[0]),int(pload.dport),0
proto = type(pload)
if proto == TCP: flags = pload.flags
key = self.host_hash(src,dst)
curr=time.time()
# Keep dropping old entries
self.recent_scans.collect()
if key in self.scans:
scan = self.scans[key]
if scan.src != src:
# Skip packets in reverse direction or invalid protocol
return
timediff = curr - scan.timestamp
# Update only if not too old, else skip and remove entry
if (timediff > self.timeout):
# Add entry in long_scans if timediff not larger
# than longscan timeout
prev = self.scans[key].timestamp
if timediff<=self.timeout_l:
if key not in self.long_scans:
lscan = entry.ScanEntry(key)
lscan.src = src
lscan.dst = dst
lscan.timestamp = curr
lscan.timediffs.append(curr - prev)
lscan.flags_or |= flags
lscan.ports.append(dport)
lscan.proto = proto
self.long_scans[key] = lscan
else:
lscan = self.long_scans[key]
lscan.timestamp = curr
lscan.flags_or |= flags
lscan.timediffs.append(curr - prev)
lscan.update_time_sd()
self.update_ports(lscan, dport, flags)
if lscan.time_sd<2:
# SD is less than 2, possible slow scan
# update port weights...
# print 'Weight=>',lscan.weight
if not self.inspect_scan(lscan, True):
# Not a scan, check # of entries - if too many
# then this is a regular network activity
# but not a scan, so remove entry
if len(lscan.timediffs)>=10:
# print lscan.src, lscan.timediffs, lscan.time_sd
print 'Removing',key,lscan.src,'since not a scan'
del self.long_scans[key]
elif len(lscan.timediffs)>2:
# More than 2 entries, but SD is too large,
# delete the entry
# print 'Removing',key,lscan.src,'since SD is',lscan.time_sd
del self.long_scans[key]
else:
# Too large timeout, remove key
del self.long_scans[key]
del self.scans[key]
return
if scan.logged: return
scan.timestamp = curr
self.update_ports(scan, dport, flags)
self.inspect_scan(scan)
else:
# Add new entry
scan = entry.ScanEntry(key)
scan.src = src
scan.dst = dst
scan.timestamp = curr
scan.flags_or |= flags
if proto==SCTP:
scan.chunk_type = pload.chunks[0].type
scan.ports.append(dport)
scan.proto = proto
self.scans[key] = scan
def loop(self):
pc = pcap.pcap()
decode = { pcap.DLT_LOOP:dpkt.loopback.Loopback,
pcap.DLT_NULL:dpkt.loopback.Loopback,
pcap.DLT_EN10MB:dpkt.ethernet.Ethernet } [pc.datalink()]
try:
print 'listening on %s: %s' % (pc.name, pc.filter)
for ts, pkt in pc:
self.process(decode(pkt))
except KeyboardInterrupt:
if not self.daemon:
nrecv, ndrop, nifdrop = pc.stats()
print '\n%d packets received by filter' % nrecv
print '%d packets dropped by kernel' % ndrop
def run_daemon(self):
# Disconnect from tty
try:
pid = os.fork()
if pid>0:
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #1 failed", e
sys.exit(1)
os.setsid()
os.umask(0)
# Second fork
try:
pid = os.fork()
if pid>0:
open(PIDFILE,'w').write(str(pid))
sys.exit(0)
except OSError, e:
print >>sys.stderr, "fork #2 failed", e
sys.exit(1)
self.loop()
def run(self):
# If dameon, then create a new thread and wait for it
if self.daemon:
print 'Daemonizing...'
self.run_daemon()
else:
# Run in foreground
self.loop()
def main():
if os.geteuid() != 0:
sys.exit("You must be super-user to run this program")
o=optparse.OptionParser()
o.add_option("-d", "--daemonize", dest="daemon", help="Daemonize",
action="store_true", default=False)
o.add_option("-f", "--logfile", dest="logfile", help="File to save logs to",
default="/var/log/scanlog")
options, args = o.parse_args()
s=ScanLogger(SCAN_TIMEOUT, WEIGHT_THRESHOLD, 8192, options.daemon, options.logfile)
s.run()
if __name__ == '__main__':
main()
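# A usage sketch: the class-level scan_types table drives the scan naming,
# for example:
#   ScanLogger.scan_types[TH_SYN | TH_FIN]           # -> 'TCP syn/fin'
#   ScanLogger.scan_types[TH_URG | TH_PSH | TH_FIN]  # -> 'TCP x-mas'
# Running the logger itself needs root plus the dpkt and pcap modules:
#   sudo python scanlogger.py -d -f /var/log/scanlog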
| 37.8833 | 132 | 0.468929 | 2,152 | 18,828 | 4.015799 | 0.196097 | 0.013539 | 0.017357 | 0.00648 | 0.206781 | 0.151701 | 0.11583 | 0.103795 | 0.103217 | 0.099514 | 0 | 0.013553 | 0.439611 | 18,828 | 496 | 133 | 37.959677 | 0.805516 | 0.126036 | 0 | 0.258065 | 0 | 0.016129 | 0.084012 | 0.001656 | 0 | 0 | 0.00053 | 0 | 0 | 0 | null | null | 0 | 0.029032 | null | null | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |