Schema (column name and dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each record below is shown as one block: repository metadata, the `content` cell, then the remaining numeric cells in the schema order above.
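As a minimal sketch of how such a table might be consumed (the filename `data.parquet` is a hypothetical export of the rows below; any Parquet/JSONL dump with these columns would work), one could filter on the quality signals with pandas:

```python
import pandas as pd

# Hypothetical filename; assumes the table above was exported to Parquet.
df = pd.read_parquet("data.parquet")

# Keep Python files that parse (qsc_codepython_cate_ast == 1) and are not
# dominated by duplicated 10-grams.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.2)
)
print(df.loc[mask, ["max_stars_repo_name", "size", "avg_line_length"]].head())
```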
---
hexsha: 19e3cc99b66e2939b99c81e570efb9afd33fa23d | size: 5,773 | ext: py | lang: Python
max_stars: path=rovina.py, repo=Pandoro/tools, head=631c6036cb74dc845668fd912588fd31aae46f8b, licenses=["MIT"], count=1, events 2019-04-22T16:38:03.000Z → 2019-04-22T16:38:03.000Z
max_issues: path=rovina.py, repo=afcarl/tools-Pandoro, head=631c6036cb74dc845668fd912588fd31aae46f8b, licenses=["MIT"], count=2, events 2018-03-13T10:49:48.000Z → 2018-03-13T10:54:01.000Z
max_forks: path=rovina.py, repo=afcarl/tools-Pandoro, head=631c6036cb74dc845668fd912588fd31aae46f8b, licenses=["MIT"], count=2, events 2018-03-08T19:40:10.000Z → 2018-06-11T14:43:49.000Z
```python
import json
import os
import sys
sys.path.append('/usr/lib/python2.7/dist-packages')
import cv2
import numpy as np
from tqdm import *
import dataset_utils


class Rovina(object):
    def __init__(self, config_filename):
        self.config_filename = config_filename
        with open(config_filename) as config_file:
            self.config = json.load(config_file)
        if self.config['use_relative_paths']:
            self.root_folder = os.path.dirname(config_filename)
        else:
            self.root_folder = ''
        # Used if we want to use the "flipped" version of the camera 0.
        self.folder_postfix = self.config['flipped_post_fix']
        image_f = self.config['image_folder']
        if image_f is not None:
            image_f += self.folder_postfix
            self.image_folder = os.path.join(self.root_folder, image_f)
            self.image_extension = self.config['image_extension']
        else:
            self.image_folder = None
        obj_label_f = self.config['object_label_folder']
        if obj_label_f is not None:
            obj_label_f += self.folder_postfix
            self.obj_label_folder = os.path.join(self.root_folder, obj_label_f)
            self.obj_label_extension = self.config['object_label_extension']
        else:
            self.obj_label_folder = None
        mat_label_f = self.config['material_label_folder']
        if mat_label_f is not None:
            mat_label_f += self.folder_postfix
            self.mat_label_folder = os.path.join(self.root_folder, mat_label_f)
            self.mat_label_extension = self.config['material_label_extension']
        else:
            self.mat_label_folder = None
        calib_f = self.config.get('calibration_folder')
        if calib_f is not None:
            calib_f += self.folder_postfix
            self.calibration_folder = os.path.join(self.root_folder, calib_f)
            self.calibration_extension = self.config.get('calibration_extension')
        else:
            self.calibration_folder = None
        depth_f = self.config.get('depth_folder')
        if depth_f is not None:
            depth_f += self.folder_postfix
            self.depth_folder = os.path.join(self.root_folder, depth_f)
            self.depth_extension = self.config.get('depth_extension')
        else:
            self.depth_folder = None
        self.train_filenames = self.config['train_images']
        self.test_filenames = self.config['test_images']
        self.dataset = self.config['dataset_name']
        self.color_coding = {'mat': dataset_utils.LabelConversion(self.config['material_color_coding']),
                             'obj': dataset_utils.LabelConversion(self.config['object_color_coding'])}
        self.class_count = {k: self.color_coding[k].class_count for k in self.color_coding.keys()}
        self.class_names = {k: self.color_coding[k].class_names for k in self.color_coding.keys()}

    def label_to_rgb(self, image, type):
        return self.color_coding[type].label_to_rgb(image)

    def rgb_to_label(self, image, type):
        return self.color_coding[type].rgb_to_label(image)

    def get_data(self, data_type, color_images=True, mat_label_images=True, obj_label_images=True, calibrations=False, depth=False):
        file_list = []
        for t in data_type:
            list_type = t + '_images'
            if list_type in self.config:
                file_list += self.config[list_type]
            else:
                raise Exception('The config does not contain a list for the entry: \'{0}_images\' \nConfig file located at: {1}'.format(t, self.config_filename))
        return_list = []
        if color_images:
            images = []
            for fn in tqdm(file_list):
                i_n = os.path.join(self.image_folder, fn + self.image_extension)
                images.append(self.load_color(i_n))
            return_list.append(images)
        if mat_label_images:
            mat_labels = []
            for fn in tqdm(file_list):
                mat_l_n = os.path.join(self.mat_label_folder, fn + self.mat_label_extension)
                mat_labels.append(self.load_labels(mat_l_n, 'mat'))
            return_list.append(mat_labels)
        if obj_label_images:
            obj_labels = []
            for fn in tqdm(file_list):
                obj_l_n = os.path.join(self.obj_label_folder, fn + self.obj_label_extension)
                obj_labels.append(self.load_labels(obj_l_n, 'obj'))
            return_list.append(obj_labels)
        if calibrations:
            calibration_data = []
            for fn in tqdm(file_list):
                c_n = os.path.join(self.calibration_folder, fn + self.calibration_extension)
                calibration_data.append(self.load_calibration(c_n))
            return_list.append(calibration_data)
        if depth:
            depth_data = []
            for fn in tqdm(file_list):
                d_n = os.path.join(self.depth_folder, fn + self.depth_extension)
                depth_data.append(self.load_depth(d_n))
            return_list.append(depth_data)
        if len(return_list) == 1:
            return return_list[0]
        else:
            return return_list

    def load_color(self, file_name):
        return cv2.imread(file_name)[:, :, ::-1]  # flip bgr to rgb

    def load_labels(self, file_name, type):
        rgb = cv2.imread(file_name)[:, :, ::-1]
        return self.rgb_to_label(rgb, type)

    def load_calibration(self, file_name):
        with open(file_name) as calib_file:
            return json.load(calib_file)

    def load_depth(self, file_name):
        d = cv2.imread(file_name, cv2.CV_LOAD_IMAGE_UNCHANGED)
        if d.dtype == np.uint16:
            d = d.astype(np.float32) / 256.
        return d
```
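A minimal usage sketch (the config path is hypothetical; it assumes a JSON config containing the keys `__init__` reads, such as `use_relative_paths`, `image_folder`, and `train_images`):

```python
# Hypothetical config path; the JSON must define the keys read in __init__.
rovina = Rovina('/data/rovina/config.json')

# Load color images and material labels for the training split.
images, mat_labels = rovina.get_data(['train'],
                                     color_images=True,
                                     mat_label_images=True,
                                     obj_label_images=False)
```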
avg_line_length … hits (schema order): 37.245161 | 161 | 0.625498 | 775 | 5,773 | 4.374194 | 0.154839 | 0.067847 | 0.029499 | 0.041298 | 0.257817 | 0.156637 | 0.112684 | 0.043068 | 0 | 0 | 0 | 0.005034 | 0.277325 | 5,773 | 155 | 162 | 37.245161 | 0.807526 | 0.013338 | 0 | 0.108333 | 0 | 0 | 0.073749 | 0.024759 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.058333 | 0.025 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19e85b96640382129fd31d8131a6692e41afddf9 | size: 4,952 | ext: py | lang: Python
max_stars: path=gpgLabs/GPR/GPRlab1.py, repo=victortocantins/gpgLabs, head=310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12, licenses=["MIT"], count=null, events null → null
max_issues: path=gpgLabs/GPR/GPRlab1.py, repo=victortocantins/gpgLabs, head=310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12, licenses=["MIT"], count=null, events null → null
max_forks: path=gpgLabs/GPR/GPRlab1.py, repo=victortocantins/gpgLabs, head=310b69c681dd1ebf91ba8be2b5ac27adf5fc0f12, licenses=["MIT"], count=null, events null → null
```python
import numpy as np
from scipy.constants import mu_0, epsilon_0
import matplotlib.pyplot as plt
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from ipywidgets import interact, interactive, IntSlider, widget, FloatText, FloatSlider, fixed
from .Wiggle import wiggle, PrimaryWave, ReflectedWave
import requests
from io import BytesIO

########################################
# DOWNLOAD FUNCTIONS
########################################


def downloadRadargramImage(URL):
    urlObj = requests.get(URL)
    imgcmp = Image.open(BytesIO(urlObj.content))
    return imgcmp

########################################
# WIDGETS
########################################


def PrimaryWidget(dataFile, timeFile):
    i = interact(PrimaryWidgetFcn,
                 epsrL=(1, 10, 1),
                 epsrH=(1, 20, 1),
                 tinterpL=(0, 150, 2),
                 tinterpH=(0, 150, 2),
                 dFile=fixed(dataFile),
                 tFile=fixed(timeFile))
    return i


def PrimaryFieldWidget(radargramImage):
    i = interact(PrimaryFieldWidgetFcn,
                 tinterp=(0, 80, 2),
                 epsr=(1, 40, 1),
                 radgramImg=fixed(radargramImage))
    return i


def PipeWidget(radargramImage):
    i = interact(PipeWidgetFcn,
                 epsr=(0, 100, 1),
                 h=(0.1, 2.0, 0.1),
                 xc=(0., 40., 0.2),
                 r=(0.1, 3, 0.1),
                 imgcmp=fixed(radargramImage))
    return i


def WallWidget(radargramImagePath):
    i = interact(WallWidgetFcn,
                 epsr=(0, 100, 1),
                 h=(0.1, 2.0, 0.1),
                 x1=(1, 35, 1),
                 x2=(20, 40, 1),
                 imgcmp=fixed(radargramImagePath))
    return i

########################################
# FUNCTIONS
########################################


def PrimaryWidgetFcn(tinterpL, epsrL, tinterpH, epsrH, dFile, tFile):
    data = np.load(dFile)
    time = np.load(tFile)
    dt = time[1] - time[0]
    v1 = 1. / np.sqrt(epsilon_0 * epsrL * mu_0)
    v2 = 1. / np.sqrt(epsilon_0 * epsrH * mu_0)
    dx = 0.3
    nano = 1e9
    xorig = np.arange(data.shape[0]) * dx
    out1 = PrimaryWave(xorig, v1, tinterpL / nano)
    out2 = ReflectedWave(xorig, v2, tinterpH / nano)
    kwargs = {
        'skipt': 1,
        'scale': 0.5,
        'lwidth': 0.1,
        'dx': dx,
        'sampr': dt * nano,
    }
    extent = [0., 30, 300, 0]
    fig, ax1 = plt.subplots(1, 1, figsize=(8, 5))
    ax1.invert_yaxis()
    ax1.axis(extent)
    ax1.set_xlabel('Offset (m)')
    ax1.set_ylabel('Time (ns)')
    ax1.set_title('Shot Gather')
    wiggle(data, ax=ax1, **kwargs)
    ax1.plot(xorig, out1 * nano, 'b', lw=2)
    ax1.plot(xorig, out2 * nano, 'r', lw=2)
    plt.show()


def PrimaryFieldWidgetFcn(tinterp, epsr, radgramImg):
    imgcmp = Image.open(radgramImg)
    fig = plt.figure(figsize=(6, 7))
    ax = plt.subplot(111)
    plt.imshow(imgcmp, extent=[0, 150, 150, 0])
    x = np.arange(81) * 0.1
    xconvert = x * 150. / 8.
    v = 1. / np.sqrt(mu_0 * epsilon_0 * epsr)
    nano = 1e9
    # tinterp = 30
    y = (1. / v * x) * nano + tinterp
    plt.plot(xconvert, y, lw=2)
    plt.xticks(np.arange(11) * 15, np.arange(11) * 0.8 + 2.4)  # +2.4 for offset correction
    plt.xlim(0., 150.)
    plt.ylim(146., 0.)
    plt.ylabel('Time (ns)')
    plt.xlabel('Offset (m)')
    plt.show()


def PipeWidgetFcn(epsr, h, xc, r, imgcmp):
    # imgcmp = Image.open(dataImage)
    imgcmp = imgcmp.resize((600, 800))
    fig = plt.figure(figsize=(9, 11))
    ax = plt.subplot(111)
    plt.imshow(imgcmp, extent=[0, 400, 250, 0])
    x = np.arange(41) * 1.
    xconvert = x * 10.
    v = 1. / np.sqrt(mu_0 * epsilon_0 * epsr)
    nano = 1e9
    time = (np.sqrt(((x - xc) ** 2 + 4 * h ** 2)) - r) / v
    plt.plot(xconvert, time * nano, 'r--', lw=2)
    plt.xticks(np.arange(11) * 40, np.arange(11) * 4.0)
    plt.xlim(0., 400)
    plt.ylim(240., 0.)
    plt.ylabel('Time (ns)')
    plt.xlabel('Survey line location (m)')
    plt.show()


def WallWidgetFcn(epsr, h, x1, x2, imgcmp):
    # imgcmp = Image.open(dataImage)
    imgcmp = imgcmp.resize((600, 800))
    fig = plt.figure(figsize=(9, 11))
    ax = plt.subplot(111)
    plt.imshow(imgcmp, extent=[0, 400, 250, 0])
    x = np.arange(41) * 1.
    ind1 = x <= x1
    ind2 = x >= x2
    ind3 = np.logical_not(np.logical_or(ind1, ind2))
    scale = 10.
    xconvert = x * scale
    v = 1. / np.sqrt(mu_0 * epsilon_0 * epsr)
    nano = 1e9

    def arrival(x, xc, h, v):
        return (np.sqrt(((x - xc) ** 2 + 4 * h ** 2))) / v

    plt.plot(xconvert[ind1], arrival(x[ind1], x1, h, v) * nano, 'b--', lw=2)
    plt.plot(xconvert[ind2], arrival(x[ind2], x2, h, v) * nano, 'b--', lw=2)
    plt.plot(np.r_[x1 * scale, x2 * scale], np.r_[2. * h / v, 2. * h / v] * nano, 'b--', lw=2)
    # plt.plot(xconvert[ind3], arrival(x[ind3], xc?, h, v)*nano, 'r--', lw=2)
    plt.xticks(np.arange(11) * 40, np.arange(11) * 4.0)
    plt.xlim(0., 400)
    plt.ylim(240., 0.)
    plt.ylabel('Time (ns)')
    plt.xlabel('Survey line location (m)')
    plt.show()
```
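In a Jupyter notebook these widgets are presumably driven along the following lines (the radargram URL is hypothetical; any image the server returns would do):

```python
# Hypothetical URL; downloadRadargramImage returns a PIL Image.
img = downloadRadargramImage('http://example.com/radargram.png')
PipeWidget(img)   # interactive fit of a hyperbolic pipe reflection
```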
avg_line_length … hits (schema order): 24.636816 | 94 | 0.544628 | 695 | 4,952 | 3.851799 | 0.238849 | 0.029884 | 0.015689 | 0.016436 | 0.331715 | 0.294733 | 0.294733 | 0.277176 | 0.261113 | 0.228614 | 0 | 0.078196 | 0.243336 | 4,952 | 200 | 95 | 24.76 | 0.636242 | 0.047254 | 0 | 0.310606 | 0 | 0 | 0.035458 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.068182 | 0.007576 | 0.189394 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19ebb2a3f5203d8e575a8e0bab417177a0a48924 | size: 5,010 | ext: py | lang: Python
max_stars: path=third_party/unidecode/x0bd.py, repo=asysc2020/contentbox, head=5c155976e0ce7ea308d62293ab89624d97b21d09, licenses=["Apache-2.0"], count=39, events 2015-06-10T23:18:07.000Z → 2021-10-21T04:29:06.000Z
max_issues: path=third_party/unidecode/x0bd.py, repo=asysc2020/contentbox, head=5c155976e0ce7ea308d62293ab89624d97b21d09, licenses=["Apache-2.0"], count=2, events 2016-08-22T12:38:10.000Z → 2017-01-26T18:37:33.000Z
max_forks: path=third_party/unidecode/x0bd.py, repo=asysc2020/contentbox, head=5c155976e0ce7ea308d62293ab89624d97b21d09, licenses=["Apache-2.0"], count=26, events 2015-06-10T22:09:15.000Z → 2021-06-27T15:45:15.000Z
```python
data = (
'bols', # 0x00
'bolt', # 0x01
'bolp', # 0x02
'bolh', # 0x03
'bom', # 0x04
'bob', # 0x05
'bobs', # 0x06
'bos', # 0x07
'boss', # 0x08
'bong', # 0x09
'boj', # 0x0a
'boc', # 0x0b
'bok', # 0x0c
'bot', # 0x0d
'bop', # 0x0e
'boh', # 0x0f
'bwa', # 0x10
'bwag', # 0x11
'bwagg', # 0x12
'bwags', # 0x13
'bwan', # 0x14
'bwanj', # 0x15
'bwanh', # 0x16
'bwad', # 0x17
'bwal', # 0x18
'bwalg', # 0x19
'bwalm', # 0x1a
'bwalb', # 0x1b
'bwals', # 0x1c
'bwalt', # 0x1d
'bwalp', # 0x1e
'bwalh', # 0x1f
'bwam', # 0x20
'bwab', # 0x21
'bwabs', # 0x22
'bwas', # 0x23
'bwass', # 0x24
'bwang', # 0x25
'bwaj', # 0x26
'bwac', # 0x27
'bwak', # 0x28
'bwat', # 0x29
'bwap', # 0x2a
'bwah', # 0x2b
'bwae', # 0x2c
'bwaeg', # 0x2d
'bwaegg', # 0x2e
'bwaegs', # 0x2f
'bwaen', # 0x30
'bwaenj', # 0x31
'bwaenh', # 0x32
'bwaed', # 0x33
'bwael', # 0x34
'bwaelg', # 0x35
'bwaelm', # 0x36
'bwaelb', # 0x37
'bwaels', # 0x38
'bwaelt', # 0x39
'bwaelp', # 0x3a
'bwaelh', # 0x3b
'bwaem', # 0x3c
'bwaeb', # 0x3d
'bwaebs', # 0x3e
'bwaes', # 0x3f
'bwaess', # 0x40
'bwaeng', # 0x41
'bwaej', # 0x42
'bwaec', # 0x43
'bwaek', # 0x44
'bwaet', # 0x45
'bwaep', # 0x46
'bwaeh', # 0x47
'boe', # 0x48
'boeg', # 0x49
'boegg', # 0x4a
'boegs', # 0x4b
'boen', # 0x4c
'boenj', # 0x4d
'boenh', # 0x4e
'boed', # 0x4f
'boel', # 0x50
'boelg', # 0x51
'boelm', # 0x52
'boelb', # 0x53
'boels', # 0x54
'boelt', # 0x55
'boelp', # 0x56
'boelh', # 0x57
'boem', # 0x58
'boeb', # 0x59
'boebs', # 0x5a
'boes', # 0x5b
'boess', # 0x5c
'boeng', # 0x5d
'boej', # 0x5e
'boec', # 0x5f
'boek', # 0x60
'boet', # 0x61
'boep', # 0x62
'boeh', # 0x63
'byo', # 0x64
'byog', # 0x65
'byogg', # 0x66
'byogs', # 0x67
'byon', # 0x68
'byonj', # 0x69
'byonh', # 0x6a
'byod', # 0x6b
'byol', # 0x6c
'byolg', # 0x6d
'byolm', # 0x6e
'byolb', # 0x6f
'byols', # 0x70
'byolt', # 0x71
'byolp', # 0x72
'byolh', # 0x73
'byom', # 0x74
'byob', # 0x75
'byobs', # 0x76
'byos', # 0x77
'byoss', # 0x78
'byong', # 0x79
'byoj', # 0x7a
'byoc', # 0x7b
'byok', # 0x7c
'byot', # 0x7d
'byop', # 0x7e
'byoh', # 0x7f
'bu', # 0x80
'bug', # 0x81
'bugg', # 0x82
'bugs', # 0x83
'bun', # 0x84
'bunj', # 0x85
'bunh', # 0x86
'bud', # 0x87
'bul', # 0x88
'bulg', # 0x89
'bulm', # 0x8a
'bulb', # 0x8b
'buls', # 0x8c
'bult', # 0x8d
'bulp', # 0x8e
'bulh', # 0x8f
'bum', # 0x90
'bub', # 0x91
'bubs', # 0x92
'bus', # 0x93
'buss', # 0x94
'bung', # 0x95
'buj', # 0x96
'buc', # 0x97
'buk', # 0x98
'but', # 0x99
'bup', # 0x9a
'buh', # 0x9b
'bweo', # 0x9c
'bweog', # 0x9d
'bweogg', # 0x9e
'bweogs', # 0x9f
'bweon', # 0xa0
'bweonj', # 0xa1
'bweonh', # 0xa2
'bweod', # 0xa3
'bweol', # 0xa4
'bweolg', # 0xa5
'bweolm', # 0xa6
'bweolb', # 0xa7
'bweols', # 0xa8
'bweolt', # 0xa9
'bweolp', # 0xaa
'bweolh', # 0xab
'bweom', # 0xac
'bweob', # 0xad
'bweobs', # 0xae
'bweos', # 0xaf
'bweoss', # 0xb0
'bweong', # 0xb1
'bweoj', # 0xb2
'bweoc', # 0xb3
'bweok', # 0xb4
'bweot', # 0xb5
'bweop', # 0xb6
'bweoh', # 0xb7
'bwe', # 0xb8
'bweg', # 0xb9
'bwegg', # 0xba
'bwegs', # 0xbb
'bwen', # 0xbc
'bwenj', # 0xbd
'bwenh', # 0xbe
'bwed', # 0xbf
'bwel', # 0xc0
'bwelg', # 0xc1
'bwelm', # 0xc2
'bwelb', # 0xc3
'bwels', # 0xc4
'bwelt', # 0xc5
'bwelp', # 0xc6
'bwelh', # 0xc7
'bwem', # 0xc8
'bweb', # 0xc9
'bwebs', # 0xca
'bwes', # 0xcb
'bwess', # 0xcc
'bweng', # 0xcd
'bwej', # 0xce
'bwec', # 0xcf
'bwek', # 0xd0
'bwet', # 0xd1
'bwep', # 0xd2
'bweh', # 0xd3
'bwi', # 0xd4
'bwig', # 0xd5
'bwigg', # 0xd6
'bwigs', # 0xd7
'bwin', # 0xd8
'bwinj', # 0xd9
'bwinh', # 0xda
'bwid', # 0xdb
'bwil', # 0xdc
'bwilg', # 0xdd
'bwilm', # 0xde
'bwilb', # 0xdf
'bwils', # 0xe0
'bwilt', # 0xe1
'bwilp', # 0xe2
'bwilh', # 0xe3
'bwim', # 0xe4
'bwib', # 0xe5
'bwibs', # 0xe6
'bwis', # 0xe7
'bwiss', # 0xe8
'bwing', # 0xe9
'bwij', # 0xea
'bwic', # 0xeb
'bwik', # 0xec
'bwit', # 0xed
'bwip', # 0xee
'bwih', # 0xef
'byu', # 0xf0
'byug', # 0xf1
'byugg', # 0xf2
'byugs', # 0xf3
'byun', # 0xf4
'byunj', # 0xf5
'byunh', # 0xf6
'byud', # 0xf7
'byul', # 0xf8
'byulg', # 0xf9
'byulm', # 0xfa
'byulb', # 0xfb
'byuls', # 0xfc
'byult', # 0xfd
'byulp', # 0xfe
'byulh', # 0xff
)
```
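This is a transliteration table in the unidecode package layout: entry `data[i]` is the romanization of code point `0xBD00 + i` (the Hangul syllables in the U+BDxx block). A quick check, assuming the module is importable:

```python
# data[i] transliterates code point U+BD00 + i; e.g. U+BD80 is the Hangul
# syllable BU (부), which this table renders as 'bu'.
assert data[0x80] == 'bu'
print(hex(0xBD00 + 0x80), '->', data[0x80])
```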
avg_line_length … hits (schema order): 19.343629 | 20 | 0.436128 | 513 | 5,010 | 4.259259 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.179328 | 0.358882 | 5,010 | 258 | 21 | 19.418605 | 0.500934 | 0.255289 | 0 | 0 | 0 | 0 | 0.359764 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19ed8ee16410261911df594fb0af9ff20f20ca7e | size: 6,556 | ext: py | lang: Python
max_stars: path=pystitchy/grid.py, repo=iht/Stitchy-Studio, head=f7faf846d7ce498ef5945caaff2b09f9108e2919, licenses=["MIT"], count=1, events 2021-02-28T17:27:16.000Z → 2021-02-28T17:27:16.000Z
max_issues: path=pystitchy/grid.py, repo=iht/Stitchy-Studio, head=f7faf846d7ce498ef5945caaff2b09f9108e2919, licenses=["MIT"], count=null, events null → null
max_forks: path=pystitchy/grid.py, repo=iht/Stitchy-Studio, head=f7faf846d7ce498ef5945caaff2b09f9108e2919, licenses=["MIT"], count=null, events null → null
```python
# Copyright (c) 2012 Israel Herraiz <isra@herraiz.org>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import wx
import numpy
from numpy import zeros


class Grid:
    def __init__(self):
        self._xcells = 120
        self._ycells = 80
        self._xsize = 1200
        self._ysize = 800
        self._xoffset = self._xsize / self._xcells * 5
        self._yoffset = self._xoffset
        self._zoom_factor = 100
        self._init_matrix()

    def _init_matrix(self):
        self._cells = zeros((self._xcells, self._ycells), dtype=numpy.bool)
        self._colors = {}
        for x in range(self._xcells):
            for y in range(self._ycells):
                self._colors[(x, y)] = []

    def decrease_zoom(self):
        self._xsize = self._xsize - self._zoom_factor
        self._ysize = self._ysize - self._zoom_factor
        self._xoffset = self._xsize / self._xcells * 5
        self._yoffset = self._xoffset

    def increase_zoom(self):
        self._xsize = self._xsize + self._zoom_factor
        self._ysize = self._ysize + self._zoom_factor
        self._xoffset = self._xsize / self._xcells * 5
        self._yoffset = self._xoffset

    def get_size(self):
        return (self._xsize + self._xoffset, self._ysize + self._yoffset)

    def draw_grid(self, dc):
        step = self._xsize / self._xcells
        boldstep = step * 10
        # Vertical lines
        dc.SetPen(wx.Pen(wx.LIGHT_GREY, 1))
        for x in range(self._xcells + 1):
            xsize = x * step
            ysize = step * self._ycells
            dc.DrawLine(self._xoffset + xsize, self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
        # Draw bold lines
        dc.SetPen(wx.Pen(wx.BLACK, 1))
        for x in range((self._xcells) / 10 + 1):
            xsize = x * boldstep
            ysize = step * self._ycells
            dc.DrawLine(xsize + self._xoffset, self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
        # Horizontal lines
        dc.SetPen(wx.Pen(wx.LIGHT_GREY, 1))
        for y in range(self._ycells + 1):
            ysize = y * step
            xsize = self._xcells * step
            dc.DrawLine(self._xoffset, ysize + self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
        # Draw bold lines
        dc.SetPen(wx.Pen(wx.BLACK, 1))
        for y in range((self._ycells) / 10 + 1):
            ysize = y * boldstep
            xsize = self._xcells * step
            dc.DrawLine(self._xoffset, ysize + self._yoffset, xsize + self._xoffset, ysize + self._yoffset)
        for x in range(self._xcells):
            for y in range(self._ycells):
                if self._cells[x][y]:
                    self._paint_cell(x, y, dc, self._colors[(x, y)][-1])

    def add_cell(self, xcell, ycell, dc, color, erase):
        if not erase:
            if xcell >= 0 and ycell >= 0 and xcell < self._xcells and ycell < self._ycells:
                self._cells[xcell][ycell] = True
                if not len(self._colors[(xcell, ycell)]):
                    self._colors[(xcell, ycell)].append(color)
                elif self._colors[(xcell, ycell)][-1] != color:
                    self._colors[(xcell, ycell)].append(color)
                self._paint_cell(xcell, ycell, dc, color)
        else:
            if xcell >= 0 and ycell >= 0 and xcell < self._xcells and ycell < self._ycells:
                self._cells[xcell][ycell] = False
                if not len(self._colors[(xcell, ycell)]):
                    self._colors[(xcell, ycell)].append(None)
                elif self._colors[(xcell, ycell)][-1]:
                    self._colors[(xcell, ycell)].append(None)
                self._paint_cell(xcell, ycell, dc, None, erase)
        return len(self._colors[(xcell, ycell)]) - 1

    def get_color_by_mouse(self, x, y):
        step = self._xsize / self._xcells
        xcell = int((x - self._xoffset) / step)
        ycell = int((y - self._yoffset) / step)
        try:
            c = self._colors[(xcell, ycell)][-1]
            if c:
                # Return a copy of the color, otherwise two consecutive colors in the same
                # cell would have the same colour, due to Python's pass by reference
                r, g, b = c.Get()
                return wx.Colour(r, g, b)
            else:
                return c
        except KeyError:
            return None
        except IndexError:
            return None

    def get_color_by_index(self, xcell, ycell, i):
        return self._colors[(xcell, ycell)][i]

    def mouse2cell(self, mousex, mousey):
        step = self._xsize / self._xcells
        xcell = int((mousex - self._xoffset) / step)
        ycell = int((mousey - self._yoffset) / step)
        return (xcell, ycell)

    def cell2mouse(self, xcell, ycell):
        step = self._xsize / self._xcells
        mousex = int(xcell * step + self._xoffset)
        mousey = int(ycell * step + self._yoffset)
        return (mousex, mousey)

    def _paint_cell(self, xcell, ycell, dc, color, erase=False):
        step = self._xsize / self._xcells
        px = xcell * step + self._xoffset
        py = ycell * step + self._yoffset
        if not erase:
            dc.SetPen(wx.Pen(color))
            dc.SetBrush(wx.Brush(color))
        else:
            dc.SetPen(wx.WHITE_PEN)
            dc.SetBrush(wx.WHITE_BRUSH)
        dc.DrawRectangle(px + 1, py + 1, step - 1, step - 1)
```
avg_line_length … hits (schema order): 33.968912 | 107 | 0.585265 | 838 | 6,556 | 4.409308 | 0.236277 | 0.05115 | 0.045737 | 0.05954 | 0.440866 | 0.397564 | 0.335859 | 0.283356 | 0.271719 | 0.271719 | 0 | 0.011341 | 0.314063 | 6,556 | 192 | 108 | 34.145833 | 0.810318 | 0.194783 | 0 | 0.350877 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.026316 | 0.017544 | 0.219298 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19effa59bdd92c4854c56be758df2693cacdcb3d | size: 1,158 | ext: py | lang: Python
max_stars: path=scraper/engine.py, repo=pesya/scraper, head=c088dc3dc613fec94e297ac71302d2305b44b14c, licenses=["BSD-3-Clause"], count=null, events null → null
max_issues: path=scraper/engine.py, repo=pesya/scraper, head=c088dc3dc613fec94e297ac71302d2305b44b14c, licenses=["BSD-3-Clause"], count=null, events null → null
max_forks: path=scraper/engine.py, repo=pesya/scraper, head=c088dc3dc613fec94e297ac71302d2305b44b14c, licenses=["BSD-3-Clause"], count=null, events null → null
```python
import sys
import csv

import requests
from parsel import Selector

from scraper.parser import get_features_from_item

start_url = 'http://www.world-art.ru/animation/rating_top.php'
SIGN_STDOUT = '-'
FORMAT_CSV = 'csv'
FORMAT_JL = 'jl'


def parse(url: str, out_path: str, out_format: str):
    """
    gets link and returns the response
    """
    response = requests.get(url)
    assert response.status_code == 200, f'bad status code: {response.status_code}'
    response_html = Selector(response.text)
    links_to_films = response_html.xpath('//td[@class="review"]/a[@class="review"]/@href').getall()
    out_file = sys.stdout if out_path == SIGN_STDOUT else open(out_path, 'w', buffering=1, newline='')
    for link in links_to_films:
        item_response = requests.get(link)
        # check the per-item response here, not the listing response fetched above
        assert item_response.status_code == 200, f'bad status code: {item_response.status_code}'
        item = get_features_from_item(item_response)
        if out_format == FORMAT_CSV:
            item_writer = csv.writer(out_file, delimiter=' ', quotechar=',', quoting=csv.QUOTE_MINIMAL)
            item_writer.writerow(item.values())
    out_file.close()
    return
```
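A hedged usage sketch; the module defines no `__main__` block, so it is presumably driven from other code. This writes scraped rows to stdout as CSV:

```python
# Scrape the top-rated listing and stream item rows to stdout ('-') as CSV.
parse(start_url, '-', FORMAT_CSV)
```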
avg_line_length … hits (schema order): 28.243902 | 103 | 0.69171 | 162 | 1,158 | 4.716049 | 0.469136 | 0.078534 | 0.094241 | 0.049738 | 0.10733 | 0.10733 | 0.10733 | 0.10733 | 0.10733 | 0 | 0 | 0.007423 | 0.185665 | 1,158 | 40 | 104 | 28.95 | 0.802757 | 0.029361 | 0 | 0 | 0 | 0 | 0.16787 | 0.08574 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.041667 | false | 0 | 0.208333 | 0 | 0.291667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19f6250f9d15cae4fb338cfbac1c36e435b2c1ca | size: 3,188 | ext: py | lang: Python
max_stars: path=third_party/nkata/tests/transformvideo_test.py, repo=google/offline-content-packager, head=5a023eeeed4973e452309b434a59ce745487fdd6, licenses=["Apache-2.0"], count=32, events 2016-05-31T13:01:46.000Z → 2022-03-18T11:17:36.000Z
max_issues: path=third_party/nkata/tests/transformvideo_test.py, repo=google/offline-content-packager, head=5a023eeeed4973e452309b434a59ce745487fdd6, licenses=["Apache-2.0"], count=null, events null → null
max_forks: path=third_party/nkata/tests/transformvideo_test.py, repo=google/offline-content-packager, head=5a023eeeed4973e452309b434a59ce745487fdd6, licenses=["Apache-2.0"], count=29, events 2016-06-08T18:11:00.000Z → 2021-09-28T04:14:34.000Z
```python
# Copyright 2015 The Offline Content Packager Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from os import makedirs
from os.path import dirname
from os.path import isdir
from os.path import join
import shutil
import tempfile
import unittest

import jinja2
from scripts.transformations import VideoTransformation
import yaml


class VideoTestCase(unittest.TestCase):

    def setUp(self):
        self.src_dir = tempfile.mkdtemp()
        self.dst_dir = tempfile.mkdtemp()
        self.tracking_code = "123456"
        self.video_subtitle = "test subtitle"
        self.video_summary = "test summary"
        self.video_name = "test_video"
        self.setUpMetadata()
        self.JINJA_ENVIRONMENT = jinja2.Environment(
            loader=jinja2.FileSystemLoader(self.src_dir),
            extensions=["jinja2.ext.autoescape"],
            autoescape=False)

    def setUpTemplate(self, template, content):
        template = join(self.src_dir, template)
        template_dir = dirname(template)
        if not isdir(template_dir):
            makedirs(template_dir)
        with open(template, "w") as f:
            f.write(content)

    def setUpMetadata(self):
        self.meta_data_content = {
            "title": "test video",
            "description": "test description",
            "sub_title": "test subtitle",
            "tags": "",
            "image_src": ""
        }
        self.metadata_file = join(self.src_dir, "video.yaml")
        f = open(self.metadata_file, "w")
        yaml.dump(self.meta_data_content, f)
        self.meta_data = {self.video_name: self.metadata_file}

    def createInstance(self):
        return VideoTransformation(self.tracking_code, self.JINJA_ENVIRONMENT)

    def tearDown(self):
        shutil.rmtree(self.src_dir)
        shutil.rmtree(self.dst_dir)

    def test_generate_html(self):
        html_name = "test_output.html"
        video_source = "/test/file/path/video_source.avi"
        video_type = "video/test"
        video_info = ("video_title", "video_subtitle", "video_description")
        template_content = ("{{ video_name }} / {{ video_type}} /"
                            " {{ video_source }} / {{ tracking_code }}")
        expected_output = "%s / %s / %s / %s" % (self.video_name, video_type,
                                                 video_source, self.tracking_code)
        self.setUpTemplate("templates/video.html", template_content)
        transformation = self.createInstance()
        video_detail = (self.video_name, video_source, video_type, video_info)
        transformation.generate_html(self.dst_dir, html_name, video_detail, None)
        # assert the output
        with open(join(self.dst_dir, "html_files", html_name), "r") as f:
            output = f.read()
        self.assertEquals(output, expected_output)


if __name__ == "__main__":
    unittest.main()
```
avg_line_length … hits (schema order): 32.20202 | 78 | 0.69542 | 409 | 3,188 | 5.244499 | 0.366748 | 0.027972 | 0.02331 | 0.022378 | 0.02704 | 0.02704 | 0 | 0 | 0 | 0 | 0 | 0.007042 | 0.198243 | 3,188 | 98 | 79 | 32.530612 | 0.83216 | 0.192284 | 0 | 0 | 0 | 0 | 0.15 | 0.020703 | 0 | 0 | 0 | 0 | 0.015385 | 1 | 0.092308 | false | 0 | 0.153846 | 0.015385 | 0.276923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19f8e4fcaecd9a3968eed26a324bf80026d1583f | size: 246 | ext: py | lang: Python
max_stars: path=algorithm/python/BAEKJOON_1436.py, repo=cjsrhd94/TIL, head=b91bab7d99d10c63f91af0790cb28ec3d228b68b, licenses=["MIT"], count=1, events 2021-08-19T06:23:00.000Z → 2021-08-19T06:23:00.000Z
max_issues: path=algorithm/python/BAEKJOON_1436.py, repo=cjsrhd94/TIL, head=b91bab7d99d10c63f91af0790cb28ec3d228b68b, licenses=["MIT"], count=null, events null → null
max_forks: path=algorithm/python/BAEKJOON_1436.py, repo=cjsrhd94/TIL, head=b91bab7d99d10c63f91af0790cb28ec3d228b68b, licenses=["MIT"], count=null, events null → null
```python
n = int(input())

count = 0
number = 0
while True:
    if '666' in str(number):  # if '666' appears in the string form of the number, count it
        count += 1
        if count == n:  # when count equals the input n, print the n-th such number
            print(number)
            break
    number += 1
```
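For reference, the sequence of numbers containing '666' starts 666, 1666, 2666, …, so a small sanity check (same brute-force logic, just wrapped in a function for testing) is:

```python
def nth_666(n):
    # Identical scan to the script above, packaged so it can be asserted on.
    count, number = 0, 0
    while True:
        if '666' in str(number):
            count += 1
            if count == n:
                return number
        number += 1

assert nth_666(1) == 666
assert nth_666(2) == 1666
```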
avg_line_length … hits (schema order): 24.6 | 68 | 0.565041 | 39 | 246 | 3.564103 | 0.717949 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059524 | 0.317073 | 246 | 10 | 69 | 24.6 | 0.767857 | 0.296748 | 0 | 0 | 0 | 0 | 0.017442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19f91845aaff11955f6b430aa3684474c464bf80 | size: 3,599 | ext: py | lang: Python
max_stars: path=cacheTraceAnalysis/plot/reqRate.py, repo=Thesys-lab/cacheWorkloadAnalysisOSDI20, head=cfc5bbb5c8d909571546c78c247561c9db449469, licenses=["Apache-2.0"], count=6, events 2020-11-12T07:51:02.000Z → 2022-03-27T20:20:01.000Z
max_issues: path=cacheTraceAnalysis/plot/reqRate.py, repo=Thesys-lab/InMemoryCachingWorkloadAnalysis, head=5f6f9f7e29a164478f3fc28eb64c170bbbafdec7, licenses=["Apache-2.0"], count=null, events null → null
max_forks: path=cacheTraceAnalysis/plot/reqRate.py, repo=Thesys-lab/InMemoryCachingWorkloadAnalysis, head=5f6f9f7e29a164478f3fc28eb64c170bbbafdec7, licenses=["Apache-2.0"], count=1, events 2021-12-31T01:16:09.000Z → 2021-12-31T01:16:09.000Z
""" plot request rate
"""
import os, sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../"))
from utils.common import *
def _cal_req_rate(trace_reader, window):
metadata_name = "reqRateList_w{}_{}.pickle".format(window, trace_reader.trace_path.split("/")[-1])
loaded = load_metadata(metadata_name)
if loaded is not None:
return loaded
start_ts = -1
req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list = [], [], [], []
req_cnt, obj_cnt, req_byte, obj_byte = 0, 0, 0, 0
seen_obj = set()
for req in trace_reader:
if start_ts == -1:
start_ts = req.real_time
req_cnt += req.cnt
req_byte += req.req_size
if req.obj_id not in seen_obj:
obj_cnt += 1
obj_byte += req.req_size
seen_obj.add(req.obj_id)
if (req.real_time - start_ts)//window > len(req_cnt_list):
req_cnt_list.append(req_cnt/window)
obj_cnt_list.append(obj_cnt/window)
req_Gbps_list.append(req_byte/GB/window*8)
obj_Gbps_list.append(obj_byte/GB/window*8)
req_cnt, obj_cnt, req_byte, obj_byte = 0, 0, 0, 0
seen_obj.clear()
trace_reader.reset()
save_metadata((req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list), metadata_name)
return req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list
def plot_req_rate(trace_reader, window, plot_type=("cnt", "byte")):
COLOR = JPlot.get_color(2)
req_cnt_list, obj_cnt_list, req_Gbps_list, obj_Gbps_list = _cal_req_rate(trace_reader, window)
ret_dict = {
"mean_req_cnt": sum(req_cnt_list)/len(req_cnt_list),
"mean_obj_cnt": sum(obj_cnt_list)/len(obj_cnt_list),
"mean_req_Gbps": sum(req_Gbps_list)/len(req_Gbps_list),
"mean_obj_Gbps": sum(obj_Gbps_list)/len(obj_Gbps_list),
}
if "cnt" in plot_type or plot_type == "cnt":
plt.plot([i*window/3600 for i in range(len(req_cnt_list))], [i/1000 for i in req_cnt_list], nomarker=True, label="request", color=next(COLOR), linewidth=1)
plt.plot([i*window/3600 for i in range(len(obj_cnt_list))], [i/1000 for i in obj_cnt_list], nomarker=True, label="object", color=next(COLOR), linewidth=1)
plt.xlabel("Time (Hour)")
plt.ylabel("Request rate (K QPS)")
plt.legend()
plt.savefig("{}/{}_reqRateCnt_w{}.png".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window), no_save_plot_data=True)
plt.clf()
COLOR = JPlot.get_color(2)
if "byte" in plot_type or plot_type == "byte":
y1, y2, ylabel = req_Gbps_list, obj_Gbps_list, "Request rate (Gbps)"
if sum(req_Gbps_list)/len(req_Gbps_list) < 1:
y1 = [i*1024 for i in req_Gbps_list]
y2 = [i*1024 for i in obj_Gbps_list]
ylabel = "Request rate (Mbps)"
plt.plot([i*window/3600 for i in range(len(req_Gbps_list))], y1, nomarker=True, color=next(COLOR), label="request", linewidth=1)
plt.plot([i*window/3600 for i in range(len(obj_Gbps_list))], y2, nomarker=True, color=next(COLOR), label="object", linewidth=1)
plt.xlabel("Time (Hour)")
plt.ylabel(ylabel)
plt.legend()
plt.savefig("{}/{}_reqRateTraffic_w{}.png".format(FIG_DIR, trace_reader.trace_path.split("/")[-1], window), no_save_plot_data=True)
plt.clf()
return ret_dict
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument("--trace", type=str, help="trace path")
ap.add_argument("--type", type=str, default="cnt", help="plot type")
ap.add_argument("--window", type=int, default=60, help="the size of window in sec")
p = ap.parse_args()
plot_req_rate(TwrShortBinTraceReader(p.trace), p.window, plot_type=(p.type, ))
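Per the `argparse` block, the script is presumably invoked as `python reqRate.py --trace /path/to/trace --type cnt --window 60`, where `--trace` points at a short binary trace readable by `TwrShortBinTraceReader` (supplied by `utils.common`) and `--window` is the aggregation window in seconds.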
avg_line_length … hits (schema order): 38.698925 | 159 | 0.689358 | 600 | 3,599 | 3.825 | 0.193333 | 0.076688 | 0.057516 | 0.030501 | 0.467974 | 0.410458 | 0.309368 | 0.293682 | 0.237909 | 0.237909 | 0 | 0.02082 | 0.159211 | 3,599 | 92 | 160 | 39.119565 | 0.737607 | 0.004724 | 0 | 0.140845 | 0 | 0 | 0.094065 | 0.021557 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.042254 | 0 | 0.112676 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19fac7af0c83f21b636a9b1fa9c53ac1705d1cfb | size: 5,097 | ext: py | lang: Python
max_stars: path=utils.py, repo=sjenni/DeepBilevel, head=9db6c9d81188e891104677a7ffc4b045421fb097, licenses=["MIT"], count=8, events 2019-10-23T12:16:13.000Z → 2020-11-16T02:20:28.000Z
max_issues: path=utils.py, repo=sjenni/DeepBilevel, head=9db6c9d81188e891104677a7ffc4b045421fb097, licenses=["MIT"], count=null, events null → null
max_forks: path=utils.py, repo=sjenni/DeepBilevel, head=9db6c9d81188e891104677a7ffc4b045421fb097, licenses=["MIT"], count=4, events 2020-02-06T14:54:47.000Z → 2020-10-25T03:03:04.000Z
```python
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver


def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.

    Note that this function provides a synchronization point across all towers.

    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
      List of pairs of (gradient, variable) where the gradient has been averaged
      across all towers.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads


def montage_tf(imgs, num_h, num_w):
    """Makes a montage of imgs that can be used in image_summaries.

    Args:
      imgs: Tensor of images
      num_h: Number of images per column
      num_w: Number of images per row
    Returns:
      A montage of num_h*num_w images
    """
    imgs = tf.unstack(imgs)
    img_rows = [None] * num_h
    for r in range(num_h):
        img_rows[r] = tf.concat(axis=1, values=imgs[r * num_w:(r + 1) * num_w])
    montage = tf.concat(axis=0, values=img_rows)
    return tf.expand_dims(montage, 0)


def remove_missing(var_list, model_path):
    reader = pywrap_tensorflow.NewCheckpointReader(model_path)
    if isinstance(var_list, dict):
        var_dict = var_list
    else:
        var_dict = {var.op.name: var for var in var_list}
    available_vars = {}
    for var in var_dict:
        if reader.has_tensor(var):
            available_vars[var] = var_dict[var]
        else:
            logging.warning(
                'Variable %s missing in checkpoint %s', var, model_path)
    var_list = available_vars
    return var_list


def assign_from_checkpoint_fn(model_path, var_list, ignore_missing_vars=False,
                              reshape_variables=False):
    """Returns a function that assigns specific variables from a checkpoint.

    Args:
      model_path: The full path to the model checkpoint. To get latest checkpoint
        use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
      var_list: A list of `Variable` objects or a dictionary mapping names in the
        checkpoint to the corresponding variables to initialize. If empty or None,
        it would return no_op(), None.
      ignore_missing_vars: Boolean, if True it would ignore variables missing in
        the checkpoint with a warning instead of failing.
      reshape_variables: Boolean, if True it would automatically reshape variables
        which are of different shape then the ones stored in the checkpoint but
        which have the same number of elements.
    Returns:
      A function that takes a single argument, a `tf.Session`, that applies the
      assignment operation.
    Raises:
      ValueError: If the checkpoint specified at `model_path` is missing one of
        the variables in `var_list`.
    """
    if ignore_missing_vars:
        var_list = remove_missing(var_list, model_path)
    saver = tf_saver.Saver(var_list, reshape=reshape_variables)

    def callback(session):
        saver.restore(session, model_path)
    return callback


def get_variables_to_train(trainable_scopes=None):
    """Returns a list of variables to train.

    Returns:
      A list of variables to train by the optimizer.
    """
    if trainable_scopes is None:
        variables_to_train = tf.trainable_variables()
    else:
        scopes = [scope.strip() for scope in trainable_scopes.split(',')]
        variables_to_train = []
        for scope in scopes:
            variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
            variables_to_train.extend(variables)
    print('Variables to train: {}'.format([v.op.name for v in variables_to_train]))
    return variables_to_train


def get_checkpoint_path(checkpoint_dir):
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if not ckpt:
        print("No checkpoint in {}".format(checkpoint_dir))
        return None
    return ckpt.model_checkpoint_path
```
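A minimal sketch of how `average_gradients` is typically wired into a TF1-style multi-GPU loop. The names `optimizer`, `loss_fn`, and `batches` are hypothetical placeholders, not part of this module:

```python
# Hypothetical multi-tower setup: one (gradient, variable) list per GPU,
# then averaged into a single training op.
tower_grads = []
for gpu_id, batch in enumerate(batches):
    with tf.device('/gpu:%d' % gpu_id):
        loss = loss_fn(batch)
        tower_grads.append(optimizer.compute_gradients(loss))
train_op = optimizer.apply_gradients(average_gradients(tower_grads))
```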
avg_line_length … hits (schema order): 36.148936 | 84 | 0.673141 | 706 | 5,097 | 4.685552 | 0.290368 | 0.025393 | 0.043531 | 0.00786 | 0.05925 | 0.035671 | 0.018138 | 0 | 0 | 0 | 0 | 0.004223 | 0.256622 | 5,097 | 140 | 85 | 36.407143 | 0.868831 | 0.429468 | 0 | 0.046875 | 0 | 0 | 0.028592 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109375 | false | 0 | 0.0625 | 0 | 0.28125 | 0.03125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19fd46480858b4a1d5b5836cc3a46a14d32272f9 | size: 828 | ext: py | lang: Python
max_stars: path=tests/backup_bsps.py, repo=LaudateCorpus1/bsp_tool, head=e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19, licenses=["MIT"], count=null, events null → null
max_issues: path=tests/backup_bsps.py, repo=LaudateCorpus1/bsp_tool, head=e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19, licenses=["MIT"], count=null, events null → null
max_forks: path=tests/backup_bsps.py, repo=LaudateCorpus1/bsp_tool, head=e8c2489ac3bda5a4467f1dce220a76bbf4ce5b19, licenses=["MIT"], count=null, events null → null
```python
import os
import shutil
import sys

from maplist import installed_games


backup_dir = "F:/bsps"
if len(sys.argv) == 2:
    backup_dir = sys.argv[1]
print(f"Making backups in '{backup_dir}'")

i = 0
for base_dir, game_dir in installed_games:
    i += 1
    print(f"Backing up ({i}/{len(installed_games)}) {game_dir}...")
    for map_dir in installed_games[(base_dir, game_dir)]:
        src_dir = os.path.join(base_dir, game_dir, map_dir)
        dest_dir = os.path.join(backup_dir, game_dir, map_dir)
        os.makedirs(dest_dir, exist_ok=True)
        try:  # note the missed file(s) and continue
            shutil.copytree(src_dir, dest_dir, dirs_exist_ok=True)
        except shutil.Error as err:
            print(f"*** ERROR *** {err}")
        except FileNotFoundError as err:
            print(f"*** ERROR *** {err}")
```
avg_line_length … hits (schema order): 30.666667 | 67 | 0.642512 | 128 | 828 | 3.945313 | 0.40625 | 0.069307 | 0.079208 | 0.083168 | 0.138614 | 0.075248 | 0 | 0 | 0 | 0 | 0 | 0.00625 | 0.227053 | 828 | 26 | 68 | 31.846154 | 0.782813 | 0.043478 | 0 | 0.090909 | 0 | 0 | 0.164557 | 0.035443 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0.181818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: 19ff517f6d368213182e5f5031c40842eae17a49 | size: 1,391 | ext: py | lang: Python
max_stars: path=examples/server.py, repo=fhamborg/Giveme5W, head=b5f49712654ab466e605716b4cd9f8dce9bcdd88, licenses=["Apache-2.0"], count=16, events 2018-03-28T11:20:11.000Z → 2020-09-17T19:39:25.000Z
max_issues: path=examples/server.py, repo=fhamborg/Giveme5W, head=b5f49712654ab466e605716b4cd9f8dce9bcdd88, licenses=["Apache-2.0"], count=3, events 2018-03-15T10:17:29.000Z → 2018-05-16T13:14:28.000Z
max_forks: path=examples/server.py, repo=fhamborg/Giveme5W, head=b5f49712654ab466e605716b4cd9f8dce9bcdd88, licenses=["Apache-2.0"], count=6, events 2018-05-08T12:53:51.000Z → 2021-09-25T03:21:02.000Z
```python
import logging

from flask import Flask, request, jsonify

from extractor.document import Document
from extractor.five_w_extractor import FiveWExtractor

app = Flask(__name__)
log = logging.getLogger(__name__)
host = None
port = 5000
debug = False
options = None
extractor = FiveWExtractor()

ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
log.addHandler(ch)
log.setLevel(logging.DEBUG)


def run():
    log.info("starting server on port %i", port)
    app.run(host, port, debug)
    log.info("server has stopped")


@app.route('/extract', methods=['GET', 'POST'])
def extract():
    json_article = request.get_json()
    if not json_article:
        log.warning("received no article")
        return jsonify({"error": "no article defined"})

    article = {}
    if json_article.get('title'):
        article['title'] = json_article.get('title')
        article['description'] = json_article.get('description')
        article['text'] = json_article.get('text')
    else:
        article['title'] = json_article['articletext']
        article['description'] = None
        article['text'] = None

    # log the normalized title: the raw JSON may lack a 'title' key entirely
    log.debug("retrieved raw article for extraction: %s", article['title'])
    document = Document(article['title'], article['description'], article['text'])
    extractor.parse(document)
    return jsonify(document.questions)


if __name__ == "__main__":
    run()
```
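A hedged client-side sketch against the `/extract` route, assuming the server above is running with its default host and port (the article text is illustrative):

```python
import requests

article = {"title": "Example title",
           "description": "Example description",
           "text": "The full article text goes here."}
resp = requests.post("http://localhost:5000/extract", json=article)
print(resp.json())  # the extractor's answers to the five W questions
```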
avg_line_length … hits (schema order): 26.245283 | 82 | 0.675054 | 165 | 1,391 | 5.527273 | 0.381818 | 0.096491 | 0.061404 | 0.041667 | 0.057018 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003537 | 0.186916 | 1,391 | 52 | 83 | 26.75 | 0.802829 | 0 | 0 | 0 | 0 | 0 | 0.179727 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.1 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: c202c2c6ef86a127b7a659f1ab70e457fb054b54 | size: 4,799 | ext: py | lang: Python
max_stars: path=dserve/__init__.py, repo=JIC-CSB/dserve, head=5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e, licenses=["MIT"], count=null, events null → null
max_issues: path=dserve/__init__.py, repo=JIC-CSB/dserve, head=5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e, licenses=["MIT"], count=null, events null → null
max_forks: path=dserve/__init__.py, repo=JIC-CSB/dserve, head=5f20d9de8ffb52f98ef9c68b327fe1ca9fcee17e, licenses=["MIT"], count=null, events null → null
"""Script for running the dserve server."""
import os
from flask import (
Flask,
jsonify,
send_file,
abort,
request,
)
from flask_cors import CORS, cross_origin
app = Flask(__name__)
cors = CORS(app)
@app.route("/")
@cross_origin()
def root():
content = {
"_links": {
"self": {"href": "/"},
"items": {"href": "/items"},
"overlays": {"href": "/overlays"}
},
"uuid": app._dataset._admin_metadata["uuid"],
"dtool_version": app._dataset._admin_metadata["dtool_version"],
"name": app._dataset._admin_metadata["name"],
"creator_username": app._dataset._admin_metadata["creator_username"],
}
return jsonify(content)
def items_root():
items = []
for i in app._dataset.manifest["file_list"]:
item = {
"_links": {"self": {"href": "/items/{}".format(i["hash"])}},
"identifier": i["hash"],
}
items.append(item)
content = {
"_links": {
"self": {"href": "/items"},
},
"_embedded": {
"items": items,
}
}
return jsonify(content)
def specific_item(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}".format(identifier)},
"content": {"href": "/items/{}/raw".format(identifier)},
"overlays": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name, overlay in overlays.items():
content[overlay_name] = overlay[identifier]
return jsonify(content)
@app.route("/items")
@app.route("/items/<identifier>")
@cross_origin()
def items(identifier=None):
if identifier is None:
return items_root()
else:
return specific_item(identifier)
@app.route("/items/<identifier>/raw")
@cross_origin()
def raw_item(identifier):
try:
item = app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
item_path = os.path.join(
app._dataset._abs_path,
app._dataset.data_directory,
item["path"]
)
return send_file(item_path, item["mimetype"])
@app.route("/items/<identifier>/overlays")
@cross_origin()
def item_overlays(identifier):
try:
app._dataset.item_from_identifier(identifier)
except KeyError:
abort(404)
content = {
"_links": {
"self": {"href": "/items/{}/overlays".format(identifier)},
},
}
overlays = app._dataset.access_overlays()
for overlay_name in overlays.keys():
href = "/overlays/{}/{}".format(overlay_name, identifier)
content["_links"][overlay_name] = {"href": href}
return jsonify(content)
@app.route("/overlays/<overlay>/<identifier>", methods=["GET", "PUT"])
@cross_origin()
def item_overlay_content(overlay, identifier):
overlays = app._dataset.access_overlays()
try:
requested_overlay = overlays[overlay]
requested_overlay[identifier]
except KeyError:
abort(404)
if request.method == "PUT":
if not request.is_json:
abort(422)
new_value = request.get_json()
requested_overlay[identifier] = new_value
try:
app._dataset.persist_overlay(
overlay, requested_overlay, overwrite=True)
except KeyError:
abort(405)
return "", 201
elif request.method == "GET":
value = requested_overlay[identifier]
return jsonify(value)
def overlay_root():
overlays = app._dataset.access_overlays()
content = {
"_links": {
"self": {"href": "/overlays"}},
}
for overlay_name in overlays.keys():
value = {"href": "/overlays/{}".format(overlay_name)}
content["_links"][overlay_name] = value
return jsonify(content)
def specific_overlay(overlay_name):
overlays = app._dataset.access_overlays()
try:
overlay = overlays[overlay_name]
except KeyError:
abort(404)
return jsonify(overlay)
def creaate_new_overlay(overlay_name):
empty_overlay = app._dataset.empty_overlay()
try:
app._dataset.persist_overlay(overlay_name, empty_overlay)
except IOError:
abort(409)
return "", 201
@app.route("/overlays")
@app.route("/overlays/<overlay_name>", methods=["GET", "PUT"])
@cross_origin()
def overalys(overlay_name=None):
if overlay_name is None:
return overlay_root()
else:
if request.method == "PUT":
return creaate_new_overlay(overlay_name)
elif request.method == "GET":
return specific_overlay(overlay_name)
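A hedged client sketch that walks the HAL-style `_links` this API exposes. It assumes a running instance on port 5000, which this module itself does not start:

```python
import requests

base = "http://localhost:5000"   # assumption: where the dserve app is served
root = requests.get(base + "/").json()
items = requests.get(base + root["_links"]["items"]["href"]).json()
for item in items["_embedded"]["items"]:
    print(item["identifier"], item["_links"]["self"]["href"])
```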
avg_line_length … hits (schema order): 25.526596 | 77 | 0.600542 | 503 | 4,799 | 5.497018 | 0.17495 | 0.065099 | 0.03038 | 0.036166 | 0.383725 | 0.237613 | 0.164557 | 0.146474 | 0.146474 | 0.146474 | 0 | 0.00838 | 0.254011 | 4,799 | 187 | 78 | 25.663102 | 0.763966 | 0.00771 | 0 | 0.36129 | 0 | 0 | 0.119008 | 0.022498 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070968 | false | 0 | 0.019355 | 0 | 0.187097 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: c203136ec3038930bc5926aaf959f30e095e46a5 | size: 1,610 | ext: py | lang: Python
max_stars: path=kkutil/security.py, repo=kaka19ace/kkutils, head=1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1, licenses=["MIT"], count=1, events 2015-12-13T18:42:52.000Z → 2015-12-13T18:42:52.000Z
max_issues: path=kkutil/security.py, repo=kaka19ace/kkutil, head=1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1, licenses=["MIT"], count=null, events null → null
max_forks: path=kkutil/security.py, repo=kaka19ace/kkutil, head=1ac449488d85ba2c6b18c5dc9cf77a0bc36579b1, licenses=["MIT"], count=null, events null → null
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
"""
util regex tool

refs:
    http://www.symantec.com/connect/articles/detection-sql-injection-and-cross-site-scripting-attacks
"""

import re

INJECTION_REGEX = re.compile(
    r"(%27)|(\')|(\-\-)|(%23)|(#)|"                              # Regex for detection of SQL meta-characters
    r"\w*((%27)|(\'))\s+((%6F)|o|(%4F))((%72)|r|(%52))\s*|"      # Modified regex for detection of SQL meta-characters eg: ' or 1 = 1' detect word 'or',
    r"((%3D)|(=))[^\n]*((%27)|(\')|(\-\-)|(%3B)|(;))|"           # Regex for typical SQL Injection attack eg: '= 1 --'
    r"((%27)|(\'))union|"                                        # Regex for detecting SQL Injection with the UNION keyword
    r"((%27)|(\'))select|"                                       # Regex for detecting SQL Injection with the SELECT keyword
    r"((%27)|(\'))insert|"                                       # Regex for detecting SQL Injection with the INSERT keyword
    r"((%27)|(\'))update|"                                       # Regex for detecting SQL Injection with the UPDATE keyword
    r"((%27)|(\'))drop",                                         # Regex for detecting SQL Injection with the DROP keyword
    re.IGNORECASE
)

CSS_ATTACK_REGREX = re.compile(r"((%3C)|<)((%2F)|/)*[a-z0-9%]+((%3E)|>)", re.IGNORECASE)
CSS_IMG_SRC_ATTACK_REGEX = re.compile(
    r"((%3C)|<)((%69)|i|(%49))((%6D)|m|(%4D))((%67)|g|(%47))[^\n]+((%3E)|>)",
    re.IGNORECASE
)
CSS_PARANOID_ATTACK_REGEX = re.compile("((%3C)|<)[^\n]+((%3E)|>)", re.IGNORECASE)


def is_injection_string(s):
    return True if INJECTION_REGEX.match(s) else False


def is_css_attack_string(s):
    if CSS_ATTACK_REGREX.match(s) or \
            CSS_IMG_SRC_ATTACK_REGEX.match(s) or \
            CSS_PARANOID_ATTACK_REGEX.match(s):
        return True
    return False
```
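Quick checks of the two detectors (the inputs are illustrative):

```python
# A classic tautology injection and a script tag should both be flagged;
# plain text should not (both regexes are anchored at the string start).
assert is_injection_string("' or 1=1 --")
assert is_css_attack_string("<script>alert(1)</script>")
assert not is_css_attack_string("plain text")
```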
avg_line_length … hits (schema order): 35 | 148 | 0.608075 | 234 | 1,610 | 4.076923 | 0.393162 | 0.067086 | 0.089099 | 0.104822 | 0.381551 | 0.339623 | 0.339623 | 0.264151 | 0.264151 | 0.213836 | 0 | 0.036136 | 0.157764 | 1,610 | 45 | 149 | 35.777778 | 0.667404 | 0.390062 | 0 | 0.076923 | 0 | 0.076923 | 0.360996 | 0.266598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.038462 | 0.038462 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
---
hexsha: c204bfd19101390dbf534e7049d9b49aef3685e3 | size: 1,520 | ext: py | lang: Python
max_stars: path=update_eeprom_rc.py, repo=rkojedzinszky/thermo-sensor, head=f0b5aa6dbf231b566e00a683c5bb1551569d2463, licenses=["BSD-3-Clause"], count=2, events 2019-04-25T17:38:02.000Z → 2020-03-03T22:50:04.000Z
max_issues: path=update_eeprom_rc.py, repo=rkojedzinszky/thermo-sensor, head=f0b5aa6dbf231b566e00a683c5bb1551569d2463, licenses=["BSD-3-Clause"], count=null, events null → null
max_forks: path=update_eeprom_rc.py, repo=rkojedzinszky/thermo-sensor, head=f0b5aa6dbf231b566e00a683c5bb1551569d2463, licenses=["BSD-3-Clause"], count=null, events null → null
```python
#!/usr/bin/env python

REGISTERS = {
    'IOCFG2': 0x00,
    'IOCFG1': 0x01,
    'IOCFG0': 0x02,
    'FIFOTHR': 0x03,
    'SYNC1': 0x04,
    'SYNC0': 0x05,
    'PKTLEN': 0x06,
    'PKTCTRL1': 0x07,
    'PKTCTRL0': 0x08,
    'ADDR': 0x09,
    'CHANNR': 0x0A,
    'FSCTRL1': 0x0B,
    'FSCTRL0': 0x0C,
    'FREQ2': 0x0D,
    'FREQ1': 0x0E,
    'FREQ0': 0x0F,
    'MDMCFG4': 0x10,
    'MDMCFG3': 0x11,
    'MDMCFG2': 0x12,
    'MDMCFG1': 0x13,
    'MDMCFG0': 0x14,
    'DEVIATN': 0x15,
    'MCSM2': 0x16,
    'MCSM1': 0x17,
    'MCSM0': 0x18,
    'FOCCFG': 0x19,
    'BSCFG': 0x1A,
    'AGCCTRL2': 0x1B,
    'AGCCTRL1': 0x1C,
    'AGCCTRL0': 0x1D,
    'WOREVT1': 0x1E,
    'WOREVT0': 0x1F,
    'WORCTRL': 0x20,
    'FREND1': 0x21,
    'FREND0': 0x22,
    'FSCAL3': 0x23,
    'FSCAL2': 0x24,
    'FSCAL1': 0x25,
    'FSCAL0': 0x26,
    'RCCTRL1': 0x27,
    'RCCTRL0': 0x28,
    'FSTEST': 0x29,
    'PTEST': 0x2A,
    'AGCTEST': 0x2B,
    'TEST2': 0x2C,
    'TEST1': 0x2D,
    'TEST0': 0x2E,
    'PATABLE': 0x3E,
}

if __name__ == '__main__':
    import sys
    import re

    with open('eeprom', 'r+b') as fh:
        fh.seek(20)
        for line in sys.stdin:
            if re.match('^\s*#', line):
                continue
            m = re.match('(?P<reg>\w+)\s+(?P<value>[0-9a-fA-F]+)', line)
            if not m:
                continue
            m = m.groupdict()
            fh.write(chr(REGISTERS[m['reg']]))
            fh.write(chr(int(m['value'], 16)))
        fh.write(b"\xff" * (512 - fh.tell()))
```
| 20.540541
| 72
| 0.484211
| 170
| 1,520
| 4.282353
| 0.829412
| 0.028846
| 0.027473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.165377
| 0.319737
| 1,520
| 73
| 73
| 20.821918
| 0.538685
| 0.013158
| 0
| 0.030769
| 0
| 0.015385
| 0.247498
| 0.02535
| 0
| 0
| 0.128085
| 0
| 0
| 1
| 0
| false
| 0
| 0.030769
| 0
| 0.030769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2065e5fc7e61fdabd4ab6fd12c1ead2ad9d477a
| 78,713
|
py
|
Python
|
htdeblur/acquisition/motion.py
|
zfphil/htdeblur
|
ac557284f9913292721a6b9f943ff9b921043978
|
[
"BSD-3-Clause"
] | 2
|
2020-01-16T18:30:55.000Z
|
2020-02-06T08:33:51.000Z
|
htdeblur/acquisition/motion.py
|
zfphil/htdeblur
|
ac557284f9913292721a6b9f943ff9b921043978
|
[
"BSD-3-Clause"
] | null | null | null |
htdeblur/acquisition/motion.py
|
zfphil/htdeblur
|
ac557284f9913292721a6b9f943ff9b921043978
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2017 Regents of the University of California
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os, sys, time, copy, collections, math, json
import numpy as np
import scipy as sp
import matplotlib
from matplotlib import pyplot as plt
import llops as yp
# Custom scale bar object
from matplotlib_scalebar.scalebar import ScaleBar
# Libwallerlab imports
from llops import display
from llops import Roi
class StopAndStareAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_type='bf',
illumination_sequence=None,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
trigger_mode='software',
motion_acceleration_mm_s_2=1e3,
flip_pathway=False,
acquisition_timeout_s=3,
illumination_na_pad=0.03,
illumination_color={'w': 127},
settle_time_s=0):
# Parse options
self.illumination_type = illumination_type
self.settle_time_s = settle_time_s
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.velocity_mm_s = velocity_mm_s
self.max_exposure_time_s = max_exposure_time_s
self.illumination_na_pad = illumination_na_pad
self.illumination_color = illumination_color
self.acquisition_timeout_s = acquisition_timeout_s
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Then, add software triggered elements
for controller in hardware_controller_list:
            if controller.trigger_mode == 'software':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
# Check to be sure a sequence acquisition is not running
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
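        # e.g. (illustrative numbers) 6.5 um camera pixels with a 20x objective and 1x
        # system magnification give an effective pixel size of 6.5 / 20 = 0.325 um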
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default, since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
self.metadata.type = 'stop and stare'
assert 'illumination' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Stop and Stare acquisition requires programmable positioning device'
# Generate motion pathway
self.hardware_controller_list['position'].state_sequence = self.genStopAndStarePathwayRaster(
self.object_size_mm, self.frame_spacing_mm)
# Generate illumination sequence
        illumination_pattern_sequence = [self.illumination_type] * \
            len(self.hardware_controller_list['position'].state_sequence)
        self.hardware_controller_list['illumination'].state_sequence = self.genMultiContrastSequence(
            illumination_pattern_sequence)
# Tell device not to use feedback
self.hardware_controller_list['illumination'].trigger_wait_flag = False
self.hardware_controller_list['illumination'].command('trs.0.500.0')
self.hardware_controller_list['illumination'].command('trs.1.500.0')
self.hardware_controller_list['position'].goToPosition((0,0))
self.hardware_controller_list['position'].command('ENCODER X 1')
self.hardware_controller_list['position'].command('ENCODER Y 1')
self.hardware_controller_list['position'].command('ENCW X 100')
self.hardware_controller_list['position'].command('ENCW Y 100')
def acquire(self, exposure_time_ms=50):
# Allocate memory for frames
if self.hardware_controller_list['camera'].isSequenceRunning():
self.hardware_controller_list['camera'].sequenceStop()
self.hardware_controller_list['camera'].setBufferSizeMb(
20 * len(self.hardware_controller_list['position'].state_sequence))
# Set camera exposure
self.hardware_controller_list['camera'].setExposure(exposure_time_ms / 1e3)
self.hardware_controller_list['camera'].setTriggerMode('hardware')
self.hardware_controller_list['camera'].runSequence()
self.hardware_controller_list['illumination'].bf()
        # Snap one image to ensure all acquisitions are started
self.hardware_controller_list['camera'].snap()
# generate frame_list
t0 = time.time()
frames_acquired = 0
frame_list = []
for frame in yp.display.progressBar(self.hardware_controller_list['position'].state_sequence, name='Frames Acquired'):
pos = frame['states']
x = pos[0][0]['value']['x']
y = pos[0][0]['value']['y']
self.hardware_controller_list['position'].goToPosition((x, y), blocking=True)
time.sleep(self.settle_time_s)
frame_list.append(self.hardware_controller_list['camera'].snap())
frames_acquired += 1
# print('Acquired %d of %d frames' % (frames_acquired, len(self.hardware_controller_list['position'].state_sequence)))
t_acq_sns = time.time() - t0
print("Acquisition took %.4f seconds" % (t_acq_sns))
# Create dataset
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Assign acquisition time
self.metadata.acquisition_time_s = t_acq_sns
# Apply simple geometric transformations
if self.metadata.camera.transpose:
frame_list = frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
frame_list = np.flip(frame_list, 2)
if self.metadata.camera.flip_y:
frame_list = np.flip(frame_list, 1)
# Assign
dataset.frame_list = [frame for frame in frame_list]
# Set frame state list
self.n_frames = len(self.hardware_controller_list['position'].state_sequence)
frame_state_list = []
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = [0]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
dataset.metadata = self.metadata
dataset.type = 'stop_and_stare'
dataset.frame_state_list = frame_state_list
return dataset
def genStopAndStarePathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=1, include_minor_axis=False):
# Determine major axis
if major_axis is None:
major_axis = np.argmax(np.asarray(object_size_mm))
if object_size_mm[0] == object_size_mm[1]:
major_axis = 1
        # Determine number of measurements
        measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)
                                    ).astype(int)  # two components in x and y
# Determine slightly smaller frame spacing for optimal coverage of object
frame_spacing_mm = (object_size_mm[0] / measurement_count[0], object_size_mm[1] / measurement_count[1])
# Error checking
        assert np.any(measurement_count > 1), "frame_spacing_mm must be smaller than object_size_mm!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col,
'y_start': frame_spacing_mm[0] * row,
'x_end': frame_spacing_mm[1] * col,
'y_end': frame_spacing_mm[0] * row,
'linear_segment_index': linear_segment_index})
linear_segment_index += 1
# make the center the mean of the pathway
path_means = []
for path in pathway:
path_mean = ((path['y_start']), (path['x_start']))
path_means.append(path_mean)
# mean = np.sum(np.asarray(path_means), axis=1) / len(path_means)
mean = np.sum(np.asarray(path_means), axis=0) / len(path_means)
for path in pathway:
path['x_start'] -= mean[1]
path['x_end'] -= mean[1]
path['y_start'] -= mean[0]
path['y_end'] -= mean[0]
# return pathway
state_sequence = []
for path in pathway:
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = self.hardware_controller_list['camera'].getExposure()
common_state_dict['led_update_rate_us'] = None
common_state_dict['linear_segment_index'] = None
common_state_dict['frame_distance'] = 0
common_state_dict['exposure_distance'] = 0
common_state_dict['velocity'] = self.velocity_mm_s
common_state_dict['acceleration'] = self.motion_acceleration_mm_s_2
common_state_dict['n_blur_positions_exposure'] = 1
common_state_dict['position_delta_x_mm'] = 0
common_state_dict['position_delta_y_mm'] = 0
path_dict = {'value': {'time_index' : 0,
'x': path['x_start'],
'y': path['y_start']}}
state_sequence.append({'states' : [[path_dict]], 'common' : common_state_dict})
        return state_sequence
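    # Illustrative shape of one state_sequence entry returned above (values are examples):
    #   {'states': [[{'value': {'time_index': 0, 'x': -0.25, 'y': -0.25}}]],
    #    'common': {'frame_time': 0.05, 'velocity': None, ...}}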
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
        plt.title('Pathway (blue = start, red = end)')
plt.gca().invert_yaxis()
def genMultiContrastSequence(self, illumination_pattern_sequence, n_acquisitions=1,
darkfield_annulus_width_na=0.1):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
led_list_bf = led_list[bf_mask]
led_list_df = led_list[~bf_mask]
led_list_an = led_list[~bf_mask & (self.metadata.illumination.state_list.design[:, 0] ** 2
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (self.metadata.objective.na + darkfield_annulus_width_na) ** 2)]
illumination_sequence = []
self.pattern_type_list = []
pattern_dict = {'dpc.top': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] > 0]),
'dpc.bottom': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 1] < 0]),
'dpc.left': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] > 0]),
'dpc.right': np.ndarray.tolist(led_list_bf[self.metadata.illumination.state_list.design[bf_mask, 0] < 0]),
'single': [0],
'bf': np.ndarray.tolist(led_list_bf),
'df': np.ndarray.tolist(led_list_df),
'an': np.ndarray.tolist(led_list_an),
'full': np.ndarray.tolist(led_list)
}
# DPC does not flicker patterns within frames
n_time_points_per_frame = 1
illumination_state_list = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, pattern in enumerate(illumination_pattern_sequence):
single_frame_state_list_illumination = []
                # Loop over time points (irrelevant for DPC)
for time_index in range(n_time_points_per_frame):
time_point_state_list = []
# Loop over DPC patterns (which are themselves frames)
for led_idx in pattern_dict[pattern]:
values_dict = {}
for color_name in self.illumination_color:
values_dict[color_name] = self.illumination_color[color_name]
led_dict = {
'index': int(led_idx),
'time_index': 0,
'value': values_dict
}
# Append this to list with elements for each interframe time point
time_point_state_list.append(led_dict)
# Append to frame_dict
single_frame_state_list_illumination.append(time_point_state_list)
# Define illumination sequence
illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}})
# Define illumination list
self.state_list = self.metadata.illumination.state_list.design
return illumination_state_list
class MotionDeblurAcquisition():
# Initialization
def __init__(self, hardware_controller_list, system_metadata,
illumination_sequence=None,
motion_path_type='linear',
use_l1_distance_for_motion_calculations=True,
blur_vector_method='pseudo_random',
kernel_pulse_count=150,
saturation_factor=1.0,
frame_spacing_mm=1,
object_size_mm=(0.5, 0.5),
reuse_illumination_sequence=True,
max_exposure_time_s=2,
max_velocity_mm_s=40.0,
max_led_update_rate_us=0.01,
exposure_time_pad_s=0.0,
velocity_mm_s=None,
exposure_time_s=None,
debug=False,
motion_acceleration_mm_s_2=1e3,
extra_run_up_time_s=0,
flip_pathway=False,
segment_delay_s=0,
initial_auto_exposure=False,
acquisition_timeout_s=3,
illumination_sequence_count=1,
illumination_na_pad=0.03,
illumination_color={'w': 127},
only_store_first_and_last_position=True):
# Parse options
self.motion_path_type = motion_path_type
self.object_size_mm = object_size_mm
self.frame_spacing_mm = frame_spacing_mm
self.flip_pathway = flip_pathway
self.use_l1_distance_for_motion_calculations = use_l1_distance_for_motion_calculations
self.velocity_mm_s = velocity_mm_s
self.exposure_time_pad_s = exposure_time_pad_s
self.debug = debug
self.motion_acceleration_mm_s_2 = motion_acceleration_mm_s_2
self.max_led_update_rate_us = max_led_update_rate_us
self.max_exposure_time_s = max_exposure_time_s
self.max_velocity_mm_s = max_velocity_mm_s
self.illumination_na_pad = illumination_na_pad
self.saturation_factor = saturation_factor
self.reuse_illumination_sequence = reuse_illumination_sequence
self.blur_vector_method = blur_vector_method
self.kernel_pulse_count = kernel_pulse_count
self.illumination_color = illumination_color
self.extra_run_up_time_s = extra_run_up_time_s
self.initial_auto_exposure = initial_auto_exposure
self.acquisition_timeout_s = acquisition_timeout_s
self.segment_delay_s = segment_delay_s
self.only_store_first_and_last_position = only_store_first_and_last_position
self.illumination_sequence = illumination_sequence
self.illumination_sequence_count = illumination_sequence_count
# Define controller objects, which act as hardware interfaces.
# These should be in an ordered dictionary because the order which they
# are initialized matters when using a mix of hardware and software triggering.
self.hardware_controller_list = collections.OrderedDict()
# First add hardware triggered elements so they perform their set-up before we trigger software elements
for controller in hardware_controller_list:
if hasattr(controller, 'trigger_mode'):
                if controller.trigger_mode == 'hardware':
self.hardware_controller_list[controller.type] = controller
controller.reset()
controller.seq_clear()
        # Then, add the remaining (software-triggered) elements
        for controller in hardware_controller_list:
            if controller.type not in self.hardware_controller_list:
                self.hardware_controller_list[controller.type] = controller
                controller.reset()
                controller.seq_clear()
# Check to be sure a sequence acquisition is not running
assert 'camera' in self.hardware_controller_list, 'Did not find camera controller!'
# Store metadata object
self.metadata = system_metadata
# Ensure we have all necessary metadata for basic acquisition
assert self.metadata.objective.na is not None, 'Missing objective.na in metadata.'
assert self.metadata.objective.mag is not None, 'Missing objective.mag in metadata.'
assert self.metadata.camera.pixel_size_um is not None, 'Missing pixel size in metadata.'
# Update effective pixel size (for scale bar)
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / (self.metadata.objective.mag * self.metadata.system.mag)
# Trigger Constants
self.TRIG_MODE_EVERY_PATTERN = 1
self.TRIG_MODE_ITERATION = -1
self.TRIG_MODE_START = -2
# Frame state time sequence, will default to a sequence of one exposure time per frame if left as None
self.time_sequence_s = None
self.exposure_time_s = None
self.hardware_sequence_timing = None
        # Turn off fast sequencing for illumination by default, since this is only available with certain LED arrays
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].use_fast_sequence = False
# Set metadata type
self.metadata.type = 'motiondeblur'
assert 'illumination' in self.hardware_controller_list, 'Motion deblur object requires programmable light source'
assert 'position' in self.hardware_controller_list, 'Motion deblur object requires motion stage'
# Initialize state_sequence
self.state_sequence = []
# Generate position sequence
self.hardware_controller_list['position'].state_sequence, self.time_sequence_s = self.genMotionPathway(
pathway_type=self.motion_path_type, frame_spacing_mm=frame_spacing_mm)
# Generate illumination sequence
self.hardware_controller_list['illumination'].state_sequence = self.genMotionIlluminationSequenceRandom(illumination_sequence=illumination_sequence,
sequence_count=self.illumination_sequence_count)
# Set up subframe captures
self.subframe_capture_count = len(self.hardware_controller_list['illumination'].state_sequence[0])
self.force_preload_all_frames = True
self.hardware_controller_list['position'].continuous_states_between_frames = True
        # Configure illumination to use fast sequence updating if specified in options
self.hardware_controller_list['illumination'].use_fast_sequence = True
# Set bit depth
self.illumination_sequence_bit_depth = 1
# Set extra options for position controller
self.hardware_controller_list['position'].extra_run_up_time_s = self.extra_run_up_time_s
# Calculate effective pixel size if it hasn't already been calculated
self.metadata.system.eff_pixel_size_um = self.metadata.camera.pixel_size_um / \
(self.metadata.objective.mag * self.metadata.system.mag)
def preAcquire(self):
        ''' This method sets up the camera for an acquisition '''
        # Check that the motion, illumination, pupil, and focal sequences are all the same length (or None)
frame_counts = []
for hardware_controller_name in list(self.hardware_controller_list):
# Get controller object from dictionary
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
# Reset Controller
hardware_controller.reset()
# Get number of frames in sequence. If there is no sequence, remove this element from hw_controller_list
                if hardware_controller.type != 'camera':
if hardware_controller.state_sequence is not None:
frame_counts.append(len(hardware_controller.state_sequence))
else:
self.hardware_controller_list.pop(hardware_controller_name)
else:
# Remove this controller from the list
                if hardware_controller_name != 'camera':
del self.hardware_controller_list[hardware_controller_name]
# Turn on hardware triggering for initialization
self.hardware_controller_list['camera'].setTriggerMode('hardware')
# Set illumination parameters
if 'illumination' in self.hardware_controller_list:
# self.hardware_controller_list['illumination'].setColor(self.illumination_color)
self.hardware_controller_list['illumination'].setSequenceBitDepth(
self.illumination_sequence_bit_depth)
# Ensure all hardware elements have the same number of frames
if len(frame_counts) > 0:
            if len(set(frame_counts)) != 1:
raise ValueError('Sequence lengths are not the same (or None).')
else:
self.n_frames = frame_counts[0]
else:
raise ValueError('No sequence provided!')
# Initialize frame_list
self.frame_list = np.zeros((self.n_frames,
self.hardware_controller_list['camera'].getImageHeight(), self.hardware_controller_list['camera'].getImageWidth()), dtype=np.uint16)
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Generate frame_state_list
frame_state_list = []
if self.time_sequence_s is None:
self.time_sequence_s = []
for _ in range(self.n_frames):
self.time_sequence_s.append([0])
# Loop over frames
for frame_index in range(self.n_frames):
single_frame_state_list = {}
# Loop over hardware controllers and record their state sequences
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hardware_controller.state_sequence is not None:
single_frame_state_list[hardware_controller_name] = hardware_controller.state_sequence[frame_index]
# Record time_sequence_s
single_frame_state_list['time_sequence_s'] = self.time_sequence_s[frame_index]
# Add to list of all frames
frame_state_list.append(single_frame_state_list)
self.frame_state_list = frame_state_list
# Perform auto-exposure if user desires
if self.initial_auto_exposure:
# Illuminate with first pattern
if 'illumination' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].sequenceReset()
self.hardware_controller_list['illumination'].time_sequence_s = [[0]]
self.hardware_controller_list['illumination'].preloadSequence(0)
self.hardware_controller_list['illumination'].sequenceStep()
# Small delay to ensure illumination gets updated
time.sleep(0.1)
# Run Auto-Exposure
self.hardware_controller_list['camera'].autoExposure()
# Set camera memory footprint
if (self.hardware_controller_list['camera'].getBufferTotalCapacity() < self.frame_list.shape[0]):
self.frame_size_mb = int(
np.ceil(float(self.frame_list.shape[0] / 1e6) * float(self.frame_list.shape[1]) * float(self.frame_list.shape[2]) * 2))
print('Allocating %dmb for frames' % self.frame_size_mb)
self.hardware_controller_list['camera'].setBufferSizeMb(self.frame_size_mb)
assert self.hardware_controller_list['camera'].getBufferTotalCapacity(
) >= self.frame_list.shape[0], 'Buffer size too small!'
# Store initial time (acquisition start)
t0 = time.time()
# Tell camera to start waiting for frames
self.hardware_controller_list['camera'].runSequence()
# Keep track of how many images we have acquired
self.total_frame_count = 0
def acquire(self,
dataset=None,
reset_devices=False):
'''
This is a generic acquisition class, where LEDs are updated according to the sequence variable.
'''
        # Call preAcquire(), which initializes hardware and variables
self.preAcquire()
        # Determine which frames can be preloaded before serial acquisition. If each frame consists of a single state, we assume all frames can be preloaded; if the state of any hardware element changes within a frame, we assume they cannot.
frame_count = 0
linear_segment_list = []
for frame_state in self.hardware_controller_list['position'].state_sequence:
if frame_state['common']['linear_segment_index'] >= 0:
frame_count += 1
if frame_state['common']['linear_segment_index'] not in linear_segment_list:
linear_segment_list.append(frame_state['common']['linear_segment_index'])
print("Found %d segments and %d frames" % (len(linear_segment_list), frame_count))
t_start = time.time()
for linear_segment_index in linear_segment_list:
self.frames_to_acquire = []
# Determine which linear segments to run
for frame_index, frame_state in enumerate(self.hardware_controller_list['position'].state_sequence):
if frame_state['common']['linear_segment_index'] == linear_segment_index:
self.frames_to_acquire += [frame_index]
self.n_frames_to_acquire = len(self.frames_to_acquire)
x_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['x']
y_start = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[0]]['states'][0][0]['value']['y']
x_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['x']
y_end = self.hardware_controller_list['position'].state_sequence[self.frames_to_acquire[-1]]['states'][0][0]['value']['y']
print('Starting linear segment %d which has %d frames moving from (%.4f, %.4f)mm to (%.4f, %.4f)mm' %
(linear_segment_index, self.n_frames_to_acquire, x_start, y_start, x_end, y_end))
frame_has_multiple_states = []
for frame_index in self.frames_to_acquire:
number_of_states_in_current_frame = 0
for hardware_controller_name in self.hardware_controller_list:
                    if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
# Check if this frame can be preloaded (if it has more than one state, it can't be preloaded)
number_of_states_in_current_frame = max(number_of_states_in_current_frame, len(
self.hardware_controller_list[hardware_controller_name].state_sequence[frame_index]['states']))
# Check that the length of time_sequence_s matches the max number of state changes within this frame
if number_of_states_in_current_frame > 1:
frame_has_multiple_states.append(True)
assert self.time_sequence_s is not None, "time_sequence_s can not be None if any frame has multiple states!"
                    assert len(self.time_sequence_s[frame_index]) == number_of_states_in_current_frame, \
                        "time_sequence_s for frame %d is of wrong length!" % frame_index
else:
frame_has_multiple_states.append(False)
            # Determine if the entire multi-frame sequence can be preloaded (this will be False if the state of any hardware element (e.g. the LED pattern) changes within a frame and preloading is not forced)
all_frames_will_be_preloaded = (not any(frame_has_multiple_states)) or self.force_preload_all_frames
# Determine optimal exposure time for all frames
if self.exposure_time_s is not None:
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
elif self.time_sequence_s is not None and max(self.time_sequence_s[0]) > 0:
frame_exposures = []
for frame_index in range(self.n_frames_to_acquire):
frame_exposures.append(max(self.time_sequence_s[frame_index]))
self.exposure_time_s = sum(frame_exposures) / (self.n_frames_to_acquire)
self.hardware_controller_list['camera'].setExposure(self.exposure_time_s)
else:
self.exposure_time_s = self.hardware_controller_list['camera'].getExposure()
# Check that exposure time is correct
assert abs(self.exposure_time_s - self.hardware_controller_list['camera'].getExposure(
)) < 1e-3, "Desired exposure time %.2f is not equal to device exposure %.2f. This is probably a MM issue" % (self.exposure_time_s, self.hardware_controller_list['camera'].getExposure())
# print('Using exposure time %.2fs (%d ms)' % (self.exposure_time_s, int(self.exposure_time_s * 1000)))
# Check that time_sequence_s for multiple frames exists if there are inter-frame state changes
if (not any(frame_has_multiple_states)) or self.time_sequence_s is None:
self.time_sequence_s = [self.exposure_time_s]
# Configure hardware triggering
trigger_output_settings = [0, 0]
trigger_input_settings = [0, 0]
for hardware_controller_name in self.hardware_controller_list:
hardware_controller = self.hardware_controller_list[hardware_controller_name]
if hasattr(hardware_controller, 'trigger_mode') and 'hardware' in hardware_controller.trigger_mode:
# Check that trigger pins are configured
assert hardware_controller.trigger_pin is not None, 'Trigger pin must be configured for hardware triggering!'
# Determine if we're performing preloadable acquisitions or not
if self.subframe_capture_count > 1:
if self.reuse_illumination_sequence:
if hardware_controller_name == 'camera':
if self.illumination_sequence_count == 1:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
else:
trigger_output_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
trigger_input_settings[hardware_controller.trigger_pin] = len(self.hardware_controller_list['position'].state_sequence[0]['states']) // self.illumination_sequence_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
else:
if hardware_controller_name == 'camera':
trigger_output_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
trigger_input_settings[hardware_controller.trigger_pin] = self.subframe_capture_count
elif hardware_controller_name == 'position':
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_START
                    # Case where there is only one system state within each frame (trigger each frame)
elif all_frames_will_be_preloaded:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_EVERY_PATTERN
# Case where we only want to trigger on first frame. This is probably not a good default.
else:
trigger_output_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
trigger_input_settings[hardware_controller.trigger_pin] = self.TRIG_MODE_ITERATION
# Check that this hardware controller is ready for a sequence, if it is sequencable.
if hardware_controller.state_sequence is not None:
# Reset controller sequence to initial state
hardware_controller.sequenceReset()
time.sleep(0.1)
# Wait until initialization is complete
initialization_wait_time = 0
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
initialization_wait_time += 0.05
if initialization_wait_time > self.acquisition_timeout_s:
                        raise ValueError('Pre-acquisition isReadyForSequence timeout for %s' % hardware_controller_name)
# Tell the hardware controller about the acquisition time sequence
if len(hardware_controller.state_sequence) == len(self.time_sequence_s):
hardware_controller.time_sequence_s = [self.time_sequence_s[i] for i in self.frames_to_acquire]
else:
hardware_controller.time_sequence_s = [
[self.hardware_controller_list['camera'].getExposure()]] * self.n_frames_to_acquire
            # Set up triggering for hardware acquisition
self.hardware_controller_list['illumination'].trigger_output_settings = trigger_output_settings
self.hardware_controller_list['illumination'].trigger_input_settings = trigger_input_settings
# Determine which sequences get preloaded
if all_frames_will_be_preloaded: # One system state per acquisition
frame_preload_sequence = [-1] # Preload all frames at once
else:
frame_preload_sequence = range(self.n_frames_to_acquire) # Preload each frame serially
# Loop over frames to capture (may only execute once if we're preloading all frames)
for preload_index in frame_preload_sequence:
# Loop over hardware controllers, preload, and determine necessary exposure time (if using inter-frame state changes)
for hardware_controller_name in self.hardware_controller_list:
# If we're using the motion stage, calculate the mechanical delay
if hardware_controller_name == 'position':
# Get velocity and acceleration from state sequence
if preload_index == -1:
index = 0
else:
index = preload_index
                        velocity = self.hardware_controller_list[hardware_controller_name].state_sequence[index]['common']['velocity']
acceleration = self.hardware_controller_list[hardware_controller_name].acceleration
jerk = self.hardware_controller_list[hardware_controller_name].jerk
# Calculate spin-up time and distance
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+0.5*j+*+t)+solve+for+t
# http://www.wolframalpha.com/input/?i=v+%3D+t+*+(a+%2B+(1%2F8)*j+*+t)+solve+for+t
# Good reference:
# http://www.et.byu.edu/~ered/ME537/Notes/Ch5.pdf
# Total period
if False:
# First period (acceleration of acceleration)
t_1 = acceleration / jerk
# x_1 = 1/6 * jerk * t_1 ** 3
x_1 = acceleration ** 2 / (6 * jerk) * t_1
# v_1 = 1/2 * jerk * t_1 ** 2
v_1 = acceleration ** 2 / (2 * jerk)
# Second period (linear region)
dv = velocity - 2 * v_1
assert dv > 0
t_2 = dv / acceleration
x_2 = v_1 * t_2 + 1/2 * acceleration * t_2 ** 2
v_2 = velocity - v_1
                            # Third period (deceleration of acceleration)
t_3 = acceleration / jerk
x_3 = (v_2 + acceleration ** 2 / (3 * jerk)) * t_3
v_3 = v_1
# Calculate spin-up distance and time
spin_up_time_s = t_1 + t_2 + t_3
spin_up_distance_mm = x_1 + x_2 + x_3
                            assert abs(v_1 + v_2 + v_3 - velocity) < 1e-1, "Calculated velocity is %.4f, desired is %.4f" % (v_1 + v_2 + v_3, velocity)
else:
spin_up_time_s = velocity / acceleration
spin_up_distance_mm = 1/2 * acceleration * spin_up_time_s ** 2
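                            # Constant-acceleration run-up (assumed model): t = v / a and
                            # x = 1/2 * a * t^2, so the stage is at full speed when the frame begins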
# Add extra spin_up time
spin_up_time_s += self.extra_run_up_time_s
spin_up_distance_mm += self.extra_run_up_time_s * velocity
# spin_up_distance_mm = 0
spin_up_time_s = max(spin_up_time_s, 0.0001)
                        self.hardware_controller_list['illumination'].setupTriggering(self.hardware_controller_list['position'].trigger_pin, int(
                            self.hardware_controller_list['position'].trigger_pulse_width_us), int(spin_up_time_s * 1e6))  # spin-up time converted to microseconds
                        # Tell motion stage to offset its positions by these amounts
self.hardware_controller_list['position'].preload_run_up_distance_mm = spin_up_distance_mm
else:
# no delay for other components
self.hardware_controller_list[hardware_controller_name].trigger_start_delay_s = 0
                    if hardware_controller_name != 'camera' and self.hardware_controller_list[hardware_controller_name].state_sequence is not None:
                        if hardware_controller_name != 'illumination' or linear_segment_index == 0:
if hardware_controller_name == 'illumination' and self.reuse_illumination_sequence:
self.hardware_controller_list[hardware_controller_name].preloadSequence(0)
else:
state_sequence_used = [
self.hardware_controller_list[hardware_controller_name].state_sequence[i] for i in self.frames_to_acquire]
self.hardware_controller_list[hardware_controller_name].preloadSequence(
preload_index, state_sequence=state_sequence_used)
if preload_index < 0 or self.reuse_illumination_sequence:
frames_to_wait_for = self.n_frames_to_acquire # wait for all frames
else:
frames_to_wait_for = 1
# Set trigger frame time based on first pathway TODO: This is a hack
if 'position' in self.hardware_controller_list:
self.hardware_controller_list['illumination'].trigger_frame_time_s[self.hardware_controller_list['camera']
.trigger_pin] = self.hardware_controller_list['position'].state_sequence[0]['common']['frame_time'] * 1e6
# Tell stage to start moving
self.hardware_controller_list['position'].runSequence()
if linear_segment_index == 0:
t_start = time.time()
# Tell illumination to start moving
if self.reuse_illumination_sequence:
self.hardware_controller_list['illumination'].runSequence(
n_acquisitions=1 * self.n_frames_to_acquire)
else:
self.hardware_controller_list['illumination'].runSequence(n_acquisitions=1)
# Wait for frames to be captured
t_frame = time.time()
frame_count = 0
while frame_count < frames_to_wait_for:
if self.total_frame_count + frame_count == frames_to_wait_for:
break
else:
if self.total_frame_count + frame_count == self.hardware_controller_list['camera'].getBufferSizeFrames():
time.sleep(0.01)
if (time.time() - t_frame) > self.acquisition_timeout_s:
print(self.hardware_controller_list['illumination'].response())
raise ValueError('Acquisition timeout (Total frame count: %d, Buffer size: %d, preload index %d, frames to wait for: %d)' % (
self.total_frame_count, self.hardware_controller_list['camera'].getBufferSizeFrames(), preload_index, frames_to_wait_for))
else:
if ((self.total_frame_count + frame_count) % int((self.n_frames) / min(10, self.n_frames_to_acquire))) == 0:
print('Acquired %d of %d frames' % (
self.hardware_controller_list['camera'].getBufferSizeFrames(), self.n_frames_to_acquire))
frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames(
) - self.total_frame_count
self.total_frame_count = self.hardware_controller_list['camera'].getBufferSizeFrames()
t_frame = time.time()
# Get sequence timing information
time.sleep(0.1)
print(self.hardware_controller_list['illumination'].response())
# Wait for hardware to stop
for hardware_controller_name in self.hardware_controller_list:
while not self.hardware_controller_list[hardware_controller_name].isReadyForSequence():
time.sleep(0.05)
self.sequence_timing_dict = {}
# Reset sequences
for hardware_controller_name in self.hardware_controller_list:
                if hardware_controller_name != 'camera':
self.hardware_controller_list[hardware_controller_name].sequenceReset()
# Let user know we're finished
print('Finished linear segment %d' % linear_segment_index)
time.sleep(self.segment_delay_s)
t_acq = time.time() - t_start
self.metadata.acquisition_time_s = t_acq
print("Acquisition took %.4f seconds" % (t_acq))
# Call post-acquire functions
dataset = self.postAcquire(dataset=dataset, reset_devices=reset_devices)
# Return
return dataset
def postAcquire(self, dataset=None, reset_devices=True):
"""Post-acquisition steps for resetting hardware and preparing dataset."""
# Stop acquisition
# self.hardware_controller_list['camera'].sequenceStop()
# Parse dataset
if dataset is None:
from htdeblur.mddataset import MotionDeblurDataset
dataset = MotionDeblurDataset()
# Read frames and timestamps from buffer
(self.frame_list, elapsed_frame_time_ms) = self.hardware_controller_list['camera'].readFramesFromBuffer()
# Apply simple geometric transformations
if self.metadata.camera.transpose:
self.frame_list = self.frame_list.transpose(0, 2, 1)
if self.metadata.camera.flip_x:
self.frame_list = np.flip(self.frame_list, 2)
if self.metadata.camera.flip_y:
self.frame_list = np.flip(self.frame_list, 1)
# Let user know we're finished
print('Read frames from buffer.')
# Store camera timing in a standardized timing dict
self.sequence_timing_dict = {}
self.sequence_timing_dict['sequence_timing'] = []
for frame_index, frame_time in enumerate(elapsed_frame_time_ms):
timing_dict = {'trigger_number' : 0, 'acquisition_number' : frame_index, 'camera_start_time_us' : frame_time * 1000}
self.sequence_timing_dict['sequence_timing'].append(timing_dict)
# Reset all hardware elements
if reset_devices:
for hardware_controller_name in self.hardware_controller_list:
self.hardware_controller_list[hardware_controller_name].reset()
if self.only_store_first_and_last_position:
for frame_state in self.frame_state_list[1:]:
frame_state['position']['states'] = [frame_state['position']['states'][0], frame_state['position']['states'][-1]]
# Remove repeated illumination patterns and time_sequence_s if we used the same illumination for each pulse
if self.reuse_illumination_sequence:
for frame_state in self.frame_state_list[1:]:
frame_state['time_sequence_s'] = 'see_frame_#1'
frame_state['illumination'] = 'see_frame_#1'
        # Illuminate with brightfield to indicate we're finished
self.hardware_controller_list['illumination'].bf()
self.hardware_controller_list['position'].goToPosition((0,0))
        # Save results to a Dataset object
dataset.frame_list = self.frame_list
dataset.frame_state_list = self.frame_state_list
dataset.metadata = self.metadata
dataset.type = 'motion_deblur'
# Return
return dataset
def genMotionPathway(self, n_acquisitions=1, pathway_type='raster', frame_spacing_mm=1.):
'''
This function generates a few example motion pathways.
'''
        if pathway_type == 'raster':
pathway = self.genMotionPathwayRaster(self.object_size_mm, self.frame_spacing_mm)
        elif pathway_type in ('linear', 'linear_x'):
            # predefine linear x sequence
n_frames = int(math.ceil(self.object_size_mm[1] / self.frame_spacing_mm[1]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'x_start': frame_index * self.frame_spacing_mm[1],
'x_end': (frame_index + 1) * self.frame_spacing_mm[1],
'y_start': 0, 'y_end': 0, 'linear_segment_index': 0})
elif pathway_type in ['linear_y']:
# predefine linear y sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': -frame_index * self.frame_spacing_mm[0],
'y_end': -(frame_index + 1) * self.frame_spacing_mm[0],
'x_start': 0, 'x_end': 0, 'linear_segment_index': 0})
        elif pathway_type == 'linear_diag':
            # predefine linear diagonal sequence
n_frames = int(np.ceil(self.object_size_mm[0] / self.frame_spacing_mm[0]))
pathway = []
for frame_index in range(n_frames):
pathway.append({'y_start': frame_index * self.frame_spacing_mm[0],
'y_end': (frame_index + 1) * self.frame_spacing_mm[0],
'x_start': frame_index * self.frame_spacing_mm[0],
'x_end': (frame_index + 1) * self.frame_spacing_mm[0],
'linear_segment_index': 0})
else:
raise ValueError('Pathway type %s is not implemented.' % pathway_type)
        # Center the pathway about the midpoint of its bounding box
        path_xmin = 1e8
        path_ymin = 1e8
        path_xmax = -1e8
        path_ymax = -1e8
        for path in pathway:
path_xmin = min(path_xmin, min([path['x_start'], path['x_end']]))
path_xmax = max(path_xmax, max([path['x_start'], path['x_end']]))
path_ymin = min(path_ymin, min([path['y_start'], path['y_end']]))
path_ymax = max(path_ymax, max([path['y_start'], path['y_end']]))
mean = ((path_ymax + path_ymin) / 2, (path_xmax + path_xmin) / 2)
for path in pathway:
path['x_start'] = path['x_start'] - mean[1]
path['x_end'] = path['x_end'] - mean[1]
path['y_start'] = path['y_start'] - mean[0]
path['y_end'] = path['y_end'] - mean[0]
# Flip pathway if user desired
if self.flip_pathway:
for path in pathway:
path['x_start'] *= -1
path['x_end'] *= -1
path['y_start'] *= -1
path['y_end'] *= -1
position_state_list = []
time_sequence_s = []
# Write image sequence to list
for acquisition_index in range(n_acquisitions):
# Loop over DPC patterns (frames)
for frame_index, position in enumerate(pathway):
# define distance in terms of l1 or l2 distance
distance_l2 = float(np.sqrt((position['x_end'] - position['x_start'])
** 2 + (position['y_end'] - position['y_start']) ** 2))
distance_l1 = float(abs(position['x_end'] - position['x_start']) +
abs(position['y_end'] - position['y_start']))
if self.use_l1_distance_for_motion_calculations:
position['frame_distance'] = int(round(distance_l1 * 1000)) / 1000 # round to nearest um
else:
position['frame_distance'] = int(round(distance_l2 * 1000)) / 1000 # round to nearest um
                # Determine number of quantifiable positions in pathway
position['n_blur_positions_frame'] = int(
math.floor(position['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000)))
# Determine necessary velocity
if self.velocity_mm_s is not None:
position['velocity_mm_s'] = self.velocity_mm_s
else:
position['velocity_mm_s'] = self.max_velocity_mm_s # Use fastest speed possible
# Calculate time between frames
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s'] # t = x / v
# Determine camera exposure time for this frame
position['exposure_time_s'] = int(math.floor((self.hardware_controller_list['camera'].calcExposureTimeFromBusyTime(
position['frame_time_s']) - self.exposure_time_pad_s) * 1000)) / 1000 # round to nearest ms
# Determine LED update rate
dx_pixel = position['frame_distance'] / position['n_blur_positions_frame']
dt_pixel_raw = dx_pixel / position['velocity_mm_s']
position['led_update_rate_us'] = math.ceil(dt_pixel_raw * 1e6) # Round up to integer us
                # Determine new velocity (pixel size / update rate)
new_velocity_mm_s = (self.metadata.system.eff_pixel_size_um / 1e3) / (position['led_update_rate_us'] / 1e6)
if self.debug > 0:
print('Reducing velocity to %.4f mm/s from %.4f mm/s to match illumination update rate of %d us' % (new_velocity_mm_s, position['velocity_mm_s'], position['led_update_rate_us']))
position['velocity_mm_s'] = new_velocity_mm_s
# Update frame time based on velocity
position['frame_time_s'] = position['frame_distance'] / position['velocity_mm_s']
# Determine number of pixels in exposure time
position['n_blur_positions_exposure'] = math.floor(position['exposure_time_s'] / (position['led_update_rate_us'] / 1e6))
# Determine the distance traveled during the exposure time
position['exposure_distance'] = position['n_blur_positions_exposure'] * position['led_update_rate_us'] / 1e6 * position['velocity_mm_s']
# Store acceleration
position['acceleration_mm_s_2'] = self.motion_acceleration_mm_s_2
# Print information about this pattern
if self.debug > 0:
print('Segment %d, index %d will require %d blur positions per frame (%d during exposure), %.2fms exposure time (%.2fms total frame time), scan %.2fmm (%.2fmm with exposure), move at %.2fmm/s, and update speed %dus' %
                          (position['linear_segment_index'], frame_index, position['n_blur_positions_frame'], position['n_blur_positions_exposure'], 1000. * position['exposure_time_s'], 1000. * position['frame_time_s'], position['frame_distance'], position['exposure_distance'], position['velocity_mm_s'], position['led_update_rate_us']))
# Check that all blur parameters are valid
                assert position['led_update_rate_us'] >= self.max_led_update_rate_us, "LED array update rate (%dus) < max update rate (%dus)" % (
                    position['led_update_rate_us'], self.max_led_update_rate_us)
assert position['exposure_time_s'] <= self.max_exposure_time_s, "Exposure time (%.3fs) > max_exposure_time_s (%.3f)" % (
position['exposure_time_s'], self.max_exposure_time_s)
                assert position['velocity_mm_s'] <= self.max_velocity_mm_s, "Velocity (%.3f mm/s) > max_velocity_mm_s (%.3f)" % (
                    position['velocity_mm_s'], self.max_velocity_mm_s)
                # Lists for this position
single_frame_state_list_position = []
single_frame_time_sequence_s = []
# Determine movement direction
direction = np.asarray((position['y_end'] - position['y_start'],
position['x_end'] - position['x_start']))
direction /= np.linalg.norm(direction)
# Store common information about this frame
common_state_dict = {}
common_state_dict['frame_time'] = position['frame_time_s']
common_state_dict['led_update_rate_us'] = position['led_update_rate_us']
common_state_dict['linear_segment_index'] = position['linear_segment_index']
common_state_dict['frame_distance'] = position['frame_distance']
common_state_dict['exposure_distance'] = position['exposure_distance']
common_state_dict['velocity'] = position['velocity_mm_s']
common_state_dict['acceleration'] = position['acceleration_mm_s_2']
common_state_dict['n_blur_positions_exposure'] = position['n_blur_positions_exposure']
common_state_dict['position_delta_x_mm'] = direction[1] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
common_state_dict['position_delta_y_mm'] = direction[0] * position['velocity_mm_s'] * position['led_update_rate_us'] / 1e6
                # Loop over time points (irrelevant for DPC)
for time_index in range(position['n_blur_positions_exposure']):
time_point_state_list = []
x = position['x_start'] + direction[1] * abs(common_state_dict['position_delta_x_mm']) * time_index
                    y = position['y_start'] + direction[0] * abs(common_state_dict['position_delta_y_mm']) * time_index
# Append this to list with elements for each interframe time point
time_point_state_list.append({'time_index': time_index,
'value': {'x': x, 'y': y}})
# Append to frame_dict
single_frame_state_list_position.append(time_point_state_list)
single_frame_time_sequence_s.append((time_index + 1) * position['led_update_rate_us'] / 1e6)
# Define illumination sequence
position_state_list.append({'states' : single_frame_state_list_position, 'common' : common_state_dict})
# Define time_sequence
time_sequence_s.append(single_frame_time_sequence_s)
# for state in position_state_list:
# print(state['states'][0][0]['value']['x'] - state['states'][-1][0]['value']['x'])
return (position_state_list, time_sequence_s)
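    # Worked example of the per-frame timing math above (illustrative numbers only):
    #   frame_distance = 1 mm at velocity = 40 mm/s -> frame_time_s = 1 / 40 = 0.025 s
    #   eff_pixel_size_um = 5 -> n_blur_positions_frame = floor(1 / 0.005) = 200
    #   dx_pixel = 1/200 mm -> dt_pixel = dx / v = 125 us -> led_update_rate_us = 125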
def genMotionPathwayRaster(self, object_size_mm, frame_spacing_mm, major_axis=None, include_minor_axis=False):
# Hard-code major axis since the rest of the code doesn't respect it for now
_major_axis = 1
        # Determine number of measurements
        measurement_count = np.ceil(np.asarray(object_size_mm) / np.asarray(frame_spacing_mm)).astype(int)  # two components in x and y
# Error checking
        assert np.any(measurement_count > 1), "frame_spacing_mm must be smaller than object_size_mm!"
print("Image size requires %d x %d images" % (measurement_count[0], measurement_count[1]))
# If number of measurements along major axis is odd, center this row
offset = [0, 0]
offset[_major_axis] -= frame_spacing_mm[_major_axis] / 2
# Generate raster points
raster_end_point_list = []
pathway = []
linear_segment_index = 0 # This variable keeps track of linear segments, for use with path planning
for row in np.arange(measurement_count[0]):
if row % 2 == 0:
for index, col in enumerate(range(measurement_count[1])):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (col + 1) + offset[1],
'y_end': frame_spacing_mm[0] * row + offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_start': frame_spacing_mm[0] * row + offset[0],
'x_end': frame_spacing_mm[1] * (measurement_count[1] - 1) + offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) + offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
else:
for index, col in enumerate(reversed(range(measurement_count[1]))):
# Add pathway to list
pathway.append({'x_start': frame_spacing_mm[1] * col - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': frame_spacing_mm[1] * (col - 1) - offset[1],
'y_end': frame_spacing_mm[0] * row - offset[0],
'linear_segment_index': linear_segment_index})
# Add minor stride
if row < (measurement_count[0] - 1) and include_minor_axis:
pathway.append({'x_start': - offset[1],
'y_start': frame_spacing_mm[0] * row - offset[0],
'x_end': 0 - offset[1],
'y_end': frame_spacing_mm[0] * (row + 1) - offset[0],
'linear_segment_index': -1 * (linear_segment_index + 1)})
linear_segment_index += 1
print('Generated motion pathway with %d linear segments' % (linear_segment_index))
return pathway
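    # Note: the raster above is serpentine; even rows sweep in +x and odd rows in -x,
    # so each linear segment starts near where the previous one ended.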
def plotPathway(self):
sequence_list = self.hardware_controller_list['position'].state_sequence
point_list_start = []
point_list_end = []
for sequence in sequence_list:
start_pos = (sequence['states'][0][0]['value']['x'], sequence['states'][0][0]['value']['y'])
end_pos = (sequence['states'][-1][0]['value']['x'], sequence['states'][-1][0]['value']['y'])
point_list_start.append(start_pos)
point_list_end.append(end_pos)
point_list_start = np.asarray(point_list_start)
point_list_end = np.asarray(point_list_end)
plt.figure()
for index in range(len(point_list_start)):
plt.scatter(point_list_start[index, 0], point_list_start[index, 1], c='b')
plt.scatter(point_list_end[index, 0], point_list_end[index, 1], c='r')
plt.plot([point_list_start[index, 0], point_list_end[index, 0]],
[point_list_start[index, 1], point_list_end[index, 1]], c='y')
plt.xlabel('Position X (mm)')
plt.ylabel('Position Y (mm)')
        plt.title('Pathway (blue = start, red = end)')
plt.gca().invert_yaxis()
def genMotionIlluminationSequenceRandom(self, sequence_count=1,
illumination_sequence=None):
led_list = np.arange(self.metadata.illumination.state_list.design.shape[0])
bf_mask = self.metadata.illumination.state_list.design[:, 0] ** 2 \
+ self.metadata.illumination.state_list.design[:, 1] ** 2 < (
self.metadata.objective.na + self.illumination_na_pad) ** 2
illumination_state_list = []
linear_segments_processed = {}
# Loop over DPC patterns (frames)
for frame_index, frame_position_dict in enumerate(self.hardware_controller_list['position'].state_sequence):
frame_position_list = frame_position_dict['states']
# Get number of positions in blur kernel from this frame. Divide into subsequences
pattern_count = len(frame_position_list) // sequence_count
# Determine the number of non-zero illumination positions
pattern_count_used = int(round(pattern_count * self.saturation_factor))
# Place patterns at the END of the full sequence
pattern_count_start = 0
# Get linear segment index
current_segment_index = frame_position_dict['common']['linear_segment_index']
if not self.reuse_illumination_sequence or frame_index == 0:
blur_vector_full = []
# Generate several blur vectors
for _ in range(sequence_count):
                    # Use the provided illumination sequence verbatim if given;
                    # otherwise generate a blur vector according to blur_vector_method
                    blur_vector = np.zeros(pattern_count)
                    if illumination_sequence:
                        blur_vector = np.asarray(illumination_sequence)
                    elif self.blur_vector_method == 'strobe':
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
elif self.blur_vector_method == 'center':
blur_vector = np.zeros(pattern_count)
# Determine distance traveled within this frame (including readout time)
frame_pixel_count = round(frame_position_list[0][0]['frame_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
exposure_pixel_count = round(frame_position_list[0][0]['exposure_distance'] / (self.metadata.system.eff_pixel_size_um / 1000))
                        if frame_pixel_count // 2 >= exposure_pixel_count:
print("WARNING: Camera will not expose during center flash (%d pixels, %d pixels used of %d pixels total)" % (frame_pixel_count // 2, exposure_pixel_count, pattern_count))
blur_vector[pattern_count_used] = 1
else:
# Set center position to be on
blur_vector[frame_pixel_count // 2] = 1
elif self.blur_vector_method == 'start_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'start_middle_end':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'tens':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + 10] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 30] = 1
blur_vector[pattern_count_start + 40] = 1
elif self.blur_vector_method == 'twenties':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start + 0] = 1
blur_vector[pattern_count_start + 20] = 1
blur_vector[pattern_count_start + 40] = 1
blur_vector[pattern_count_start + 60] = 1
blur_vector[pattern_count_start + 80] = 1
blur_vector[pattern_count_start + 100] = 1
blur_vector[pattern_count_start + 120] = 1
blur_vector[pattern_count_start + 140] = 1
blur_vector[pattern_count_start + 160] = 1
blur_vector[pattern_count_start + 180] = 1
elif self.blur_vector_method == 'quarters':
blur_vector = np.zeros(pattern_count)
blur_vector[pattern_count_start] = 1
blur_vector[pattern_count_start + pattern_count_used // 4] = 1
blur_vector[pattern_count_start + pattern_count_used // 2] = 1
blur_vector[pattern_count_start + pattern_count_used // 2 + pattern_count_used // 4] = 1
blur_vector[pattern_count_start + pattern_count_used - 1] = 1
elif self.blur_vector_method == 'random':
blur_vector[pattern_count_start:pattern_count_start +
pattern_count_used] = np.random.rand(pattern_count_used)
elif self.blur_vector_method == 'constant':
blur_vector[pattern_count_start:pattern_count_start +
pattern_count_used] = np.ones(pattern_count_used)
elif self.blur_vector_method in ['coded', 'pseudo_random']:
if self.kernel_pulse_count is not None:
pulse_count = self.kernel_pulse_count
else:
pulse_count = pattern_count_used // 2
from htdeblur import blurkernel
blur_vector_tmp, kappa = blurkernel.vector(pulse_count, kernel_length=pattern_count_used)
blur_vector[pattern_count_start:pattern_count_start + pattern_count_used] = blur_vector_tmp
else:
raise ValueError('Invalid blur kernel method: %s' % self.blur_vector_method)
# Append to blur_vector_full
blur_vector_full += list(blur_vector)
# Ensure the pattern is the correct length
if len(blur_vector_full) < len(frame_position_list):
blur_vector_full += [0] * (len(frame_position_list) - len(blur_vector_full))
elif len(blur_vector_full) > len(frame_position_list):
                    raise ValueError('Generated blur vector is longer than the frame position list')
# Assign
linear_segments_processed[str(frame_index)] = blur_vector_full
else:
blur_vector_full = linear_segments_processed['0']
single_frame_state_list_illumination = []
        # Loop over time points (irrelevant for DPC)
for time_index, illumination_value in enumerate(blur_vector_full):
time_point_state_list = []
# Loop over DPC patterns (which are themselves frames)
# for led_number in led_list[bf_mask]:
led_number = -1
values_dict = {}
for color_name in self.illumination_color:
values_dict[color_name] = self.illumination_color[color_name] * illumination_value
led_dict = {
'index': int(led_number),
'time_index': time_index,
'value': values_dict
}
# Append this to list with elements for each interframe time point
time_point_state_list.append(led_dict)
# Append to frame_dict
single_frame_state_list_illumination.append(time_point_state_list)
# Define illumination sequence
illumination_state_list.append({'states' : single_frame_state_list_illumination, 'common' : {}})
        return illumination_state_list
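
# --- Illustrative sketch (not part of the original class) ---
# The blur-vector logic above boils down to: allocate one slot per motion
# pattern, then switch a few slots on according to the chosen method.
# make_blur_vector is a hypothetical standalone helper showing three of the
# simpler methods:
import numpy as np

def make_blur_vector(pattern_count, pattern_count_used, method='strobe'):
    blur_vector = np.zeros(pattern_count)
    if method == 'strobe':
        # single flash in the middle of the used patterns
        blur_vector[pattern_count_used // 2] = 1
    elif method == 'start_end':
        # flash at the first and the last used pattern
        blur_vector[0] = 1
        blur_vector[pattern_count_used - 1] = 1
    elif method == 'constant':
        # illumination on for every used pattern
        blur_vector[:pattern_count_used] = 1
    else:
        raise ValueError('Invalid blur kernel method: %s' % method)
    return blur_vector

# e.g. make_blur_vector(10, 8, 'start_end') -> [1. 0. 0. 0. 0. 0. 0. 1. 0. 0.]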
| 54.284828
| 757
| 0.612656
| 9,155
| 78,713
| 4.967013
| 0.085745
| 0.088272
| 0.0687
| 0.078904
| 0.65905
| 0.598135
| 0.524905
| 0.469795
| 0.426869
| 0.386625
| 0
| 0.012827
| 0.305693
| 78,713
| 1,449
| 758
| 54.322291
| 0.819235
| 0.151385
| 0
| 0.417735
| 0
| 0.005342
| 0.094554
| 0.003625
| 0
| 0
| 0
| 0.00069
| 0.025641
| 1
| 0.013889
| false
| 0
| 0.012821
| 0
| 0.035256
| 0.017094
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2094cbd00b0292a602f2ea788a9486c162b5e7e
| 2,053
|
py
|
Python
|
leetcode/weekly150/last_substring.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | 2
|
2018-01-18T11:01:36.000Z
|
2021-12-20T18:14:48.000Z
|
leetcode/weekly150/last_substring.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | null | null | null |
leetcode/weekly150/last_substring.py
|
jan25/code_sorted
|
f405fd0898f72eb3d5428f9e10aefb4a009d5089
|
[
"Unlicense"
] | null | null | null |
'''
https://leetcode.com/contest/weekly-contest-150/problems/last-substring-in-lexicographical-order/
SA algorithm mostly copied from https://cp-algorithms.com/string/suffix-array.html
Status: TLE; probably caused by Python 3 list overhead
'''
class SuffixArray:
def __init__(self, s):
self.s = s
self.n = len(s)
self.p = [0] * self.n
self.c = [0] * self.n
c = self.preprocess()
self.process(c)
def preprocess(self):
counter = [0] * 260
for c in self.s:
counter[ord(c)] += 1
for i in range(1, len(counter)):
counter[i] += counter[i - 1]
for i in range(self.n):
c = ord(self.s[i])
counter[c] -= 1
self.p[counter[c]] = i
c = 0
self.c[0] = c
for i in range(1, self.n):
if self.s[self.p[i]] != self.s[self.p[i - 1]]:
c += 1
self.c[self.p[i]] = c
return c + 1
def process(self, c):
cn = [0] * self.n
i = 0
pn = [0] * self.n
while (1 << i) < self.n:
for j in range(self.n):
pn[j] = self.p[j] - (1 << i)
if pn[j] < 0: pn[j] += self.n
counter = [0] * c
for j in range(self.n):
counter[self.c[pn[j]]] += 1
for j in range(1, c):
counter[j] += counter[j - 1]
for j in range(self.n - 1, -1, -1):
counter[self.c[pn[j]]] -= 1
self.p[counter[self.c[pn[j]]]] = pn[j]
cn[self.p[0]] = 0
c = 1
for j in range(1, self.n):
a = [self.c[self.p[j]], self.c[(self.p[j] + (1 << i)) % self.n]]
b = [self.c[self.p[j - 1]], self.c[(self.p[j - 1] + (1 << i)) % self.n]]
if a != b: c += 1
cn[self.p[j]] = c - 1
self.c, cn = cn, self.c
i += 1
class Solution:
def lastSubstring(self, s: str) -> str:
sa = SuffixArray(s)
return s[sa.p[-1]:]
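
# Usage sketch (illustrative): the suffix array orders the suffixes, so the
# lexicographically last substring is the suffix starting at sa.p[-1].
if __name__ == '__main__':
    print(Solution().lastSubstring("leetcode"))  # expected: "tcode"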
| 31.584615
| 97
| 0.431076
| 313
| 2,053
| 2.814696
| 0.188498
| 0.085131
| 0.040863
| 0.056754
| 0.292849
| 0.162316
| 0
| 0
| 0
| 0
| 0
| 0.037187
| 0.397467
| 2,053
| 64
| 98
| 32.078125
| 0.67502
| 0.103751
| 0
| 0.037037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c20c7d9e299f07af3208c0a8aedd483571769bbb
| 18,555
|
py
|
Python
|
schemagen/schemagen.py
|
GoZaddy/SchemaGen
|
c8374382f6b52ad3cec398c77fd5bc90fe891818
|
[
"MIT"
] | 3
|
2021-03-26T22:51:41.000Z
|
2021-03-27T15:17:24.000Z
|
schemagen/schemagen.py
|
GoZaddy/SchemaGen
|
c8374382f6b52ad3cec398c77fd5bc90fe891818
|
[
"MIT"
] | null | null | null |
schemagen/schemagen.py
|
GoZaddy/SchemaGen
|
c8374382f6b52ad3cec398c77fd5bc90fe891818
|
[
"MIT"
] | null | null | null |
from antlr4 import *
from .antlr import GraphQLLexer, GraphQLListener, GraphQLParser
from .codegen import CodegenTool, Class, String, ClassInstance, IfElse, If, Method, Expr, Variable
import re
from math import floor
from datetime import datetime
from .utils import strip_string_quotes, camel_case_to_snake_case, process_input_value_definition
from .errors import ParsingError
GraphQLParser = GraphQLParser.GraphQLParser
graphene = 'graphene'
built_in_scalars = [
'Int',
'Float',
'String',
'Boolean',
'ID',
'Date',
'Datetime',
    'Time',
    'Decimal',
'JSONString',
'Base64',
]
class SchemaGen(GraphQLListener.GraphQLListener):
"""
SchemaGen is the entry point through which the package is used.
Attributes:
input_file: a string containing the name of the GraphQL schema file
        output_file: an optional string containing the name of the file to which the result of the code generation should be written.
"""
def __init__(self, input_file: str, output_file: str = None):
if output_file is None:
output_file = input_file.split(sep='.')[0] + '_' + str(floor(datetime.now().timestamp())) + '.py'
        is_valid_file_name = re.match(r"\w+\.py$", output_file)
        if is_valid_file_name is None:
            raise Exception('Output file is not a Python file')
self.output_file = output_file
self.input_file = input_file
self.codegen = CodegenTool(output_file=self.output_file)
super().__init__()
def enterTypeDefinition(self, ctx: GraphQLParser.TypeDefinitionContext):
for child in ctx.children:
# type definition is for an Object Type Definition
if isinstance(child, GraphQLParser.ObjectTypeDefinitionContext) or isinstance(child,
GraphQLParser.InterfaceTypeDefinitionContext):
is_object_type = isinstance(child, GraphQLParser.ObjectTypeDefinitionContext)
is_interface = isinstance(child, GraphQLParser.InterfaceTypeDefinitionContext)
type_class = Class(name=child.name().getText(), add_init_method=False)
if is_object_type:
type_class.base_class = "ObjectType"
elif is_interface:
type_class.base_class = "Interface"
is_mutation = False
if type_class.name == 'Mutation':
is_mutation = True
is_object_type = False
meta_class = Class(name='meta')
# create map for methods to be resolved
methods_to_be_resolved = {}
# get type description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get implemented interfaces
if is_object_type or is_mutation:
if child.implementsInterfaces() is not None:
interfaces = child.implementsInterfaces().getText().split(sep='implements')
interfaces = interfaces[1].split(sep='&')
interface_string = ''
for i in interfaces:
interface_string = interface_string + i + ','
meta_class.add_class_variable('interfaces', f"({interface_string})")
# get fields of the ObjectType or Interface
if child.fieldsDefinition():
fields = child.fieldsDefinition().fields
if not is_mutation:
for field in fields:
# get field name and type
field_name = camel_case_to_snake_case(field.name().getText())
field_type = field.type_().getText()
field_required = False
# get field description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
if is_interface:
if field_type.lower() == type_class.name.lower():
field_type = 'lambda: ' + field_type
# if field is a required field
if field_type[len(field_type) - 1] == '!':
field_required = True
field_code = ClassInstance('Field', field_type[:-1], required=True)
else:
field_code = ClassInstance('Field', field_type)
# if field is a list type
if field.type_().listType() is not None:
list_type_named_type = field.type_().listType().type_().getText()
if is_interface:
if list_type_named_type.lower() == type_class.name.lower():
list_type_named_type = 'lambda: ' + list_type_named_type
if list_type_named_type[len(list_type_named_type) - 1] == '!':
field_code = ClassInstance('List',
str(ClassInstance('NonNull', list_type_named_type[:-1])),
required=field_required)
else:
field_code = ClassInstance('List', list_type_named_type, required=field_required)
# get field arguments
if is_object_type:
args = field.argumentsDefinition()
args_string = []
if args is not None:
args = args.args
for arg in args:
# add info to method_to_be_resolved map
if field_name not in methods_to_be_resolved:
methods_to_be_resolved[field_name] = [arg.name().getText()]
else:
methods_to_be_resolved[field_name].append(arg.name().getText())
processed_arg = process_input_value_definition(arg)
args_string.append(
f"{String(processed_arg['name'])}: {str(processed_arg['arg_impl'])}")
field_code.add_kwarg('args', "{" + ', '.join(args_string) + "}")
if field_desc != '':
field_code.add_kwarg(key='description', value=field_desc)
type_class.class_variables[field_name] = str(field_code)
else:
for field in fields:
# get field name and type
field_name = camel_case_to_snake_case(field.name().getText())
field_type = field.type_().getText()
field_required = False
field_class = Class(field.name().getText(), add_init_method=False, base_class='Mutation')
argument_class = Class(name='arguments')
# get field description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
# if field is a required field
if field_type[len(field_type) - 1] == '!':
field_required = True
field_code = ClassInstance('Field', field_type[:-1], required=True)
else:
field_code = ClassInstance('Field', field_type)
# if field is a list type
if field.type_().listType() is not None:
list_type_named_type = field.type_().listType().type_().getText()
if list_type_named_type[len(list_type_named_type) - 1] == '!':
field_code = ClassInstance('List',
str(ClassInstance('NonNull', list_type_named_type[:-1])),
required=field_required)
else:
field_code = ClassInstance('List', list_type_named_type, required=field_required)
# get field arguments
args = field.argumentsDefinition()
arg_list = []
if args is not None:
args = args.args
for arg in args:
processed_arg = process_input_value_definition(arg)
argument_class.add_class_variable(processed_arg['name'],
str(processed_arg['arg_impl']))
arg_list.append(processed_arg['name'])
field_class.add_sub_class(argument_class)
field_class.add_method(
method=Method(
name='mutate',
arguments=['root', 'info'] + arg_list
)
)
if field_desc != '':
field_code.add_kwarg(key='description', value=field_desc)
# write mutation classes for the mutation's fields
self.codegen.write_class(field_class)
type_class.class_variables[field_name] = str(field_code)
# add resolver methods
if not is_mutation:
for method in methods_to_be_resolved:
type_class.add_method(method_name='resolve_' + method,
arguments_names=['info'] + methods_to_be_resolved[method])
if type_class.name == 'Query':
for var in type_class.class_variables:
if var not in methods_to_be_resolved:
type_class.add_method(method_name='resolve_' + var, arguments_names=['info'])
if len(meta_class.class_variables) != 0:
type_class.add_sub_class(meta_class)
self.codegen.write_class(type_class)
# type definition is for an EnumTypeDefinition
elif isinstance(child, GraphQLParser.EnumTypeDefinitionContext):
enum_class = Class(name=child.name().getText(), base_class="Enum", add_init_method=False)
meta_class = Class(name='meta')
# get enum description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get fields of the Enum
fields = child.enumValuesDefinition().fields
fields_and_desc = {}
for field in fields:
# get field name and type
enum_value = field.enumValue().getText()
# get enum description
field_desc = field.description()
if field_desc is not None:
field_desc = String(strip_string_quotes(field_desc.getText()))
else:
field_desc = ''
if field_desc != '':
                    # keep the description for the description property generated below
fields_and_desc[enum_value] = field_desc
# add enums as class variables to main class
enum_class.add_class_variable(enum_value, String(enum_value))
if fields_and_desc:
# add enums description
method = Method(
name='description',
decorators=['@property'],
arguments=[]
)
if_else = IfElse(
indent_level=method.get_indent_level() + 1,
else_action=[Expr("pass")],
)
for i in fields_and_desc:
if_else.add_elif(If(
expr=Expr(f"self == {enum_class.name}.{i}"),
action=[Expr(f"return {fields_and_desc[i]}")]
))
method.set_body([if_else])
enum_class.add_method(method=method)
if len(meta_class.class_variables) != 0:
enum_class.add_sub_class(meta_class)
self.codegen.write_class(enum_class)
            # type definition is for a ScalarTypeDefinition
elif isinstance(child, GraphQLParser.ScalarTypeDefinitionContext):
if child.name().getText().capitalize() in built_in_scalars:
continue
scalar_class = Class(name=child.name().getText(), base_class="Scalar", add_init_method=False)
desc = child.description()
if desc is not None:
scalar_class.description = strip_string_quotes(desc.getText())
serialize_method = Method(
name='serialize',
arguments=['val'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
parse_literal_method = Method(
name='parse_literal',
arguments=['node'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
parse_value_method = Method(
name='parse_value',
arguments=['value'],
decorators=['@staticmethod'],
body=[Expr('# write method body'), Expr('pass')],
is_static=True
)
scalar_class.add_method(method=serialize_method)
scalar_class.add_method(method=parse_literal_method)
scalar_class.add_method(method=parse_value_method)
self.codegen.write_class(scalar_class)
elif isinstance(child, GraphQLParser.UnionTypeDefinitionContext):
union_class = Class(name=child.name().getText(), base_class='Union')
meta_class = Class(name='Meta')
unions = child.unionMemberTypes().getText()
if unions[0] == '=':
unions = unions[1:]
unions = ', '.join(unions.split(sep='|'))
meta_class.add_class_variable(variable_name='types', variable_value=f"({unions})")
desc = child.description()
if desc is not None:
meta_class.add_class_variable(variable_name='description',
variable_value=String(strip_string_quotes(desc.getText())))
union_class.add_sub_class(meta_class)
self.codegen.write_class(union_class)
print(unions)
elif isinstance(child, GraphQLParser.InputObjectTypeDefinitionContext):
type_class = Class(name=child.name().getText(), base_class="InputObjectType", add_init_method=False)
meta_class = Class(name='meta')
# get type description
desc = child.description()
if desc:
meta_class.add_class_variable('description', String(strip_string_quotes(desc.getText())))
# get fields
if child.inputFieldsDefinition():
fields = child.inputFieldsDefinition().fields
for field in fields:
processed_ivd = process_input_value_definition(field)
type_class.add_class_variable(processed_ivd['name'], str(processed_ivd['arg_impl']))
if len(meta_class.class_variables) != 0:
type_class.add_sub_class(meta_class)
self.codegen.write_class(type_class)
else:
print(type(child))
def enterSchemaDefinition(self, ctx: GraphQLParser.SchemaDefinitionContext):
schema_obj = ClassInstance('Schema')
fields = ctx.fields
for field in fields:
schema_obj.add_kwarg(strip_string_quotes(field.operationType().getText()),
strip_string_quotes(field.namedType().getText()))
var = Variable(
name='schema',
value=schema_obj
)
self.codegen.write_variable(var)
def __call__(self):
try:
self.codegen.import_package(package=graphene, mode=2, object='*')
input_stream = FileStream(self.input_file)
lexer = GraphQLLexer.GraphQLLexer(input_stream)
stream = CommonTokenStream(lexer)
parser = GraphQLParser(stream)
tree = parser.document()
walker = ParseTreeWalker()
walker.walk(self, tree)
except Exception as err:
raise ParsingError(str(err))
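
# Usage sketch (illustrative; 'schema.graphql' and the output name are
# hypothetical): instantiate SchemaGen with a GraphQL schema file and call the
# instance to run the ANTLR parse walk and emit graphene code.
if __name__ == '__main__':
    gen = SchemaGen('schema.graphql', output_file='generated_schema.py')
    gen()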
| 46.156716
| 136
| 0.492643
| 1,654
| 18,555
| 5.261185
| 0.136638
| 0.019306
| 0.019421
| 0.025396
| 0.495059
| 0.456332
| 0.425075
| 0.390715
| 0.347276
| 0.333142
| 0
| 0.001883
| 0.427648
| 18,555
| 401
| 137
| 46.27182
| 0.817514
| 0.05896
| 0
| 0.386207
| 0
| 0
| 0.045045
| 0.004884
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013793
| false
| 0.013793
| 0.031034
| 0
| 0.048276
| 0.006897
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c20cac9dd66122173bfd30ba53957fea5bb5307b
| 2,231
|
py
|
Python
|
app/api/views.py
|
rickywang432/flask
|
c956dee6c7dfbb57a5fcd247d23af37e20b96da7
|
[
"MIT"
] | null | null | null |
app/api/views.py
|
rickywang432/flask
|
c956dee6c7dfbb57a5fcd247d23af37e20b96da7
|
[
"MIT"
] | 1
|
2021-06-02T02:01:38.000Z
|
2021-06-02T02:01:38.000Z
|
app/api/views.py
|
rickywang432/flask
|
c956dee6c7dfbb57a5fcd247d23af37e20b96da7
|
[
"MIT"
] | null | null | null |
from flask import Flask, request, jsonify, Blueprint
from flask_marshmallow import Marshmallow
from app.models import User, Group, Role
from app import ma
api = Blueprint('api', __name__)
class UserSchema(ma.Schema):
class Meta:
# Fields to expose
fields = ('id', 'confirmed','first_name','last_name', 'email', 'active')
user_schema = UserSchema()
users_schema = UserSchema(many=True)
class GroupSchema(ma.Schema):
users = ma.Nested(UserSchema, many=True)
class Meta:
# Fields to expose
fields = ('id', 'name','users')
group_schema = GroupSchema()
groups_schema = GroupSchema(many=True)
class RoleSchema(ma.Schema):
users = ma.Nested(UserSchema, many=True)
class Meta:
# Fields to expose
fields = ('id', 'name','default','permissions','users')
role_schema = RoleSchema()
roles_schema = RoleSchema(many=True)
@api.route("/user", methods=["GET"])
def get_user():
all_users = User.query.all()
result = users_schema.dump(all_users)
return jsonify(result)
# endpoint to get user detail by id
@api.route('/user/<int:id>', methods=["GET"])
def user_detail(id):
user = User.query.get(id)
return user_schema.jsonify(user)
@api.route("/group", methods=["GET"])
def get_group():
all_groups = Group.query.all()
result = groups_schema.dump(all_groups)
return jsonify(result)
# endpoint to get group detail by id
@api.route('/group/<int:id>', methods=["GET"])
def group_detail_id(id):
group = Group.query.get(id)
return group_schema.jsonify(group)
@api.route('/group/<string:name>', methods=["GET"])
def group_detail_name(name):
group = Group.query.filter_by(name=name).first()
return group_schema.jsonify(group)
@api.route("/role", methods=["GET"])
def get_role():
all_roles = Role.query.all()
result = roles_schema.dump(all_roles)
return jsonify(result)
# endpoint to get role detail by id
@api.route('/role/<int:id>', methods=["GET"])
def role_detail_id(id):
role = Role.query.get(id)
return role_schema.jsonify(role)
@api.route('/role/<string:name>', methods=["GET"])
def role_detail_name(name):
role = Role.query.filter_by(name=name).first()
return role_schema.jsonify(role)
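
# Usage sketch (illustrative): in the real project the app factory in `app`
# registers this blueprint; a minimal standalone mount would look like this.
if __name__ == '__main__':
    app = Flask(__name__)
    app.register_blueprint(api, url_prefix='/api')
    app.run(debug=True)  # e.g. GET /api/user, /api/group/1, /api/role/admin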
| 26.247059
| 80
| 0.685791
| 311
| 2,231
| 4.787781
| 0.170418
| 0.042982
| 0.069846
| 0.034251
| 0.443922
| 0.304231
| 0.28274
| 0.173271
| 0.173271
| 0.173271
| 0
| 0
| 0.160466
| 2,231
| 85
| 81
| 26.247059
| 0.794981
| 0.069027
| 0
| 0.210526
| 0
| 0
| 0.099469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.140351
| false
| 0
| 0.070175
| 0
| 0.491228
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c20d8ed82808f42c1ce9f7452c5668af8015a2b5
| 2,335
|
py
|
Python
|
setup.py
|
maljovec/samply
|
9364c2f671c02cb7bab484c0e856a0a0ca6ecc40
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
maljovec/samply
|
9364c2f671c02cb7bab484c0e856a0a0ca6ecc40
|
[
"BSD-3-Clause"
] | 2
|
2019-02-21T00:28:36.000Z
|
2019-11-09T04:35:39.000Z
|
setup.py
|
maljovec/samplers
|
9364c2f671c02cb7bab484c0e856a0a0ca6ecc40
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Setup script for samply
"""
from setuptools import setup
import re
extra_args = {}
def get_property(prop, project):
"""
Helper function for retrieving properties from a project's
__init__.py file
@In, prop, string representing the property to be retrieved
@In, project, string representing the project from which we will
retrieve the property
@Out, string, the value of the found property
"""
result = re.search(
r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format(prop),
open(project + "/__init__.py").read(),
)
return result.group(1)
VERSION = get_property("__version__", "samply")
def long_description():
""" Reads the README.rst file and extracts the portion tagged between
specific LONG_DESCRIPTION comment lines.
"""
description = ""
recording = False
with open("README.rst") as f:
for line in f:
if "END_LONG_DESCRIPTION" in line:
return description
elif "LONG_DESCRIPTION" in line:
recording = True
continue
if recording:
description += line
# Consult here: https://packaging.python.org/tutorials/distributing-packages/
setup(
name="samply",
packages=["samply"],
version=VERSION,
description="A library for computing samplings in arbitrary dimensions",
long_description=long_description(),
author="Dan Maljovec",
author_email="maljovec002@gmail.com",
license="BSD",
test_suite="samply.tests",
url="https://github.com/maljovec/samply",
download_url="https://github.com/maljovec/samply/archive/"
+ VERSION
+ ".tar.gz",
keywords=[""],
# Consult here: https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Scientific/Engineering :: Mathematics",
],
setup_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
install_requires=["scipy", "numpy", "sklearn", "pyDOE", "ghalton"],
python_requires=">=2.7, <4",
)
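
# Minimal check (illustrative) of the get_property() regex against an
# in-memory string instead of a real __init__.py:
if __name__ == "__main__":
    sample = '__version__ = "0.1.2"'
    found = re.search(r'{}\s*=\s*[\'"]([^\'"]*)[\'"]'.format("__version__"), sample)
    assert found.group(1) == "0.1.2"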
| 29.935897
| 77
| 0.615418
| 252
| 2,335
| 5.587302
| 0.547619
| 0.06392
| 0.02983
| 0.02983
| 0.096591
| 0.096591
| 0
| 0
| 0
| 0
| 0
| 0.006282
| 0.250107
| 2,335
| 77
| 78
| 30.324675
| 0.79783
| 0.233833
| 0
| 0
| 0
| 0
| 0.360259
| 0.025354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.04
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c20db92c5e61a54ef4ff2401b5df9360bca3d9b1
| 4,353
|
py
|
Python
|
数据结构实践课/实验3/文本格式化.py
|
TD21forever/hdu-term-project-helper
|
f42f553efd1d7b59162d3fc793ac14ae30850efd
|
[
"Apache-2.0"
] | 17
|
2021-01-09T06:49:09.000Z
|
2022-02-23T01:36:20.000Z
|
数据结构实践课/实验3/文本格式化.py
|
TD21forever/hdu-term-project-helper
|
f42f553efd1d7b59162d3fc793ac14ae30850efd
|
[
"Apache-2.0"
] | null | null | null |
数据结构实践课/实验3/文本格式化.py
|
TD21forever/hdu-term-project-helper
|
f42f553efd1d7b59162d3fc793ac14ae30850efd
|
[
"Apache-2.0"
] | 1
|
2021-06-22T12:56:16.000Z
|
2021-06-22T12:56:16.000Z
|
# -*- coding: utf-8 -*-
# @Author: TD21forever
# @Date: 2018-11-14 15:41:57
# @Last Modified by: TD21forever
# @Last Modified time: 2018-11-15 16:50:48
file = open('input.txt', 'r')  # open the input file
# Preprocessing: normalize punctuation spacing in the given string
def preprocess(article):
article = article.strip()
article = article.replace(",", ", ")
article = article.replace(" ,", ",")
article = article.replace(".", ". ")
article = article.replace(" .", ".")
article = article.replace("?", "? ")
article = article.replace(" ?", "?")
return article
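# Example of the normalization above (illustrative):
#   preprocess("Hello ,world .How are you ?")
#   returns "Hello, world. How are you? "  (punctuation reattached; trailing space kept)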
def operate(line_num=5, word_in_line=55, margin=2, heading_len=3, footing_len=3, start_page_num=1, file=file):
    flag = 0
    article = file.read()  # read the whole file into a string
    file.close()
    f = open('out.txt', 'a')
    article = preprocess(article)
    word_list = article.split()  # split into a list of words
    str_info = " ".join(word_list)  # re-join to remove consecutive spaces
    str_info = str_info.replace("@", "\n @")
    start = 0
    end = word_in_line
    while end < len(str_info):
        for i in range(heading_len):  # blank lines at the top of the page
            print("\n", end="", file=f)
        for one in range(line_num):  # one text line per iteration
            line = str_info[start:end]
            temp = end
            if end <= len(str_info):
                if str_info[temp-1] != " " or str_info[temp-1] not in word_list:  # last char of the line is not a space, so a word was split
                    # the second condition guards against short words like "as" being split
                    while str_info[temp] != " ":  # pull characters forward until the next space
                        line = line + (str_info[temp])
                        temp += 1
                    line = line + (str_info[temp])  # temp now points at the space; keep it at the end of this line
                    end = temp + 1
                print(" "*margin, end="", file=f)  # left margin at the start of each line
                print(line, file=f)  # write this line of the page
                start = end
                end += word_in_line
            elif one+1 < line_num:  # this page has not yet reached the required number of lines
                line = str_info[start:]
                print(" "*margin, end="", file=f)  # left margin at the start of each line
                print(line, file=f)  # write this line of the page
                flag = 1
                for i in range(footing_len):  # blank lines at the bottom of the page
                    if footing_len >= 3:
                        if i == 1:
                            print(" "*((word_in_line+margin)//2), str(start_page_num), end="", file=f)
                    print("\n", end="", file=f)
                break
            else:  # this page already has the required lines, so start a new page
                for i in range(footing_len):  # blank lines at the bottom of the page
                    if footing_len >= 3:
                        if i == 1:
                            print(" "*((word_in_line+margin)//2), str(start_page_num), end="", file=f)
                    print("\n", end="", file=f)
                for i in range(heading_len):  # blank lines at the top of the page
                    print("\n", end="", file=f)
                line = str_info[start:]
                print(" "*margin, end="", file=f)  # left margin at the start of each line
                print(line, file=f)  # write this line of the page
                for i in range(footing_len):  # blank lines at the bottom of the page
                    if footing_len >= 3:
                        if i == 1:
                            print(" "*((word_in_line+margin)//2), str(start_page_num+1), end="", file=f)
                    print("\n", end="", file=f)
                flag = 1
        if flag == 1:
            break
        for i in range(footing_len):  # blank lines at the bottom of the page
            if footing_len >= 3:
                if i == 1:
                    print(" "*((word_in_line+margin)//2), str(start_page_num), end="", file=f)
            print("\n", end="", file=f)
        start_page_num += 1
    f.close()
if __name__ == '__main__':
    while True:
        print("Welcome to the text formatting tool")
        print("Configurable parameters:\n1. Lines of text per page\n2. Maximum characters of text per line\n3. Fixed leading spaces before the text on each line\n4. Blank lines at the top of each page\n5. Blank lines at the bottom of each page\n6. Starting page number\n")
        ans = "no"
        ans = input("Use the default parameters 5,55,2,3,3,1? Please type yes or no:")
        if ans == 'yes':
            operate()
        else:
            print("Please enter the parameters\n")
            a = int(input("1. Lines of text per page"))
            b = int(input("2. Maximum characters of text per line"))
            if b > 80:
                b = int(input("Maximum characters per line must be under 80, please re-enter:"))
            c = int(input("3. Fixed leading spaces before the text on each line"))
            d = int(input("4. Blank lines at the top of each page"))
            e = int(input("5. Blank lines at the bottom of each page"))
            ff = int(input("6. Starting page number"))
            operate(a, b, c, d, e, ff)
| 34.824
| 107
| 0.464278
| 495
| 4,353
| 3.951515
| 0.260606
| 0.0409
| 0.05317
| 0.033742
| 0.4182
| 0.398773
| 0.397239
| 0.397239
| 0.385992
| 0.385992
| 0
| 0.032756
| 0.389846
| 4,353
| 124
| 108
| 35.104839
| 0.70369
| 0.087296
| 0
| 0.478723
| 0
| 0.010638
| 0.080671
| 0.037454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021277
| false
| 0
| 0
| 0
| 0.031915
| 0.202128
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c213c3cc512fab07ba3d806bd3d3286525745450
| 389
|
py
|
Python
|
crawler/robo_proxy.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | 1
|
2018-09-19T06:27:14.000Z
|
2018-09-19T06:27:14.000Z
|
crawler/robo_proxy.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | null | null | null |
crawler/robo_proxy.py
|
xliangwu/com.caveup.machine_learn
|
793131c4767f45d468a813752c07d02f623a7b99
|
[
"Apache-2.0"
] | null | null | null |
import requests
def pages_crawler():
http_header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
}
url = r'https://robo.datayes.com/v2/indicator_library'
response = requests.get(url, headers=http_header)
print(response.text)
if __name__ == '__main__':
pages_crawler()
| 24.3125
| 135
| 0.676093
| 55
| 389
| 4.545455
| 0.8
| 0.096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087774
| 0.179949
| 389
| 15
| 136
| 25.933333
| 0.695925
| 0
| 0
| 0
| 0
| 0.1
| 0.44473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2145a28b8098d26c67f49818369dff92c2ac06b
| 11,662
|
py
|
Python
|
apiosintDS/apiosintDS.py
|
davidonzo/apiosintDS
|
b5bb1c42e1a3d984a69e8794a4c5da6969dcd917
|
[
"MIT"
] | 13
|
2019-10-15T06:54:49.000Z
|
2022-03-28T23:23:29.000Z
|
apiosintDS/apiosintDS.py
|
davidonzo/apiosintDS
|
b5bb1c42e1a3d984a69e8794a4c5da6969dcd917
|
[
"MIT"
] | 1
|
2019-11-12T15:00:53.000Z
|
2019-11-14T09:37:46.000Z
|
apiosintDS/apiosintDS.py
|
davidonzo/apiosintDS
|
b5bb1c42e1a3d984a69e8794a4c5da6969dcd917
|
[
"MIT"
] | 4
|
2019-12-05T05:34:07.000Z
|
2022-03-24T09:59:26.000Z
|
import sys
import logging
import pytz
logging.basicConfig(format='%(levelname)s: %(message)s')
if (sys.version_info < (3, 0)):#NO MORE PYTHON 2!!! https://pythonclock.org/
logging.error(" ########################### ERROR ###########################")
logging.error(" =============================================================")
logging.error(" Invalid python version detected: "+str(sys.version_info[0])+"."+str(sys.version_info[1]))
logging.error(" =============================================================")
logging.error(" It seems your are still using python 2 even if you should")
logging.error(" now it will be retire next 2020.")
logging.error(" For more info please read https://pythonclock.org/")
logging.error(" =============================================================")
logging.error(" Try again typing: python3 /path/to/"+sys.argv[0])
logging.error(" =============================================================")
logging.error(" ########################### ERROR ###########################")
exit(0)
import tempfile
import argparse
import os
import requests
import re
import json
italyTZ = pytz.timezone("Europe/Rome")
from apiosintDS.modules import listutils, dosearch
try:
from urllib.parse import urlparse
except ImportError as ierror:
logging.error(ierror)
logging.error("To run this script you need to install the \"urllib\" module")
logging.error("Try typing: \"pip3 install urllib3\"")
exit(0)
try:
import validators
except ImportError as e:
logging.error(e)
logging.error("To run this script you need to install the \"validators\" module")
logging.error("Try typing: \"pip3 install validators\"")
exit(0)
import platform
if platform.system() not in ['Linux']:
logging.warning("Script not testes on "+platform.system()+" systems. Use at your own risks.")
scriptinfo = {"scriptname": "DigitalSide-API",
"majorversion": "1",
"minorversion": "8.3",
"license": "MIT",
"licenseurl": "https://raw.githubusercontent.com/davidonzo/Threat-Intel/master/LICENSE",
"author": "Davide Baglieri",
"mail": "info[at]digitalside.it",
"pgp": "30B31BDA",
"fingerprint": "0B4C F801 E8FF E9A3 A602 D2C7 9C36 93B2 30B3 1BDA",
"git": "https://github.com/davidonzo/Threat-Intel/blob/master/tools/DigitalSide-API/v1",
"DSProjectHP": "https://osint.digitalside.it",
"DSGitHubHP": "https://github.com/davidonzo/Threat-Intel"}
def checkfile(file):
if os.path.isfile(file) == False:
msg = "File not found: %r." % file
raise argparse.ArgumentTypeError(msg)
else:
lines = [line.rstrip('\n') for line in open(file)]
if len(lines) == 0:
msg2 = "File is empty or unreadable: %r." % file
raise argparse.ArgumentTypeError(msg2)
return lines
def writablefile(file):
if os.path.isfile(file) == True:
msg = "File %r already exists. Please, delete it first." % file
raise argparse.ArgumentTypeError(msg)
else:
try:
f = open(file, "w+")
f.close()
except:
msg2 = "File is empty or unreadable: %r." % file
raise argparse.ArgumentTypeError(msg2)
return file
def writablecache(tmpdir):
if os.path.isfile(tmpdir):
msg = "%r seems to be a file, not a directory." % tmpdir
raise argparse.ArgumentTypeError(msg)
elif os.path.exists(tmpdir) == False:
msg = "%r directory not found." % tmpdir
raise argparse.ArgumentTypeError(msg)
elif os.access(tmpdir, os.W_OK) == False:
msg = "%r directory not found." % tmpdir
raise argparse.ArgumentTypeError(msg)
return tmpdir
def filebspath(directory, file):
_BSR = os.path.abspath(os.path.dirname(__file__))
return os.path.join(_BSR, directory, file)
def info():
htext = scriptinfo["scriptname"]+" v."+scriptinfo["majorversion"]+"."+scriptinfo["minorversion"]+"."
htext += "\nOn demand query API for OSINT.digitalside.it project.\n"
htext += "You can query for souspicious domains, urls and IPv4.\n\n"
htext += "For more information read the README.md file and the JSON schema hosted on GitHub.com:\n"
htext += " - "+scriptinfo["git"]+"/README.md\n"
htext += " - "+scriptinfo["git"]+"/schema.json\n"
htext += "\n"
htext += "This file is part of the OSINT.digitalside.it project.\n"
htext += "For more information about the project please visit the following links:\n"
htext += " - "+scriptinfo["DSProjectHP"]+"\n"
htext += " - "+scriptinfo["DSGitHubHP"]+"\n"
htext += "\n"
htext += "This software is released under the "+scriptinfo["license"]+" license\n"
htext += " - "+scriptinfo["licenseurl"]+"\n"
htext += "\n"
htext += "Coded with love by\n "+scriptinfo["author"]+" <"+scriptinfo["mail"]+">\n"
htext += " PGP "+scriptinfo["pgp"]+"\n"
htext += " Fingerprint "+scriptinfo["fingerprint"]
htext += "\n"
return htext
def schema():
try:
schema = open(filebspath('schema', 'schema.json'), "r")
content = schema.read()
schema.close()
return content
except IOError as e:
logging.error(e)
logging.error("Unable to load schema file.")
exit(1)
def request(entities=list, cache=False, cachedirectory=None, clearcache=False, verbose=False, *args, **kwargs):
if isinstance(entities, list):
        if clearcache and not cache:
            logging.error("Unable to clear cache with cache disabled. Please set the cache to 'True'")
            exit(1)
        if cachedirectory and not cache:
            logging.error("Unable to use a cache directory with the cache option disabled. Please set the cache to 'True'")
            exit(1)
        if cache and not cachedirectory:
            logging.error("When using apiosintDS as a Python library, you always have to specify the temporary files directory to be used.")
            exit(1)
if cache:
try:
writablecache(cachedirectory)
except Exception as clearcacheerror:
logging.error(clearcacheerror)
exit(1)
lutils = listutils.listutils(None, entities, cache, cachedirectory, clearcache)
makelist = lutils.prepareLists()
if isinstance(makelist, dict):
            search = dosearch.dosearch(makelist, verbose)
            results = search.prepareResults()
if isinstance(results, dict):
return results
else:
logging.error("create_request must return a dict.")
else:
logging.error("create_request must return a dict.")
else:
logging.error("entities must be an instance of list.")
exit(1)
def main():
parserdescription = scriptinfo["scriptname"]+" v."+scriptinfo["majorversion"]+"."+scriptinfo["minorversion"]+"."
parserdescription +=" On demand query API for OSINT.digitalside.it project."
parserdescription +=" You can query for souspicious domains, urls and IPv4."
parser = argparse.ArgumentParser(description=parserdescription)
parser.add_argument("-e","--entity", type=str, action="store", metavar="[IPv4|domain|url|hash]", dest="ITEM", help="Single item to search. Supported entities are IPv4/FQDN/URLs and file hashes in md5, sha1 or sha256. It can't be used in combination with the --file option.", default=None)
parser.add_argument("-f","--file", type=checkfile, action="store", metavar="/path/to/file.txt", dest="FILE", help="Path to file containing entities to search. Supported entities are IPv4/FQDN/URLs. It can't be used in combination with the --entity option.", default=None)
parser.add_argument("-o", "--output", type=writablefile, action="store", metavar="/path/to/output.json", dest="OUTPUT", help="Path to output file (/path/to/output.json). If not specified the output will be redirect to the STDOUT.", default=None)
parser.add_argument("-v", "--verbose", action="store_true", dest="VERBOSE", help="Include unmatched results in report.")
parser.add_argument("-c","--cache", action="store_true", dest="CACHE", help="Enable cache mode. Downloaded lists will be stored a won't be downloaded for the next 4 hours.")
parser.add_argument("-cd","--cachedirectory", type=writablecache, action="store", metavar="/path/to/cachedir", dest="DIRECTORY", help="The cache directory where the script check for cached lists files and where them will be stored on cache creation or update. Must be specified the same every script run unless your are using the system temp directory. Default is '"+tempfile.gettempdir()+"'", default=tempfile.gettempdir())
parser.add_argument("-cc","--clearcache", action="store_true", dest="CLEARCACHE", help="Force the script to download updated lists even if the 3 hours timeout has not yet been reached. Must be used in combination with --cache.")
parser.add_argument("-i","--info", action="store_true", dest="INFO", help="Print information about the program.")
parser.add_argument("-s","--schema", action="store_true", dest="SCHEMA", help="Display the response json schema.")
try:
args = parser.parse_args()
if (args.INFO):
sys.stdout.write(info())
exit(1)
if (args.SCHEMA):
try:
schema = open(filebspath('schema', 'schema.json'), "r")
for schemaline in schema.readlines():
sys.stdout.write(schemaline)
schema.close()
exit(0)
except IOError as e:
logging.error(e)
logging.error("Unable to load schema file.")
exit(1)
if (args.ITEM == None) and (args.FILE == None):
parser.error("No targets selected! Please, specify one option between --entity and --file.\nTry option -h or --help.")
exit(1)
elif (args.ITEM != None) and (args.FILE != None):
parser.error("Too much targets selected! Sorry, you can't specify both options --entity and --file.\nTry option -h or --help.")
exit(1)
elif args.CLEARCACHE and not args.CACHE:
args.CLEARCACHE = False
logging.warning("Expected -c or --cache option declared. Ignoring all cache settings.\nTry option -h or --help.")
lutils = listutils.listutils(args.ITEM, args.FILE, args.CACHE, args.DIRECTORY, args.CLEARCACHE)
makelist = lutils.prepareLists()
if isinstance(makelist, dict):
            search = dosearch.dosearch(makelist, args.VERBOSE)
            results = search.prepareResults()
if isinstance(results, dict):
output = json.dumps(results, indent=4, separators=(",", ": "))
if args.OUTPUT == None:
sys.stdout.write(output)
else:
fileoutput = open(args.OUTPUT, "w+")
fileoutput.write(output)
fileoutput.close()
logging.info("Output saved in file: "+args.OUTPUT)
            else:
                logging.error("'results' is not a dict. Quit!")
        else:
            logging.error("'makelist' is not a dict. Quit!")
except argparse.ArgumentError as e:
logging.error(e)
parser.error("Unexpected Error.\nTry option -h or --help.")
exit(2)
if __name__ == '__main__':
main()
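
# Library usage sketch (illustrative; the entity value is a placeholder):
# request() is the programmatic entry point, and per the checks above an
# enabled cache requires an explicit, writable cache directory.
#
#   import apiosintDS
#   results = apiosintDS.request(entities=['example.com'],
#                                cache=True, cachedirectory='/tmp')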
| 51.149123
| 428
| 0.607271
| 1,384
| 11,662
| 5.091763
| 0.257948
| 0.052788
| 0.021711
| 0.023414
| 0.361288
| 0.315453
| 0.270044
| 0.231304
| 0.191571
| 0.147013
| 0
| 0.008854
| 0.234865
| 11,662
| 227
| 429
| 51.374449
| 0.780903
| 0.003773
| 0
| 0.334884
| 0
| 0.046512
| 0.382371
| 0.036068
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037209
| false
| 0
| 0.069767
| 0
| 0.139535
| 0.009302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2160b83bdfd16bb5fd59f1cfbfcbb7c7d36395f
| 3,327
|
py
|
Python
|
5-3_stock inventory.py
|
hkrsmk/python
|
1ee1b0adc911b62af3911428f441c6c59e1b345f
|
[
"Unlicense"
] | null | null | null |
5-3_stock inventory.py
|
hkrsmk/python
|
1ee1b0adc911b62af3911428f441c6c59e1b345f
|
[
"Unlicense"
] | null | null | null |
5-3_stock inventory.py
|
hkrsmk/python
|
1ee1b0adc911b62af3911428f441c6c59e1b345f
|
[
"Unlicense"
] | null | null | null |
#Stock inventory control system.
def menu():
print("""1. Add New Stock
2. Update existing stock
3. Sell stock, even though 2 will work too
8. Display Inventory
9. Exit""")
while True:
try:
choice = int(input("Please select an option"))
break
except:
print("Invalid choice, please try again")
return choice
#======================================= 1 ===========================
def newStock():
newstock = input("Enter new stock name")
if newstock in myStock:
print("Stock already there")
else:
myStock[newstock]=0
print("new stock", newstock.center(10, ' '), "added")
#======================================= 2 ===========================
def addVolume():
stock_bought = input("Enter stock name you're buying")
if stock_bought not in myStock:
print("Stock ain't there. add first")
else:
while True:
try:
qty = int(input("How many? positive for buy. negative for sell"))
myStock[stock_bought] += qty
print(stock_bought, "is now", myStock[stock_bought])
break
except:
print("Invalid quantity!")
#======================================= 3 ============================
def sell():
selling = input("Stock name you're selling?")
if selling not in myStock:
print("You don't have this?")
elif myStock[selling]<=0:
print(selling.center(10, ' '), "outta stock")
else:
while True:
try:
qty = int(input("how many sold?"))
if myStock[selling] < qty:
print("u selling > you have, not allowed!")
raise "Error"
myStock[selling] -= qty
print(selling, "is now", myStock[selling])
break
except:
print("Invalid qty")
#main prog below
choice = 0
myStock = {}
#empty dictionary for myStock
try:
infile = open("myStock.txt","r")
read1LineStock = infile.readline()
#read first line
    while read1LineStock != "":
#while the file has not ended,
myStock[read1LineStock.split(",")[0]] = int(read1LineStock.split(",")[1])
read1LineStock = infile.readline()
print(myStock)
#place item 0 in the split up sentence as the name for the item for myStock,
#and whatever number you can find in item 1 of the split up sentence (ignore '\n')
#as the 'quantity' for myStock.
#eg myStock['apple'] = '1'
#then, read the next line.
infile.close()
except:
print("Welcome to the stock management system!")
while choice != 9:
choice = menu()
    #remember to return choice to the global choice.
#the choice inside menu() is a LOCAL choice.
if choice ==1:
newStock()
elif choice ==2:
addVolume()
elif choice ==3:
sell()
#======================================= 8 ===========================
elif choice ==8:
print(myStock)
#======================================= 9 ===========================
print("Have a noice day")
| 30.522936
| 87
| 0.479411
| 346
| 3,327
| 4.595376
| 0.369942
| 0.034591
| 0.022642
| 0.043396
| 0.042767
| 0.042767
| 0.042767
| 0.042767
| 0.042767
| 0
| 0
| 0.014739
| 0.327021
| 3,327
| 108
| 88
| 30.805556
| 0.6954
| 0.245867
| 0
| 0.283784
| 0
| 0
| 0.229412
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0
| 0
| 0.067568
| 0.216216
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c21a8492971d5deb4f24b54f0d01b958dad6c817
| 1,780
|
py
|
Python
|
2017/day23.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
2017/day23.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
2017/day23.py
|
andypymont/adventofcode
|
912aa48fc5b31ec9202fb9654380991fc62afcd1
|
[
"MIT"
] | null | null | null |
"""
2017 Day 23
https://adventofcode.com/2017/day/23
"""
from typing import Dict
import aocd # type: ignore
class Program:
def __init__(self, text: str):
self.registers: Dict[str, int] = {}
self.commands = text.split("\n")
self.position = 0
self.mul_count = 0
def get(self, key: str) -> int:
try:
return int(key)
except ValueError:
return self.registers.get(key, 0)
def run_command(self, pos: int) -> None:
command = self.commands[pos]
instruction, *args = command.split(" ")
if instruction == "set":
self.registers[args[0]] = self.get(args[1])
elif instruction == "sub":
self.registers[args[0]] = self.get(args[0]) - self.get(args[1])
elif instruction == "mul":
self.registers[args[0]] = self.get(args[0]) * self.get(args[1])
self.mul_count += 1
elif instruction == "jnz":
if self.get(args[0]) != 0:
self.position += self.get(args[1]) - 1
def run(self) -> None:
while self.position < len(self.commands):
self.run_command(self.position)
self.position += 1
def prime(number: int) -> bool:
for factor in range(2, (number // 2) + 1):
if number % factor == 0:
return False
return True
def run_program() -> int:
return sum(1 for b in range(107900, 124901, 17) if not prime(b))
def main() -> None:
"""
Calculate and output the solutions based on the real puzzle input.
"""
data = aocd.get_data(year=2017, day=23)
program = Program(data)
program.run()
print(f"Part 1: {program.mul_count}")
print(f"Part 2: {run_program()}")
if __name__ == "__main__":
main()
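
# Mini example (illustrative) of the interpreter above, using a hypothetical
# two-instruction program:
#   demo = Program("set a 2\nmul a 3")
#   demo.run()
#   demo.registers["a"] == 6 and demo.mul_count == 1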
| 25.797101
| 75
| 0.561798
| 236
| 1,780
| 4.152542
| 0.347458
| 0.035714
| 0.078571
| 0.061224
| 0.146939
| 0.146939
| 0.146939
| 0.133673
| 0.085714
| 0.085714
| 0
| 0.044444
| 0.292135
| 1,780
| 68
| 76
| 26.176471
| 0.733333
| 0.072472
| 0
| 0
| 0
| 0
| 0.04484
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155556
| false
| 0
| 0.044444
| 0.022222
| 0.333333
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c21ace7559f52cf54fe988e11522102469f04048
| 1,641
|
py
|
Python
|
src/simulator/wsn/test.py
|
liuliuliu0605/Federated-Learning-PyTorch
|
04169455917ae50a8fea2dabd756a0ca1774e5d5
|
[
"MIT"
] | null | null | null |
src/simulator/wsn/test.py
|
liuliuliu0605/Federated-Learning-PyTorch
|
04169455917ae50a8fea2dabd756a0ca1774e5d5
|
[
"MIT"
] | null | null | null |
src/simulator/wsn/test.py
|
liuliuliu0605/Federated-Learning-PyTorch
|
04169455917ae50a8fea2dabd756a0ca1774e5d5
|
[
"MIT"
] | null | null | null |
import sys
from sklearn.datasets import make_blobs
from src.simulator.wsn.network import Network
from src.simulator.wsn.utils import *
from src.simulator.wsn.fcm import *
from src.simulator.wsn.direct_communication import *
from src.utils import complete, star
seed = 1
np.random.seed(seed)
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
traces = {}
topo = complete(cf.NB_CLUSTERS)
# topo = independent(cf.NB_CLUSTERS)
# topo = star(cf.NB_CLUSTERS)
# topo = ring(cf.NB_CLUSTERS)
centers = [[50, 225], [25, 110], [125, 20], [220, 80], [200, 225]]
X, y = make_blobs(n_samples=100, centers=centers, n_features=2,
random_state=seed, cluster_std=15)
traces = {}
network = Network(init_nodes=X, topo=topo)
# network = Network(topo=topo)
for routing_topology in ['FCM']:#, 'DC']:
network.reset()
routing_protocol_class = eval(routing_topology)
network.init_routing_protocol(routing_protocol_class())
# traces[routing_topology] = network.simulate()
for i in range(1000):
print("--------Round %d--------"% i)
network.activate_mix()
traces[routing_topology] = network.simulate_one_round()
network.deactivate_mix()
if len(network.get_alive_nodes()) == 0 :
break
# plot_clusters(network)
# plot_time_of_death(network)
# print(network.energy_dis)
# print(network.energy_dis['inter-comm']/ network.energy_dis['intra-comm'])
print("All death round: ", i)
print("First death round: ", network.first_depletion)
print("Energy:", network.energy_dis)
plot_traces(traces)
| 32.176471
| 80
| 0.672151
| 215
| 1,641
| 4.953488
| 0.44186
| 0.032864
| 0.060094
| 0.071362
| 0.114554
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028636
| 0.191347
| 1,641
| 51
| 81
| 32.176471
| 0.773926
| 0.197441
| 0
| 0.0625
| 0
| 0
| 0.055688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.21875
| 0
| 0.21875
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c21c3b472b61858775a3801d8a7ee0aff0f5536a
| 4,149
|
py
|
Python
|
src/dewloosh/geom/cell.py
|
dewloosh/dewloosh-geom
|
5c97fbab4b68f4748bf4309184b9e0e877f94cd6
|
[
"MIT"
] | 2
|
2021-12-11T17:25:51.000Z
|
2022-01-06T15:36:27.000Z
|
src/dewloosh/geom/cell.py
|
dewloosh/dewloosh-geom
|
5c97fbab4b68f4748bf4309184b9e0e877f94cd6
|
[
"MIT"
] | null | null | null |
src/dewloosh/geom/cell.py
|
dewloosh/dewloosh-geom
|
5c97fbab4b68f4748bf4309184b9e0e877f94cd6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numpy as np
from numpy import ndarray
from dewloosh.math.array import atleast1d
from dewloosh.math.utils import to_range
from .celldata import CellData
from .utils import jacobian_matrix_bulk, points_of_cells, pcoords_to_coords_1d
class PolyCell(CellData):
NNODE = None
NDIM = None
def __init__(self, *args, topo: ndarray=None, i: ndarray=None, **kwargs):
if isinstance(topo, ndarray):
kwargs['nodes'] = topo
if isinstance(i, ndarray):
kwargs['id'] = i
super().__init__(*args, **kwargs)
def jacobian_matrix(self, *args, dshp=None, ecoords=None, topo=None, **kwargs):
ecoords = self.local_coordinates(topo=topo) if ecoords is None else ecoords
return jacobian_matrix_bulk(dshp, ecoords)
def jacobian(self, *args, jac=None, **kwargs):
return np.linalg.det(jac)
def points_of_cells(self, *args, target=None, **kwargs):
assert target is None
topo = kwargs.get('topo', self.nodes.to_numpy())
coords = kwargs.get('coords', self.pointdata.x.to_numpy())
return points_of_cells(coords, topo)
def local_coordinates(self, *args, **kwargs):
frames = kwargs.get('frames', self.frames.to_numpy())
topo = kwargs.get('_topo', self.nodes.to_numpy())
coords = self.pointdata.x.to_numpy()
return points_of_cells(coords, topo, local_axes=frames)
def coords(self, *args, **kwargs):
return self.points_of_cells(*args, **kwargs)
class PolyCell1d(PolyCell):
NDIM = 1
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# NOTE The functionality of `pcoords_to_coords_1d` needs to be generalized
# for higher order cells.
def points_of_cells(self, *args, points=None, cells=None, target='global',
rng=None, flatten=False, **kwargs):
if isinstance(target, str):
assert target.lower() in ['global', 'g']
else:
raise NotImplementedError
topo = kwargs.get('topo', self.nodes.to_numpy())
coords = kwargs.get('coords', self.pointdata.x.to_numpy())
ecoords = points_of_cells(coords, topo)
if points is None and cells is None:
return ecoords
# points or cells is not None
if cells is not None:
cells = atleast1d(cells)
conds = np.isin(cells, self.id.to_numpy())
cells = atleast1d(cells[conds])
if len(cells) == 0:
return {}
ecoords = ecoords[cells]
topo = topo[cells]
else:
cells = np.s_[:]
if points is None:
points = np.array(self.lcoords()).flatten()
rng = [-1, 1]
else:
rng = np.array([0, 1]) if rng is None else np.array(rng)
points, rng = to_range(points, source=rng, target=[0, 1]).flatten(), [0, 1]
datacoords = pcoords_to_coords_1d(points, ecoords) # (nE * nP, nD)
if not flatten:
nE = ecoords.shape[0]
nP = points.shape[0]
datacoords = datacoords.reshape(nE, nP, datacoords.shape[-1]) # (nE, nP, nD)
# values : (nE, nP, nDOF, nRHS) or (nE, nP * nDOF, nRHS)
if isinstance(cells, slice):
# results are requested on all elements
data = datacoords
elif isinstance(cells, Iterable):
data = {c : datacoords[i] for i, c in enumerate(cells)}
else:
raise TypeError("Invalid data type <> for cells.".format(type(cells)))
return data
class PolyCell2d(PolyCell):
NDIM = 2
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class PolyCell3d(PolyCell):
NDIM = 3
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| 33.192
| 89
| 0.577489
| 499
| 4,149
| 4.651303
| 0.248497
| 0.034468
| 0.039207
| 0.025851
| 0.19776
| 0.18785
| 0.167169
| 0.167169
| 0.167169
| 0.101249
| 0
| 0.008678
| 0.305616
| 4,149
| 125
| 90
| 33.192
| 0.796946
| 0.065317
| 0
| 0.172414
| 0
| 0
| 0.021194
| 0
| 0
| 0
| 0
| 0
| 0.022989
| 1
| 0.114943
| false
| 0
| 0.103448
| 0.022989
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c22246e42a11a496e2843439e4ad4abd332a1d57
| 968
|
py
|
Python
|
softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py
|
anyboby/mbpo
|
98b75cb4cb13a2640fce1fbe1ddef466b864342e
|
[
"MIT"
] | 5
|
2020-02-12T17:09:09.000Z
|
2021-09-29T16:06:40.000Z
|
softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py
|
anyboby/mbpo
|
98b75cb4cb13a2640fce1fbe1ddef466b864342e
|
[
"MIT"
] | 10
|
2020-08-31T02:50:02.000Z
|
2022-02-09T23:36:43.000Z
|
softlearning/environments/mujoco_safety_gym/envs/fetch/slide.py
|
anyboby/mbpo
|
98b75cb4cb13a2640fce1fbe1ddef466b864342e
|
[
"MIT"
] | 2
|
2022-03-15T01:45:26.000Z
|
2022-03-15T06:46:47.000Z
|
import os
import numpy as np
from gym import utils
from mujoco_safety_gym.envs import fetch_env
# Ensure we get the path separator correct on windows
MODEL_XML_PATH = os.path.join('fetch', 'slide.xml')
class FetchSlideEnv(fetch_env.FetchEnvNew, utils.EzPickle):
def __init__(self, reward_type='sparse'):
initial_qpos = {
'robot0:slide0': 0.05,
'robot0:slide1': 0.48,
'robot0:slide2': 0.0,
'object0:joint': [1.7, 1.1, 0.41, 1., 0., 0., 0.],
}
fetch_env.FetchEnvNew.__init__(
self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,
gripper_extra_height=-0.02, target_in_the_air=False, target_offset=np.array([0.4, 0.0, 0.0]),
obj_range=0.1, target_range=0.3, distance_threshold=0.05, additional_objects=False,
            number_of_objects=0, initial_qpos=initial_qpos, reward_type=reward_type)
utils.EzPickle.__init__(self)
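A quick smoke test of the class, assuming mujoco_safety_gym and its MuJoCo assets are installed; the env follows the usual gym goal-env reset/step protocol (this harness is hypothetical, not part of the original file):

env = FetchSlideEnv(reward_type='sparse')
obs = env.reset()
action = env.action_space.sample()
# obs is a dict with 'observation', 'achieved_goal' and 'desired_goal' entries
obs, reward, done, info = env.step(action)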
| 37.230769
| 105
| 0.66219
| 143
| 968
| 4.188811
| 0.538462
| 0.020033
| 0.015025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059367
| 0.216942
| 968
| 25
| 106
| 38.72
| 0.730871
| 0.052686
| 0
| 0
| 0
| 0
| 0.078689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.210526
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c224e7c1cff16812960fb4cd9afab8ab99e06afc
| 2,227
|
py
|
Python
|
index_to_csv.py
|
grenzi/photoindexer
|
d10b3b6f347168706dc9c2673a29102fd73f31e1
|
[
"Apache-2.0"
] | null | null | null |
index_to_csv.py
|
grenzi/photoindexer
|
d10b3b6f347168706dc9c2673a29102fd73f31e1
|
[
"Apache-2.0"
] | null | null | null |
index_to_csv.py
|
grenzi/photoindexer
|
d10b3b6f347168706dc9c2673a29102fd73f31e1
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
from enum import Enum
from datetime import datetime,date
import logging
import pathlib
from tqdm import tqdm
from datastructures import Volume, IndexedFile,load_index_if_exists, save_index
from os import listdir
from os.path import isfile, join
import itertools
import csv
logger = logging.getLogger()
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
###############################################################################
index_dir = os.path.join(os.getcwd(), 'index')
logger.info('finding index files')
indexfiles = list([f for f in listdir(index_dir) if isfile(join(index_dir, f)) and f[-4:]=='json'])
columns = ['VolumeName', 'VolumeSerialNumber', 'Directory', 'Name', 'InodeNumber', 'Modified On', 'Created On', 'SHA256']
exif_columns=set()
logger.info('parsing index files')
#Pass 1 = collect keys
for index_file in indexfiles:
index = load_index_if_exists(os.path.join(index_dir, index_file))
for vol in index:
for ixf in vol.files:
if ixf.EXIF is not None:
for i in ixf.EXIF.keys():
exif_columns.add(i)
logger.info('writing csv')
#Pass 2 = write header
with open(os.path.join(os.getcwd(), 'index.csv'), mode='w', encoding='utf-8', newline='') as f:
writer = csv.writer(f)
writer.writerow(columns+list(exif_columns))
#and now rows
for index_file in indexfiles:
index = load_index_if_exists(os.path.join(index_dir, index_file))
for vol in index:
for ixf in vol.files:
row = [
vol.VolumeName,
vol.VolumeSerialNumber,
ixf.Directory,
ixf.Name,
ixf.st_ino,
ixf.st_mtime.strftime("%c"),
ixf.st_ctime.strftime("%c"),
ixf.SHA256
]
for ec in exif_columns:
                    row.append(ixf.EXIF.get(ec) if ixf.EXIF is not None else None)
writer.writerow(row)
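The two passes above are a standard pattern: scan once to collect the union of optional EXIF keys, then write every row against that stable header. A self-contained miniature of the same idea, with plain dicts standing in for the indexed files:

import csv, io

records = [{'Name': 'a.jpg', 'ISO': 100}, {'Name': 'b.jpg', 'FNumber': 2.8}]
extra = sorted({k for r in records for k in r if k != 'Name'})   # pass 1: collect keys
buf = io.StringIO()
writer = csv.writer(buf)
writer.writerow(['Name'] + extra)                                # pass 2: header, then rows
for r in records:
    writer.writerow([r['Name']] + [r.get(k) for k in extra])
print(buf.getvalue())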
| 35.349206
| 122
| 0.58599
| 273
| 2,227
| 4.684982
| 0.373626
| 0.023456
| 0.031274
| 0.039875
| 0.195465
| 0.195465
| 0.1595
| 0.1595
| 0.1595
| 0.1595
| 0
| 0.00802
| 0.272115
| 2,227
| 63
| 123
| 35.349206
| 0.780999
| 0.024248
| 0
| 0.150943
| 0
| 0
| 0.10197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.226415
| 0
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2253045dcaa56a5991a62320574be6662b1c519
| 1,056
|
py
|
Python
|
tests/test_wrapper.py
|
waysup/Jike-Metro
|
b8ead80dddd5d695784c5587edfd8df87c55a4e6
|
[
"MIT"
] | 193
|
2018-04-04T02:27:51.000Z
|
2022-03-14T03:26:44.000Z
|
tests/test_wrapper.py
|
BeiFenKu/Jike-Metro
|
e97fd0a751dca28a39d0e9fb94fbd696d5ee07b3
|
[
"MIT"
] | 16
|
2018-04-04T05:58:15.000Z
|
2021-01-08T02:56:57.000Z
|
tests/test_wrapper.py
|
BeiFenKu/Jike-Metro
|
e97fd0a751dca28a39d0e9fb94fbd696d5ee07b3
|
[
"MIT"
] | 24
|
2018-04-06T09:34:58.000Z
|
2021-03-02T02:10:07.000Z
|
import unittest
from collections import namedtuple
from jike.objects.wrapper import *
class TestWrapper(unittest.TestCase):
def setUp(self):
self.Test = namedtuple('Test', ['id', 'content', 'other', 'none'])
def test_repr_namedtuple(self):
self.Test.__repr__ = repr_namedtuple
test = self.Test(**{'id': 'a', 'content': 'b', 'other': 'c', 'none': None})
self.assertEqual(repr(test), 'Test(id=a, content=b)')
def test_str_namedtuple(self):
self.Test.__str__ = str_namedtuple
test = self.Test(**{'id': 'a', 'content': 'b', 'other': 'c', 'none': None})
self.assertEqual(str(test), 'Test(id=a, content=b, other=c)')
def test_namedtuple_with_defaults(self):
Test = namedtuple_with_defaults(self.Test)
test = Test(**{'id': 'a', 'content': 'b', 'other': 'c'})
self.assertEqual(test.id, 'a')
self.assertEqual(test.content, 'b')
self.assertEqual(test.other, 'c')
self.assertIsNone(test.none)
if __name__ == '__main__':
unittest.main()
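For reference, a minimal standalone sketch of what a namedtuple_with_defaults helper typically does (the real one lives in jike.objects.wrapper and may differ; this version simply defaults every field to None, which is the behavior the last test exercises):

from collections import namedtuple

def namedtuple_with_defaults(nt):
    # give every field of the namedtuple class a default of None
    nt.__new__.__defaults__ = (None,) * len(nt._fields)
    return nt

T = namedtuple_with_defaults(namedtuple('T', ['id', 'content']))
print(T(id='a'))   # T(id='a', content=None)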
| 34.064516
| 83
| 0.61553
| 132
| 1,056
| 4.719697
| 0.234848
| 0.089888
| 0.067416
| 0.11236
| 0.41252
| 0.41252
| 0.279294
| 0.279294
| 0.199037
| 0.199037
| 0
| 0
| 0.202652
| 1,056
| 30
| 84
| 35.2
| 0.739905
| 0
| 0
| 0.086957
| 0
| 0
| 0.135417
| 0
| 0
| 0
| 0
| 0
| 0.26087
| 1
| 0.173913
| false
| 0
| 0.130435
| 0
| 0.347826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c225d7cd38555d8a71f34fd96c413aa41e8e84be
| 10,125
|
py
|
Python
|
storm_control/hal4000/illumination/illuminationChannelUI.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 47
|
2015-02-11T16:05:54.000Z
|
2022-03-26T14:13:12.000Z
|
storm_control/hal4000/illumination/illuminationChannelUI.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 110
|
2015-01-30T03:53:41.000Z
|
2021-11-03T15:58:44.000Z
|
storm_control/hal4000/illumination/illuminationChannelUI.py
|
shiwei23/STORM6
|
669067503ebd164b575ce529fcc4a9a3f576b3d7
|
[
"MIT"
] | 61
|
2015-01-09T18:31:27.000Z
|
2021-12-21T13:07:51.000Z
|
#!/usr/bin/env python
"""
The various ChannelUI classes.
Hazen 04/17
"""
import os
from PyQt5 import QtCore, QtWidgets
def loadStyleSheet(name):
text = ""
with open(os.path.join(os.path.dirname(__file__), name)) as fp:
text += fp.read()
return text
class ChannelUI(QtWidgets.QFrame):
"""
A QWidget for displaying the UI elements associated with
an illumination channel.
"""
onOffChange = QtCore.pyqtSignal(object)
powerChange = QtCore.pyqtSignal(int)
def __init__(self, name = "", color = None, **kwds):
super().__init__(**kwds)
self.enabled = True
# FIXME: These styles could be better..
self.disabled_style = loadStyleSheet("disabled_style.qss")
self.enabled_style = "QFrame { background-color: rgb(" + color + ");}\n"
self.enabled_style += loadStyleSheet("enabled_style.qss")
self.setFixedWidth(50)
self.setLineWidth(2)
self.setStyleSheet(self.enabled_style)
self.main_layout = QtWidgets.QVBoxLayout(self)
self.main_layout.setContentsMargins(0,0,0,0)
self.main_layout.setSpacing(1)
# Text label.
self.wavelength_label = QtWidgets.QLabel(self)
self.wavelength_label.setText(name)
self.wavelength_label.setAlignment(QtCore.Qt.AlignCenter)
self.main_layout.addWidget(self.wavelength_label)
# Container for the power slider (if any).
self.slider_widget = QtWidgets.QWidget(self)
#
# FIXME: This is a mistake if none of the channels have a power
# slider.
#
self.slider_widget.setMinimumHeight(150)
self.slider_layout = QtWidgets.QVBoxLayout(self.slider_widget)
self.slider_layout.setContentsMargins(0,0,0,0)
self.slider_layout.setSpacing(1)
self.main_layout.addWidget(self.slider_widget)
# Power on/off radio button.
self.on_off_button = QtWidgets.QRadioButton(self)
self.main_layout.addWidget(self.on_off_button)
self.main_layout.setAlignment(self.on_off_button, QtCore.Qt.AlignCenter)
# Spacer at the bottom.
self.spacer_item = QtWidgets.QSpacerItem(1, 1,
QtWidgets.QSizePolicy.Minimum,
QtWidgets.QSizePolicy.Expanding)
self.main_layout.addItem(self.spacer_item)
# Connect signals
self.on_off_button.clicked.connect(self.handleOnOffChange)
def disableChannel(self):
"""
Disables all the UI elements of the channel.
"""
self.setOnOff(False)
self.setStyleSheet(self.disabled_style)
self.setFrameShadow(QtWidgets.QFrame.Sunken)
self.on_off_button.setCheckable(False)
self.enabled = False
def enableChannel(self, was_on = False):
"""
Enables all the UI elements of the channel.
"""
self.setStyleSheet(self.enabled_style)
self.setFrameShadow(QtWidgets.QFrame.Raised)
self.on_off_button.setCheckable(True)
self.setOnOff(was_on)
self.enabled = True
def getAmplitude(self):
if self.on_off_button.isChecked():
return 1.0
else:
return 0.0
def handleOnOffChange(self, on_off):
"""
Called when the on/off radio button is pressed.
"""
self.onOffChange.emit(on_off)
def isEnabled(self):
return self.enabled
def isOn(self):
return self.on_off_button.isChecked()
def newSettings(self, on, power):
self.setOnOff(on)
def remoteIncPower(self, power_inc):
pass
def remoteSetPower(self, new_power):
if self.enabled:
if (new_power > 0.5):
self.setOnOff(True)
else:
self.setOnOff(False)
def setOnOff(self, state):
if (state != self.on_off_button.isChecked()):
self.on_off_button.setChecked(state)
self.handleOnOffChange(state)
def setupButtons(self, button_data):
pass
def startFilm(self):
self.on_off_button.setEnabled(False)
def stopFilm(self):
self.on_off_button.setEnabled(True)
class ChannelUIAdjustable(ChannelUI):
"""
A QWidget for displaying the UI elements associated with
an adjustable illumination channel.
"""
def __init__(self, **kwds):
super().__init__(**kwds)
self.buttons = []
self.max_amplitude = 1
self.min_amplitude = 0
# Current power label.
self.power_label = QtWidgets.QLabel(self.slider_widget)
self.power_label.setAlignment(QtCore.Qt.AlignCenter)
self.slider_layout.addWidget(self.power_label)
# Slider for controlling the power.
self.powerslider = QtWidgets.QSlider(self.slider_widget)
self.powerslider.setOrientation(QtCore.Qt.Vertical)
self.powerslider.setSizePolicy(QtWidgets.QSizePolicy.Preferred,
QtWidgets.QSizePolicy.Expanding)
self.slider_layout.addWidget(self.powerslider)
# FIXME: If I knew what I was doing I should be able to do this
# using the stylesheet?
self.powerslider.setFixedWidth(25)
self.slider_layout.setAlignment(self.powerslider, QtCore.Qt.AlignHCenter)
def configureSlider(self, minimum, maximum):
"""
This is called once we have obtained amplitude functionality
that backs the slider. The functionality sets the range
for the slider.
"""
self.max_amplitude = maximum
self.min_amplitude = minimum
self.powerslider.setMaximum(maximum)
self.powerslider.setMinimum(minimum)
page_step = 0.1 * (maximum - minimum)
if (page_step > 1.0):
self.powerslider.setPageStep(page_step)
self.powerslider.setSingleStep(1)
#
# Why 2? We need the initial value to be a number that is not
# the default power, otherwise the slider text won't get updated
# at start-up.
#
self.setAmplitude(2)
self.powerslider.valueChanged.connect(self.handleAmplitudeChange)
def disableChannel(self):
super().disableChannel()
self.powerslider.setEnabled(False)
for button in self.buttons:
button.setEnabled(False)
def enableChannel(self, was_on = False):
super().enableChannel(was_on)
self.powerslider.setEnabled(True)
for button in self.buttons:
button.setEnabled(True)
def getAmplitude(self):
return self.powerslider.value()
def handleAmplitudeChange(self, amplitude):
self.powerChange.emit(amplitude)
def newSettings(self, on, power):
self.setOnOff(on)
self.setAmplitude(power)
def remoteIncPower(self, power_inc):
if self.enabled:
self.setAmplitude(self.powerslider.value() + power_inc)
def remoteSetPower(self, new_power):
if self.enabled:
self.setAmplitude(new_power)
def setAmplitude(self, amplitude):
if (amplitude != self.powerslider.value()):
self.powerslider.setValue(amplitude)
def setupButtons(self, button_data):
# Remove spacer at the end.
self.main_layout.removeItem(self.spacer_item)
# Make sure we have enough buttons.
while (len(self.buttons) < len(button_data)):
new_button = PowerButton(parent = self)
new_button.powerChange.connect(self.setAmplitude)
self.layout().addWidget(new_button)
self.buttons.append(new_button)
#self.cur_y += 22
# Hide all the buttons.
for button in self.buttons:
button.hide()
# Set text and value of the buttons we'll use & show them.
amp_range = float(self.max_amplitude - self.min_amplitude)
for i in range(len(button_data)):
self.buttons[i].setText(button_data[i][0])
self.buttons[i].setValue(int(round(button_data[i][1] * amp_range + self.min_amplitude)))
self.buttons[i].show()
# Add spacer again.
self.main_layout.addItem(self.spacer_item)
# Resize based on number of visible buttons.
#self.setFixedSize(48, 248 + 22 * len(button_data))
def updatePowerText(self, new_text):
self.power_label.setText(new_text)
class PowerButton(QtWidgets.QPushButton):
"""
A push button specialized for amplitude / power control.
"""
powerChange = QtCore.pyqtSignal(int)
def __init__(self, **kwds):
super().__init__(**kwds)
        self.value = 0
self.clicked.connect(self.handleClicked)
def handleClicked(self, boolean):
self.powerChange.emit(self.value)
def setValue(self, value):
self.value = value
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
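A minimal harness for exercising ChannelUI outside HAL, assuming the two .qss files sit next to the module; PyQt widgets need a QApplication before construction (this snippet is hypothetical, not part of storm_control):

import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
ch = ChannelUI(name="561", color="0,255,0")   # color is the rgb() triple spliced into the stylesheet
ch.onOffChange.connect(lambda on: print("on/off:", on))
ch.show()
sys.exit(app.exec_())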
| 32.041139
| 100
| 0.645136
| 1,189
| 10,125
| 5.381833
| 0.280067
| 0.03985
| 0.018284
| 0.028129
| 0.231755
| 0.15268
| 0.115799
| 0.060947
| 0.015627
| 0.015627
| 0
| 0.007661
| 0.265185
| 10,125
| 315
| 101
| 32.142857
| 0.852419
| 0.240494
| 0
| 0.263804
| 0
| 0
| 0.009503
| 0
| 0
| 0
| 0
| 0.006349
| 0
| 1
| 0.184049
| false
| 0.01227
| 0.01227
| 0.018405
| 0.269939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c22ad6cee4570624757675e6c7ad19a18a8249f2
| 5,028
|
py
|
Python
|
DataProcess/ultimate_subimage.py
|
EmmaAlexander/possum-tools
|
051ebca682cd97b68fa2a89c9d67e99cf85b09c7
|
[
"MIT"
] | 5
|
2021-11-18T13:27:30.000Z
|
2021-12-05T00:15:33.000Z
|
DataProcess/ultimate_subimage.py
|
EmmaAlexander/possum-tools
|
051ebca682cd97b68fa2a89c9d67e99cf85b09c7
|
[
"MIT"
] | null | null | null |
DataProcess/ultimate_subimage.py
|
EmmaAlexander/possum-tools
|
051ebca682cd97b68fa2a89c9d67e99cf85b09c7
|
[
"MIT"
] | null | null | null |
#CASA script to create cutouts of fits cubes
directoryA = '/Volumes/TARDIS/Work/askap/'
directoryB = '/Volumes/NARNIA/pilot_cutouts/'
import os
import numpy as np
sources=np.loadtxt('/Users/emma/GitHub/possum-tools/DataProcess/pilot_sources.txt',dtype='str')
for i in range(0,sources.shape[0]):
objectname=sources[i,0]
POSSUMSB=sources[i,3]
EMUSB=sources[i,4]
ra=sources[i,1]
dec=sources[i,2]
sourcecentre=ra+','+dec
fov=sources[i,6]#arcsec
print(objectname)
region='centerbox[['+sourcecentre+'], ['+fov+'arcsec, '+fov+'arcsec]]'
possum_outfile=directoryB+objectname+'/'+objectname+'_POSSUM.fits'
emu_outfile=directoryB+objectname+'/'+objectname+'_EMU.fits'
#POSSUM
if POSSUMSB == '5038':
#this is the Early Science data
possum_cont_filename = '/Volumes/NARNIA/PawseySync/DRAGN_1_0p8_A/DRAGN_1_0p8_A/image.i.SB5038.cont.restored.fits'
else:
possum_cont_filename = directoryA +'fullfields/image.i.SB'+POSSUMSB+'.cont.taylor.0.restored.fits'
if POSSUMSB == '10035':
print('Skipping POSSUM: bad SB10035')
else:
imsubimage(imagename=possum_cont_filename,outfile='possum_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='possum_cont_temp',fitsimage=possum_outfile,overwrite=True)
#cubes
i_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.i.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
q_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.q.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
u_filename = '/Volumes/NARNIA/leakage_corrected/image.restored.u.SB'+POSSUMSB+'.contcube.linmos.13arcsec.leakage.zernike.holoI.fits'
imsubimage(imagename=i_filename,outfile='i_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_filename,outfile='q_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_filename,outfile='u_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_im_temp',fitsimage=objectname+'_POSSUM_i.fits',overwrite=True)
exportfits(imagename='q_im_temp',fitsimage=objectname+'_POSSUM_q.fits',overwrite=True)
exportfits(imagename='u_im_temp',fitsimage=objectname+'_POSSUM_u.fits',overwrite=True)
#EMU
if EMUSB != 'NaN':
if EMUSB=='10083':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10083.contcube.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10083.contcube.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10083.contcube.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10083.cont.taylor.0.restored.conv.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
elif EMUSB=='10635':
i_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.i.SB10635.contcube.v2.conv.fits'
q_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.q.SB10635.contcube.v2.conv.fits'
u_EMU_filename = '/Volumes/NARNIA/fullfields/image.restored.u.SB10635.contcube.v2.conv.fits'
cont_EMU_filename= '/Volumes/NARNIA/fullfields/image.i.SB10635.cont.taylor.0.restored.fits'
imsubimage(imagename=i_EMU_filename,outfile='i_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=q_EMU_filename,outfile='q_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=u_EMU_filename,outfile='u_EMU_im_temp',region=region,overwrite=True,dropdeg=True)
imsubimage(imagename=cont_EMU_filename,outfile='EMU_cont_im_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='i_EMU_im_temp',fitsimage=objectname+'_EMU_i.fits',overwrite=True)
exportfits(imagename='q_EMU_im_temp',fitsimage=objectname+'_EMU_q.fits',overwrite=True)
exportfits(imagename='u_EMU_im_temp',fitsimage=objectname+'_EMU_u.fits',overwrite=True)
exportfits(imagename='EMU_cont_im_temp',fitsimage=emu_outfile,overwrite=True)
else:
#no cubes
emu_filename= directoryA +'fullfields/image.i.SB'+EMUSB+'.cont.taylor.0.restored.fits'
imsubimage(imagename=emu_filename,outfile='emu_cont_temp',region=region,overwrite=True,dropdeg=True)
exportfits(imagename='emu_cont_temp',fitsimage=emu_outfile,overwrite=True)
os.system("rm -r emu_cont_temp")
#tidy up
os.system("rm -r *_temp")
os.system("mv *{}* {}/".format(objectname,objectname))
| 57.136364
| 134
| 0.793755
| 715
| 5,028
| 5.366434
| 0.160839
| 0.08809
| 0.054209
| 0.084702
| 0.711754
| 0.660933
| 0.632786
| 0.557988
| 0.557988
| 0.489445
| 0
| 0.020131
| 0.061456
| 5,028
| 87
| 135
| 57.793103
| 0.792965
| 0.022076
| 0
| 0.279412
| 0
| 0.088235
| 0.36321
| 0.243023
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.014706
| 0
| 0.014706
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2306615617cec84564c5dcb8ee8a144809be27e
| 1,640
|
py
|
Python
|
openhab2/scripts/readNilan.py
|
starze/openhab2
|
e4eeeecd829cdf286372067bd61561e63fed6e1a
|
[
"MIT"
] | 10
|
2017-04-04T08:28:54.000Z
|
2021-02-24T04:36:07.000Z
|
openhab2/scripts/readNilan.py
|
starze/openhab2
|
e4eeeecd829cdf286372067bd61561e63fed6e1a
|
[
"MIT"
] | 2
|
2017-04-18T13:33:12.000Z
|
2018-06-05T21:27:18.000Z
|
openhab2/scripts/readNilan.py
|
starze/openhab2
|
e4eeeecd829cdf286372067bd61561e63fed6e1a
|
[
"MIT"
] | 7
|
2017-04-17T18:02:19.000Z
|
2020-09-25T21:28:08.000Z
|
#!/usr/bin/env python3
# -*- coding: ISO-8859-1 -*-
# https://github.com/starze/openhab2
# https://github.com/roggmaeh/nilan-openhab
import minimalmodbus
import serial
import os, sys
import csv
import httplib2
minimalmodbus.CLOSE_PORT_AFTER_EACH_CALL = True
instrument = minimalmodbus.Instrument('/dev/ttyUSB0', 30, mode='rtu') # port name, slave address (in decimal)
instrument.serial.port
instrument.serial.baudrate = 19200 # Baud
instrument.serial.bytesize = 8
instrument.serial.parity = serial.PARITY_EVEN
instrument.serial.stopbits = 1
instrument.serial.timeout = 2 # seconds
#instrument.debug = True
h = httplib2.Http()
with open('nilan_modbus.csv') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
if row['Register Type'] == "Input":
fc = 4
elif row['Register Type'] == "Holding":
fc = 3
if row['Unit'] == "text" or row['Unit'] == "ascii":
strRet = instrument.read_string(int(row['Address']), numberOfRegisters=1, functioncode=fc)
lst = list(strRet)
strRet = lst[1] + lst[0]
elif row['Scale'] == "100":
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=2, functioncode=fc)
else:
strRet = instrument.read_register(int(row['Address']), numberOfDecimals=0, functioncode=fc)
if row['Unit'] == "%" or row['Unit'] == "°C":
print("%s: %s %s" % (row['Name'], strRet, row['Unit']))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
else:
print("%s: %s" % (row['Name'], strRet))
h.request("http://localhost:8080/rest/items/" + row['Name'] + "/state", "PUT", body=str(strRet))
| 34.166667
| 109
| 0.675
| 220
| 1,640
| 4.995455
| 0.468182
| 0.087352
| 0.054595
| 0.050955
| 0.243858
| 0.216561
| 0.216561
| 0.216561
| 0.11283
| 0.11283
| 0
| 0.02695
| 0.140244
| 1,640
| 47
| 110
| 34.893617
| 0.751773
| 0.121951
| 0
| 0.114286
| 0
| 0
| 0.171788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.057143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c230b7732d9a3dd108e45e13abd94ad053baac7e
| 2,316
|
py
|
Python
|
face_signin/prepare_training.py
|
sribs/FaceRecognition
|
68284173195d55f32a353fe3d78a53c25fbf1363
|
[
"Apache-2.0"
] | null | null | null |
face_signin/prepare_training.py
|
sribs/FaceRecognition
|
68284173195d55f32a353fe3d78a53c25fbf1363
|
[
"Apache-2.0"
] | null | null | null |
face_signin/prepare_training.py
|
sribs/FaceRecognition
|
68284173195d55f32a353fe3d78a53c25fbf1363
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import os
def prepare_training_data(data_folder_path):
#------STEP-1--------
#get the directories (one directory for each subject) in data folder
dirs = sorted(os.listdir(data_folder_path))
#print(dirs)
faces = []
labels = []
for label,count in zip(dirs,range(len(dirs))):
subject_dir_path = data_folder_path+"/"+label
for image_name in os.listdir(subject_dir_path):
#ignore system files like .DS_Store
if image_name.startswith("."):
                continue
#build image path
#sample image path = training-data/s1/1.pgm
image_path = subject_dir_path + "/" + image_name
#read image
image = cv2.imread(image_path)
#display an image window to show the image
#print("Training label :",label)
cv2.waitKey(100)
#detect face
face, rect = detect_face(image)
#------STEP-4--------
#for the purpose of this tutorial
#we will ignore faces that are not detected
if face is not None:
#add face to list of faces
faces.append(face)
#add label for this face
labels.append(count)
print("Data Prepared for Training")
cv2.destroyAllWindows()
cv2.waitKey(1)
cv2.destroyAllWindows()
return faces, labels
def detect_face(img):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#load OpenCV face detector, I am using LBP which is fast
#there is also a more accurate but slow: Haar classifier
face_cascade = cv2.CascadeClassifier('opencv-files/lbpcascade_frontalface.xml')
#let's detect multiscale images(some images may be closer to camera than others)
#result is a list of faces
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5)
#if no faces are detected then return original img
if (len(faces) == 0):
return None, None
#under the assumption that there will be only one face,
#extract the face area
x, y, w, h = faces[0]
#return only the face part of the image
    return gray[y:y+h, x:x+w], faces[0]
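Typical downstream use of the two functions, assuming opencv-contrib-python is installed (LBPHFaceRecognizer lives in the contrib cv2.face module); this is a hypothetical sketch, not part of the original file:

import cv2
import numpy as np

faces, labels = prepare_training_data('training-data')
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(faces, np.array(labels))          # labels must be a numpy integer array
label, confidence = recognizer.predict(faces[0])   # lower confidence means a closer match
print(label, confidence)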
| 31.297297
| 85
| 0.593264
| 300
| 2,316
| 4.493333
| 0.466667
| 0.029674
| 0.031157
| 0.023739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01518
| 0.317358
| 2,316
| 73
| 86
| 31.726027
| 0.837445
| 0.346287
| 0
| 0.0625
| 0
| 0
| 0.047989
| 0.027523
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.25
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2318081600b41f253e54a78d1001f4ddb857e30
| 15,873
|
py
|
Python
|
fisspy/analysis/tdmap.py
|
SNU-sunday/FISS-PYTHON
|
f79420debef476a904356d42542cb6472990bb2f
|
[
"BSD-2-Clause"
] | 3
|
2017-02-18T06:42:08.000Z
|
2021-01-05T04:15:08.000Z
|
fisspy/analysis/tdmap.py
|
SNU-sunday/fisspy
|
f79420debef476a904356d42542cb6472990bb2f
|
[
"BSD-2-Clause"
] | 1
|
2019-06-30T10:35:27.000Z
|
2019-06-30T10:35:27.000Z
|
fisspy/analysis/tdmap.py
|
SNU-sunday/FISS-PYTHON
|
f79420debef476a904356d42542cb6472990bb2f
|
[
"BSD-2-Clause"
] | 1
|
2017-02-23T05:24:13.000Z
|
2017-02-23T05:24:13.000Z
|
from __future__ import absolute_import, division
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
from fisspy.analysis.filter import FourierFilter
from interpolation.splines import LinearSpline
from matplotlib.animation import FuncAnimation
import astropy.units as u
from astropy.time import Time
__author__= "Juhyung Kang"
__email__ = "jhkang@astro.snu.ac.kr"
class TDmap:
"""
Make Time-Distance map for given slit position
Parameters
----------
data : `~numpy.ndarray`
3-dimensional data array (time, y, x).
header : '~astropy.io.fits.header.Header
Header of data.
tarr : `~numpy.ndarray`, optional
Array of time (unit: second).
filterRange : `list`, optional
List of range of Fourier bandpass filters
Returns
-------
td : `~fisspy.analysis.tdmap.TDmap`
A new time distance class object.
Examples
--------
"""
def __init__(self, data, header, tarr=None, filterRange=None, cmap=None):
self.data = data
self.header = header
self.nx = self.header['naxis1']
self.ny = self.header['naxis2']
self.nt = self.header['naxis3']
self.dx = self.header['cdelt1']
self.dy = self.header['cdelt2']
self.dt = self.header['cdelt3']
self.rx = self.header['crval1']
self.ry = self.header['crval2']
self.cmap = cmap
if not np.any(tarr):
tarr = np.arange(0, self.nt*self.dt, self.dt)
self._tarr = tarr
self.Time = Time(self.header['sttime']) + tarr*u.second
self.extent = [self.rx-self.nx/2*self.dx,
self.rx+self.nx/2*self.dx,
self.ry-self.ny/2*self.dy,
self.ry+self.ny/2*self.dy]
self._xarr = np.linspace(self.extent[0]+self.dx*0.5,
self.extent[1]-self.dx*0.5,
self.nx)
self._yarr = np.linspace(self.extent[2]+self.dy*0.5,
self.extent[3]-self.dy*0.5,
self.ny)
self.smin = [self._tarr[0],
self.extent[2]+0.5*self.dy,
self.extent[0]+0.5*self.dx]
self.smax = [self._tarr[-1],
self.extent[3]-0.5*self.dy,
self.extent[1]-0.5*self.dx]
self.order = [self.nt, self.ny, self.nx]
self._tname = ['ori']
if not filterRange:
self.nfilter = 1
self.fdata = np.empty([1, self.nt, self.ny, self.nx])
else:
self.nfilter = len(filterRange)+1
self.fdata = np.empty([self.nfilter, self.nt, self.ny, self.nx])
for n, fR in enumerate(filterRange):
self._tname += ['%.1f - %.1f mHZ'%(fR[0], fR[1])]
self.fdata[n+1] = FourierFilter(self.data, self.nt,
self.dt*1e-3, fR)
self.fdata[0] = self.data
self.interp = []
for data in self.fdata:
self.interp += [LinearSpline(self.smin, self.smax,
self.order, data)]
def get_TD(self, R, xc, yc, angle):
self.R = R
self.xc = xc
self.yc = yc
self.angle = angle
ang = np.deg2rad(self.angle)
nl = int(np.ceil(2*R/self.dx))
self.x1 = -R*np.cos(ang) + xc
self.x2 = R*np.cos(ang) + xc
self.y1 = -R*np.sin(ang) + yc
self.y2 = R*np.sin(ang) + yc
x = np.linspace(self.x1, self.x2, nl)
y = np.linspace(self.y1, self.y2, nl)
oiarr = np.empty([nl, self.nt, 3])
oiarr[:,:,0] = self._tarr
oiarr[:,:,1] = y[:,None]
oiarr[:,:,2] = x[:,None]
iarr = oiarr.reshape([nl*self.nt, 3])
td = self.interp[self.filterNum-1](iarr)
return td.reshape([nl, self.nt])
def imshow(self, R=5, xc=None, yc=None, angle=0, t=0,
filterNum=1, fps=10, cmap=plt.cm.gray,
interpolation='bilinear'):
        try:
            plt.rcParams['keymap.back'].remove('left')
            plt.rcParams['keymap.forward'].remove('right')
        except (KeyError, ValueError):
            pass
if not xc:
xc = self.rx
if not yc:
yc = self.ry
self.R = self._R0 = R
self.angle = self._angle0 = angle
self.xc = self._xc0 = xc
self.yc = self._yc0 = yc
self.filterNum = self._filterNum0 = filterNum
self.t = self._t0 = t
self.fps = fps
self.pause = 'ini'
self.pos = []
self.mark = []
self.hlines = []
tpix = np.abs(self._tarr-self.t).argmin()
self.td = self.get_TD(R,xc,yc,angle)
self.tdextent = [self._tarr[0]-0.5*self.dt,
self._tarr[-1]+0.5*self.dt,
-self.R,
self.R]
if not self.cmap:
self.cmap = cmap
self.fig= plt.figure(figsize=[14,9])
self.fig.canvas.set_window_title('%s ~ %s'%(self.Time[0], self.Time[-1]))
gs = gridspec.GridSpec(5, self.nfilter)
self.axTD = self.fig.add_subplot(gs[3:, :])
self.axTD.set_xlabel('Time (sec)')
self.axTD.set_ylabel('Distance (arcsec)')
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(filterNum, self._tname[filterNum-1],
self.Time[tpix].value,
tpix))
self.imTD = self.axTD.imshow(self.td,
extent=self.tdextent,
origin='lower',
cmap=self.cmap,
interpolation=interpolation)
self.axRaster = []
self.im = []
for i in range(self.nfilter):
if i == 0:
self.axRaster += [self.fig.add_subplot(gs[:3, i])]
self.axRaster[i].set_xlabel('X (arcsec)')
self.axRaster[i].set_ylabel('Y (arcsec)')
else:
self.axRaster += [self.fig.add_subplot(gs[:3, i],
sharex=self.axRaster[0],
sharey=self.axRaster[0])]
self.axRaster[i].tick_params(labelleft=False, labelbottom=False)
self.axRaster[i].set_title('%i: %s'%(i+1, self._tname[i]))
self.im += [self.axRaster[i].imshow(self.fdata[i, tpix],
extent=self.extent,
origin='lower',
cmap=self.cmap,
interpolation=interpolation)]
self.slit = self.axRaster[filterNum-1].plot([self.x1, self.x2],
[self.y1, self.y2],
color='k')[0]
self.center = self.axRaster[filterNum-1].scatter(self.xc, self.yc,
100, marker='+',
c='k')
self.top = self.axRaster[filterNum-1].scatter(self.x2, self.y2, 100,
marker='+', c='b', label='%.1f'%self.R)
self.bottom = self.axRaster[filterNum-1].scatter(self.x1, self.y1, 100,
marker='+', c='r',
label='-%.1f'%self.R)
self.tslit = self.axTD.axvline(self.t, ls='dashed', c='lime')
self.leg = self.axRaster[filterNum-1].legend()
self.axTD.set_aspect(adjustable='box', aspect='auto')
self.imTD.set_clim(self.fdata[filterNum-1,0].min(),
self.fdata[filterNum-1,0].max())
self.fig.tight_layout()
self.fig.canvas.mpl_connect('key_press_event', self._onKey)
plt.show()
def _onKey(self, event):
if event.key == 'up':
if self.angle < 360:
self.angle += 1
else:
self.angle = 1
elif event.key == 'down':
if self.angle > 0:
self.angle -=1
else:
self.angle = 359
elif event.key == 'right':
if self.t < self._tarr[-1]:
self.t += self.dt
else:
self.t = self._tarr[0]
elif event.key == 'left':
if self.t > self._tarr[0]:
self.t -= self.dt
else:
self.t = self._tarr[-1]
elif event.key == 'ctrl+right':
if self.xc < self._xarr[-1]:
self.xc += self.dx
else:
self.xc = self._xarr[0]
elif event.key == 'ctrl+left':
if self.xc > self._xarr[0]:
self.xc -= self.dx
else:
self.xc = self._xarr[-1]
elif event.key == 'ctrl+up':
if self.yc < self._yarr[-1]:
self.yc += self.dy
else:
self.yc = self._yarr[0]
elif event.key == 'ctrl+down':
if self.yc > self._yarr[0]:
self.yc -= self.dy
else:
self.yc = self._yarr[-1]
elif event.key == 'ctrl++':
self.R += self.dx
elif event.key == 'ctrl+-':
self.R -= self.dx
elif event.key == ' ' and event.inaxes in self.axRaster:
self.xc = event.xdata
self.yc = event.ydata
elif event.key == ' ' and event.inaxes == self.axTD:
self.t = event.xdata
elif event.key == 'x' and event.inaxes == self.axTD:
self.pos += [event.ydata]
ang = np.deg2rad(self.angle)
xp = self.pos[-1]*np.cos(ang) + self.xc
yp = self.pos[-1]*np.sin(ang) + self.yc
self.mark += [self.axRaster[self.filterNum-1].scatter(xp, yp, 100,
marker='+',
c='lime')]
self.hlines += [self.axTD.axhline(self.pos[-1], ls='dashed', c='lime')]
elif event.key == 'enter':
if self.pause == 'ini':
self.ani = FuncAnimation(self.fig, self._chTime,
frames=self._tarr,
blit=False,
interval=1e3/self.fps,
repeat=True)
# cache_frame_data=False)
self.pause = False
else:
self.pause ^= True
if self.pause:
self.ani.event_source.stop()
else:
self.ani.event_source.start(1e3/self.fps)
for iid in range(self.nfilter):
if event.key == 'ctrl+%i'%(iid+1):
self.filterNum = iid+1
tpix = np.abs(self._tarr-self.t).argmin()
self.changeSlit(self.R, self.xc, self.yc, self.angle)
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(self.filterNum, self._tname[self.filterNum-1],
self.Time[tpix].value,
tpix))
self._filterNum0 = self.filterNum
self.imTD.set_clim(self.im[self.filterNum-1].get_clim())
if self.xc != self._xc0 or self.yc != self._yc0 or \
self.angle != self._angle0 or self.R != self._R0:
self.changeSlit(self.R, self.xc, self.yc, self.angle)
self._R0 = self.R
self._xc0 = self.xc
self._yc0 = self.yc
self._angle0 = self.angle
if self.t != self._t0:
self._chTime(self.t)
self._t0 = self.t
self.fig.canvas.draw_idle()
def changeSlit(self, R, xc, yc, angle):
td = self.get_TD(R, xc, yc, angle)
self.tdextent[2] = -R
self.tdextent[3] = R
self.axTD.set_ylim(-R, R)
ang = np.deg2rad(self.angle)
if self.filterNum != self._filterNum0:
self.leg.remove()
self.slit.remove()
self.bottom.remove()
self.center.remove()
self.top.remove()
self.slit = self.axRaster[self.filterNum-1].plot([self.x1, self.x2],
[self.y1, self.y2],
color='k')[0]
self.center = self.axRaster[self.filterNum-1].scatter(self.xc,
self.yc, 100, marker='+', c='k')
self.top = self.axRaster[self.filterNum-1].scatter(self.x2,
self.y2, 100,
marker='+', c='b', label='%.1f'%self.R)
self.bottom = self.axRaster[self.filterNum-1].scatter(self.x1,
self.y1, 100,
marker='+', c='r',
label='-%.1f'%self.R)
for n, pos in enumerate(self.pos):
self.mark[n].remove()
xp = pos*np.cos(ang) + self.xc
yp = pos*np.sin(ang) + self.yc
self.mark[n] = self.axRaster[self.filterNum-1].scatter(xp, yp, 100,
marker='+',
c='lime')
else:
self.slit.set_xdata([self.x1, self.x2])
self.slit.set_ydata([self.y1, self.y2])
self.bottom.set_offsets([self.x1, self.y1])
self.top.set_offsets([self.x2, self.y2])
self.center.set_offsets([self.xc, self.yc])
# change marker
for n, pos in enumerate(self.pos):
xp = pos*np.cos(ang) + self.xc
yp = pos*np.sin(ang) + self.yc
self.mark[n].set_offsets([xp, yp])
self.hlines[n].set_ydata(pos)
self.top.set_label('%.1f'%self.R)
self.bottom.set_label('-%.1f'%self.R)
self.imTD.set_data(td)
self.leg = self.axRaster[self.filterNum-1].legend()
def _chTime(self, t):
self.t = t
tpix = np.abs(self._tarr-t).argmin()
self.axTD.set_title('%i: %s, '
'Time: %s, '
'tpix: %i'%(self.filterNum, self._tname[self.filterNum-1],
self.Time[tpix].value,
tpix))
self.tslit.set_xdata(self.t)
for n, im in enumerate(self.im):
im.set_data(self.fdata[n, tpix])
def set_clim(self, cmin, cmax, frame):
self.im[frame-1].set_clim(cmin, cmax)
if self.filterNum == frame:
self.imTD.set_clim(cmin, cmax)
def remove_Mark(self):
for n in range(len(self.pos)):
self.mark[n].remove()
self.hlines[n].remove()
self.pos = []
self.mark = []
self.hlines = []
    def savefig(self, filename, **kwargs):
        self.fig.savefig(filename, **kwargs)
def saveani(self, filename, **kwargs):
fps = kwargs.pop('fps', self.fps)
self.ani.save(filename, fps=fps, **kwargs)
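A typical invocation, assuming a (nt, ny, nx) data cube and a header carrying the naxis1..3 / cdelt1..3 / crval1..2 / sttime keys read in __init__ (all values are illustrative):

td = TDmap(data, header, filterRange=[[2.5, 4.0], [5.0, 7.0]])   # two mHz bandpass filters
td.imshow(R=7, angle=45, filterNum=1)   # opens the interactive raster + time-distance figure
td.set_clim(0, 1, frame=1)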
| 41.015504
| 86
| 0.449001
| 1,830
| 15,873
| 3.830601
| 0.156284
| 0.039372
| 0.019971
| 0.024964
| 0.396576
| 0.305278
| 0.246648
| 0.234665
| 0.203138
| 0.160057
| 0
| 0.02394
| 0.4158
| 15,873
| 387
| 87
| 41.015504
| 0.732018
| 0.033705
| 0
| 0.197605
| 0
| 0
| 0.031363
| 0.001443
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0.002994
| 0.026946
| 0
| 0.062874
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c232029579d8b288e2ac9ed43b03f0690df1e9c2
| 1,317
|
py
|
Python
|
polaris/polaris/sep24/tzinfo.py
|
yuriescl/django-polaris
|
8806d0e4e8baaddbffbceb3609786d2436b8abe1
|
[
"Apache-2.0"
] | 81
|
2019-11-16T21:47:22.000Z
|
2022-02-17T07:35:02.000Z
|
polaris/polaris/sep24/tzinfo.py
|
yuriescl/django-polaris
|
8806d0e4e8baaddbffbceb3609786d2436b8abe1
|
[
"Apache-2.0"
] | 491
|
2019-11-10T23:44:30.000Z
|
2022-03-20T00:25:02.000Z
|
polaris/polaris/sep24/tzinfo.py
|
yuriescl/django-polaris
|
8806d0e4e8baaddbffbceb3609786d2436b8abe1
|
[
"Apache-2.0"
] | 89
|
2019-11-18T21:31:01.000Z
|
2022-03-28T13:47:41.000Z
|
import pytz
from datetime import datetime, timedelta, timezone
from rest_framework.decorators import api_view, parser_classes, renderer_classes
from rest_framework.parsers import JSONParser
from rest_framework.renderers import JSONRenderer
from rest_framework.request import Request
from rest_framework.response import Response
from django.contrib.sessions.backends.db import SessionStore
from polaris.utils import render_error_response, getLogger
logger = getLogger(__name__)
@api_view(["POST"])
@parser_classes([JSONParser])
@renderer_classes([JSONRenderer])
def post_tzinfo(request: Request) -> Response:
if not (
request.data.get("sessionId") and request.data.get("sessionOffset") is not None
):
return render_error_response("missing required parameters")
now = datetime.now(timezone.utc)
offset = timedelta(minutes=request.data["sessionOffset"])
zone = None
for tz in map(pytz.timezone, pytz.all_timezones_set):
if now.astimezone(tz).utcoffset() == offset:
zone = tz.zone
break
if not zone:
return render_error_response("no timezones matched with offset")
session = SessionStore(session_key=request.data["sessionId"])
session["timezone"] = zone
session.save()
return Response({"status": "ok", "tz": zone})
| 34.657895
| 87
| 0.741838
| 161
| 1,317
| 5.913043
| 0.447205
| 0.042017
| 0.089286
| 0.052521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.161731
| 1,317
| 37
| 88
| 35.594595
| 0.862319
| 0
| 0
| 0
| 0
| 0
| 0.094913
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.290323
| 0
| 0.419355
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c2321c74ae596a68d5084730c6df5fe1a40a8090
| 1,615
|
py
|
Python
|
utils/fundoptutils.py
|
joshualee155/FundOptimizer
|
da842de6c99f89c767d03c9ef1b392237b726a3f
|
[
"MIT"
] | 2
|
2021-01-03T00:46:51.000Z
|
2021-09-01T02:48:51.000Z
|
utils/fundoptutils.py
|
joshualee155/FundOptimizer
|
da842de6c99f89c767d03c9ef1b392237b726a3f
|
[
"MIT"
] | null | null | null |
utils/fundoptutils.py
|
joshualee155/FundOptimizer
|
da842de6c99f89c767d03c9ef1b392237b726a3f
|
[
"MIT"
] | 1
|
2021-08-28T11:04:00.000Z
|
2021-08-28T11:04:00.000Z
|
import pandas as pd
import datetime as dt
class FundType( object ):
OF = 'Open Ended Fund'
ETF = 'Exchange Traded Fund'
LOF = 'Listed Open Ended Fund'
MMF = 'Money Market Fund'
def getFundType( fundCode ):
fundTypeDf = pd.read_csv( 'refData/fund_list.csv', names = [ 'fundCode', 'fundType' ] )
fundTypeDf[ 'fundCode' ] = fundTypeDf[ 'fundCode' ].apply( lambda x: str(x).zfill(6) )
fundTypeDf.drop_duplicates( subset = [ 'fundCode' ], inplace = True )
fundTypeDf.set_index( 'fundCode', drop = True, inplace = True )
try:
sType = fundTypeDf[ 'fundType' ][ fundCode ]
if sType == 'OF':
return FundType.OF
elif sType == 'ETF':
return FundType.ETF
elif sType == 'LOF':
return FundType.LOF
elif sType == 'MMF':
return FundType.MMF
else:
raise NameError( "Unknown fund type %s" % sType )
except KeyError:
return FundType.OF
def str2date( sDate ):
"""
Convert a string date to datetime.date
"""
try:
dateTime = dt.datetime.strptime( sDate, "%Y%m%d" )
except ValueError:
dateTime = dt.datetime.strptime( sDate, "%Y-%m-%d" )
return dateTime.date()
def getHolidays( startDate, endDate ):
"""
Return China exchange holidays ( non-trading days ) from `startDate` to `endDate`
"""
with open( 'refData/holidays.txt', 'r' ) as f:
holidays = f.read().strip().split('\n')
holidays = [ date for date in map( str2date, holidays ) if date >= startDate and date <= endDate ]
return holidays
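Example round trip through the three helpers, assuming refData/fund_list.csv and refData/holidays.txt are present in the working directory:

import datetime as dt

print(getFundType('000001'))    # falls back to FundType.OF for unknown codes
print(str2date('2021-01-04'))   # accepts both %Y%m%d and %Y-%m-%d -> datetime.date(2021, 1, 4)
print(getHolidays(dt.date(2021, 1, 1), dt.date(2021, 12, 31)))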
| 30.471698
| 102
| 0.596285
| 187
| 1,615
| 5.128342
| 0.470588
| 0.072993
| 0.027112
| 0.054223
| 0.070907
| 0.070907
| 0.070907
| 0.070907
| 0
| 0
| 0
| 0.002575
| 0.278638
| 1,615
| 52
| 103
| 31.057692
| 0.820601
| 0.074303
| 0
| 0.108108
| 0
| 0
| 0.149692
| 0.014354
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.054054
| 0
| 0.459459
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c234a2bf9d847b0178d0e12fe82918d472e89c91
| 2,014
|
py
|
Python
|
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
plotter.py
|
keshavbantu/covclass
|
e27cfb4ff8e7e6f076c3429aa1c4696e173bc3a4
|
[
"MIT"
] | null | null | null |
import cleaner as dataStream
import plotly.graph_objects as go
import plotly.io as pio
#DONUT PLOT - CONDITIONS -----------------------------------------
labels = ['Diabetes','Hypertension','Coronary Heart(D)','Chronic Kidney(D)','No Conditions','Obstructive Pulmonary(D)']
values = dataStream.PIEList
fig_cond = go.Figure(data=[go.Pie(labels=labels, values=values, hole=.3)])
#fig_cond.show()
pio.write_html(fig_cond, file="templates/cond.html")
#GROUP BAR PLOT - SYMPTOMS ---------------------------------------
symplabel=['Symptoms']
fig_symp = go.Figure(data=[
go.Bar(name='Fever', x=symplabel, y=dataStream.Fever),
go.Bar(name='Cough', x=symplabel, y=dataStream.Cough),
go.Bar(name='Breathlessness', x=symplabel, y=dataStream.Breathlessness),
go.Bar(name='Severe Acute Respiratory Syndrome', x=symplabel, y=dataStream.SARI),
go.Bar(name='Influenza-like Illness', x=symplabel, y=dataStream.ILI),
go.Bar(name='Asymptomatic', x=symplabel, y=dataStream.NONE_sym)
])
fig_symp.update_layout(barmode='group')
#fig_symp.show()
pio.write_html(fig_symp, file="templates/symp.html")
#STACK BAR PLOT - AGE DATA ------------------------------------------
fig_age = go.Figure()
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.maleAgeList,
name='Male Deaths',
orientation='h',
marker=dict(
color='rgba(61, 112, 242, 0.6)',
line=dict(color='rgba(61, 112, 242, 1.0)', width=2)
)
))
fig_age.add_trace(go.Bar(
y=['0 to 10', '10 to 20', '20 to 30','30 to 40', '40 to 50', '50 to 60','60 to 70', '70 to 80', '80 to 90','90 to 100'],
x=dataStream.femaleAgeList,
name='Female Deaths',
orientation='h',
marker=dict(
color='rgba(242, 61, 221, 0.6)',
line=dict(color='rgba(242, 61, 221, 1.0)', width=2)
)
))
fig_age.update_layout(barmode='stack')
#fig_age.show()
pio.write_html(fig_age, file="templates/age.html")
| 38
| 124
| 0.627607
| 306
| 2,014
| 4.058824
| 0.326797
| 0.032206
| 0.043478
| 0.101449
| 0.326087
| 0.280193
| 0.206119
| 0.146538
| 0.146538
| 0.146538
| 0
| 0.071057
| 0.140516
| 2,014
| 53
| 125
| 38
| 0.646447
| 0.120159
| 0
| 0.243902
| 0
| 0
| 0.302207
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.073171
| 0
| 0.073171
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c239846032333fb5d26b1c1eb5b5c8a5cf233d15
| 2,219
|
py
|
Python
|
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
Music/__init__.py
|
izazkhan8293/Musicheu
|
9cd33a71868b8b850d6fd78eaac05dda0713b7cc
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import importlib
import time
import uvloop
from aiohttp import ClientSession
from motor.motor_asyncio import AsyncIOMotorClient as MongoClient
from pyrogram import Client
from pyrogram import Client as Bot
from Music import config
from Music.config import API_ID, API_HASH, BOT_TOKEN, MONGO_DB_URI, SUDO_USERS, LOG_GROUP_ID, OWNER_ID
def initialize():
global dbb
dbb = {}
initialize()
MONGODB_CLI = MongoClient(MONGO_DB_URI)
db = MONGODB_CLI.wbb
SUDOERS = SUDO_USERS
OWNER = OWNER_ID
async def load_sudoers():
global SUDOERS
sudoersdb = db.sudoers
sudoers = await sudoersdb.find_one({"sudo": "sudo"})
sudoers = [] if not sudoers else sudoers["sudoers"]
for user_id in SUDOERS:
if user_id not in sudoers:
sudoers.append(user_id)
await sudoersdb.update_one(
{"sudo": "sudo"}, {"$set": {"sudoers": sudoers}}, upsert=True
)
SUDOERS = (SUDOERS + sudoers) if sudoers else SUDOERS
loop = asyncio.get_event_loop()
loop.run_until_complete(load_sudoers())
Music_START_TIME = time.time()
loop = asyncio.get_event_loop()
BOT_ID = 0
BOT_NAME = ""
BOT_USERNAME = ""
ASSID = 0
ASSNAME = ""
ASSUSERNAME = ""
ASSMENTION = ""
app = Client(
'MusicBot',
API_ID,
API_HASH,
bot_token=BOT_TOKEN,
)
aiohttpsession = ClientSession()
client = Client(config.SESSION_NAME, config.API_ID, config.API_HASH)
def all_info(app, client):
global BOT_ID, BOT_NAME, BOT_USERNAME
global ASSID, ASSNAME, ASSMENTION, ASSUSERNAME
getme = app.get_me()
getme1 = client.get_me()
BOT_ID = getme.id
ASSID = getme1.id
if getme.last_name:
BOT_NAME = getme.first_name + " " + getme.last_name
else:
BOT_NAME = getme.first_name
BOT_USERNAME = getme.username
ASSNAME = (
f"{getme1.first_name} {getme1.last_name}"
if getme1.last_name
else getme1.first_name
)
ASSUSERNAME = getme1.username
ASSMENTION = getme1.mention
app.start()
client.start()
all_info(app, client)
| 28.448718
| 102
| 0.708878
| 302
| 2,219
| 4.986755
| 0.268212
| 0.055777
| 0.035857
| 0.047809
| 0.2417
| 0.183267
| 0.169987
| 0.169987
| 0.169987
| 0.169987
| 0
| 0.005637
| 0.200541
| 2,219
| 77
| 103
| 28.818182
| 0.843292
| 0
| 0
| 0.106667
| 0
| 0
| 0.036503
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026667
| false
| 0
| 0.186667
| 0
| 0.213333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c23a870064fefb4e740984ad848e886ea4aa0cd9
| 9,372
|
py
|
Python
|
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | 3
|
2020-12-10T13:43:08.000Z
|
2021-01-17T04:36:34.000Z
|
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | null | null | null |
test.py
|
ZJianjin/Traffic4cast2020_lds
|
6cb76e885a9539e485c055222be77f41a559c507
|
[
"Apache-2.0"
] | null | null | null |
import random
from random import shuffle
import numpy as np
import tensorflow as tf
from tensorflow.python.tools import freeze_graph
import datetime
import time
import queue
import threading
import logging
from PIL import Image
import itertools
import yaml
import re
import os
import glob
import shutil
import sys
import copy
import h5py
from net_all import *
from trainer_all import *
season = None
use_mask = True
use_flip = False
use_time = True
model_name = 'neta'
train_winter = ['-01-', '-02-', '-03-']
train_summer = ['-05-', '-04-', '-06-']
test_winter = ['-11-', '-12-']
test_summer = ['-07-', '-08-', '-09-', '-10-']
SEED = 0
num_train_file = 285
num_frame_per_day = 288
num_frame_before = 12
num_frame_sequence = 24
target_frames = [0, 1, 2, 5, 8, 11]
num_sequence_per_day = num_frame_per_day - num_frame_sequence + 1
height = 495
width = 436
num_channel = 9
num_channel_discretized = 8 # 4 * 2
visual_input_channels = 115
visual_output_channels = 6 * 8
vector_input_channels = 1 # start time point
import json
#
n = 1
s = 255
e = 85
w = 170
tv = 16
##############################Set the path##############################################
data_root = './data'
model_root = './jianjzhmodelstest'
log_root = './output'
##############################Set the path##############################################
#
target_city = 'ISTANBUL' # ['BERLIN', 'MOSCOW', 'ISTANBUL']
# test_start_index_list = np.array([ 18, 57, 114, 174, 222], np.int32) # 'BERLIN'
# test_start_index_list = np.array([ 45, 102, 162, 210, 246], np.int32) # 'Moscow' # 'Istanbul'
input_static_data_path = data_root + '/' + target_city + '/' + target_city + '_static_2019.h5'
input_mask_data_path = data_root + '/maskdata/'
input_train_data_folder_path = data_root + '/' + target_city + '/training'
input_val_data_folder_path = data_root + '/' + target_city + '/validation'
input_test_data_folder_path = data_root + '/' + target_city + '/testing'
save_model_path = model_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
summary_path = log_root + '/' + target_city + str(season) + str(use_flip) + str(use_mask)
#
batch_size_test = 5
learning_rate = 3e-4
load_model_path = model_root + '/' + 'ISTANBULneta'
# load_model_path = ''
is_training = False
# premodel = os.path.join(model_root, 'BERLINneta', 'model-58000.cptk')
global_step = 60000
def write_data(data, filename):
f = h5py.File(filename, 'w', libver='latest')
dset = f.create_dataset('array', shape=(data.shape), data=data, compression='gzip', compression_opts=9)
f.close()
def get_data_filepath_list(input_data_folder_path):
data_filepath_list = []
for filename in os.listdir(input_data_folder_path):
if filename.split('.')[-1] != 'h5':
continue
data_filepath_list.append(os.path.join(input_data_folder_path, filename))
data_filepath_list = sorted(data_filepath_list)
return data_filepath_list
def get_static_data(input_static_data_path):
fr = h5py.File(input_static_data_path, 'r')
    data = fr['array'][()] / 255.0
return data
def get_mask_data(input_mask_data_path, city):
map_0 = np.load(input_mask_data_path + city + 'map_0.npy')
map_1 = np.load(input_mask_data_path + city + 'map_1.npy')
map_2 = np.load(input_mask_data_path + city + 'map_2.npy')
map_3 = np.load(input_mask_data_path + city + 'map_3.npy')
result = np.concatenate([map_0, map_0, map_1, map_1, map_2, map_2, map_3, map_3], axis=-1)
return result
if __name__ == '__main__':
random.seed(SEED)
np.random.seed(SEED)
tf.set_random_seed(SEED)
trainer = Trainer(height, width, visual_input_channels, visual_output_channels, vector_input_channels,
learning_rate,
save_model_path, load_model_path, summary_path, is_training, use_mask, model_name)
tf.reset_default_graph()
test_data_filepath_list = get_data_filepath_list(input_test_data_folder_path)
if season == 'winter':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_winter]):
tmp.append(i)
data_filepath_list = tmp
elif season == 'summer':
tmp = []
for i in test_data_filepath_list:
if any([j in i for j in test_summer]):
tmp.append(i)
data_filepath_list = tmp
print('test_data_filepath_list\t', len(test_data_filepath_list), )
test_output_filepath_list = list()
for test_data_filepath in test_data_filepath_list:
filename = test_data_filepath.split('/')[-1]
test_output_filepath_list.append('output/' + target_city + '/' + target_city + '_test' + '/' + filename)
static_data = get_static_data(input_static_data_path)
mask_data = get_mask_data(input_mask_data_path, target_city)
try:
if not os.path.exists('output'):
os.makedirs('output')
if not os.path.exists('output/' + target_city):
os.makedirs('output/' + target_city)
if not os.path.exists('output/' + target_city + '/' + target_city + '_test'):
os.makedirs('output/' + target_city + '/' + target_city + '_test')
except Exception:
print('output path not made')
exit(-1)
with open('test_data.json') as f:
test_json = json.load(f)
for i in range(len(test_data_filepath_list)):
file_path = test_data_filepath_list[i]
out_file_path = test_output_filepath_list[i]
fr = h5py.File(file_path, 'r')
a_group_key = list(fr.keys())[0]
data = fr[a_group_key]
# assert data.shape[0] == num_frame_per_day
data = np.array(data, np.uint8)
test_data_batch_list = []
test_data_time_list = []
test_data_mask_list = []
batch_size_test = data.shape[0]
for j in range(batch_size_test):
test_data_time_list.append(float(j) / float(num_frame_per_day))
data_sliced = data[:, :, :, :, :num_channel]
if use_time:
for time_dict in test_json:
time_data = list(time_dict.keys())[0]
if time_data in file_path:
time_data = time_dict[time_data]
break
time_id = np.ones_like(data_sliced)[:, :, :, :, :1]
for m in range(len(time_data)):
for n in range(num_frame_before):
time_id[m, n] = time_id[m, n] * (time_data[m] + n) / 288.0 * 255.0
data_sliced = np.concatenate([data_sliced, time_id], axis=-1)
data_mask = (np.max(data_sliced, axis=4) == 0)
test_data_mask_list = data_mask[:, :, :, :]
test_data_batch_list.append(data_sliced)
test_data_time_list = np.asarray(test_data_time_list, np.float32)
input_time = np.reshape(test_data_time_list, (batch_size_test, 1))
test_data_mask = test_data_mask_list
input_data = np.concatenate(test_data_batch_list, axis=0).astype(np.float32)
input_data[:, :, :, :, :] = input_data[:, :, :, :, :] / 255.0
input_data = np.moveaxis(input_data, 1, -1).reshape((batch_size_test, height, width, -1))
static_data_tmp = np.tile(static_data, [batch_size_test, 1, 1, 1])
input_data = np.concatenate([input_data, static_data_tmp], axis=-1)
# input_data_mask = np.zeros((batch_size_test, num_frame_before, height, width, num_channel_discretized), np.bool)
# input_data_mask[test_data_mask[:, :num_frame_before, :, :], :] = True
# input_data_mask = np.moveaxis(input_data_mask, 1, -1).reshape((batch_size_test, height, width, -1))
# input_data[input_data_mask] = -1.0
true_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
if use_mask:
orig_label_mask = np.tile(mask_data, [1, 1, 1, len(target_frames)])
else:
orig_label_mask = np.ones((batch_size_test, height, width, visual_output_channels), dtype=np.float32)
prediction_list = []
# print(input_data.shape)
# assert 0
import scipy.misc as misc
# trainer.load_model(premodel)
# print('load model')
for b in range(batch_size_test):
run_out_one = trainer.infer(input_data[b, :, :, :][np.newaxis, :, :, :],
input_time[b, :][np.newaxis, :],
true_label_mask[b, :, :, :][np.newaxis, :, :, :], global_step)
prediction_one = run_out_one['predict']
prediction_list.append(prediction_one)
# print(input_data[b,:,:,:].shape)
# for t in range(3):
# misc.imsave('output_'+str(b)+'_'+str(t)+'.png', np.reshape(prediction_one, [495, 436, 3, 8])[:, :, t, 0])
# assert 0
prediction = np.concatenate(prediction_list, axis=0)
prediction = np.moveaxis(np.reshape(prediction, (
batch_size_test, height, width, num_channel_discretized, len(target_frames),)), -1, 1)
prediction = prediction.astype(np.float32) * 255.0
prediction = np.rint(prediction)
prediction = np.clip(prediction, 0.0, 255.0).astype(np.uint8)
assert prediction.shape == (batch_size_test, len(target_frames), height, width, num_channel_discretized)
write_data(prediction, out_file_path)
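The gzip'd HDF5 writer above pairs with a reader like this; a quick round trip on a small array, using the write_data defined in this file:

import h5py
import numpy as np

arr = np.arange(12, dtype=np.uint8).reshape(3, 4)
write_data(arr, 'demo.h5')
with h5py.File('demo.h5', 'r') as fr:
    assert np.array_equal(fr['array'][()], arr)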
| 37.94332
| 122
| 0.636364
| 1,313
| 9,372
| 4.204113
| 0.192689
| 0.03913
| 0.049275
| 0.028986
| 0.270652
| 0.183333
| 0.156703
| 0.105435
| 0.071739
| 0.05942
| 0
| 0.031276
| 0.222151
| 9,372
| 246
| 123
| 38.097561
| 0.725926
| 0.103713
| 0
| 0.042781
| 0
| 0
| 0.046356
| 0.003042
| 0
| 0
| 0
| 0
| 0.005348
| 1
| 0.02139
| false
| 0
| 0.128342
| 0
| 0.165775
| 0.010695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c23bc080151d66518c85923b1ce1c8be7c0ff949
| 3,037
|
py
|
Python
|
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
python/python-010/rds.py
|
suzuxander/suzuxander_samples
|
736224dae91b432ef3ec796f5eda23417865f142
|
[
"MIT"
] | null | null | null |
from troposphere import Template, Ref, Parameter, GetAtt
from troposphere.ec2 import SecurityGroup
from troposphere.rds import DBSubnetGroup, DBInstance


def create_rds_template():
    template = Template()
    vpc = template.add_parameter(
        parameter=Parameter(
            title='Vpc',
            Type='String'
        )
    )
    subnet_a = template.add_parameter(
        parameter=Parameter(
            title='SubnetA',
            Type='String'
        )
    )
    subnet_b = template.add_parameter(
        parameter=Parameter(
            title='SubnetB',
            Type='String'
        )
    )
    master_user_name = template.add_parameter(
        parameter=Parameter(
            title='DBMasterUserName',
            Type='String'
        )
    )
    master_user_password = template.add_parameter(
        parameter=Parameter(
            title='DBMasterUserPassword',
            Type='String'
        )
    )
    storage_size = template.add_parameter(
        parameter=Parameter(
            title='StorageSize',
            Default='20',
            Type='String'
        )
    )
    instance_class = template.add_parameter(
        parameter=Parameter(
            title='InstanceClass',
            Default='db.t2.micro',
            Type='String'
        )
    )
    engine_version = template.add_parameter(
        parameter=Parameter(
            title='EngineVersion',
            Default='5.7.26',
            Type='String'
        )
    )
    security_group = template.add_resource(
        resource=SecurityGroup(
            title='SampleSecurityGroup',
            GroupDescription='sample-rds',
            SecurityGroupIngress=[
                {
                    'IpProtocol': 'tcp',
                    'FromPort': 3306,
                    'ToPort': 3306,
                    'CidrIp': '0.0.0.0/0',
                }
            ],
            VpcId=Ref(vpc)
        )
    )
    db_subnet_group = template.add_resource(
        resource=DBSubnetGroup(
            title='SampleDBSubnetGroup',
            DBSubnetGroupDescription='sample-rds',
            DBSubnetGroupName='sample-rds',
            SubnetIds=[Ref(subnet_a), Ref(subnet_b)]
        )
    )
    template.add_resource(
        resource=DBInstance(
            title='SampleDBInstance',
            DBSubnetGroupName=Ref(db_subnet_group),
            # VPCSecurityGroups=[Ref(security_group)],
            VPCSecurityGroups=[GetAtt(security_group, 'GroupId')],
            AllocatedStorage=Ref(storage_size),
            DBInstanceClass=Ref(instance_class),
            DBInstanceIdentifier='sample-rds',
            DBName='sample_rds',
            Engine='mysql',
            EngineVersion=Ref(engine_version),
            MasterUsername=Ref(master_user_name),
            MasterUserPassword=Ref(master_user_password),
            PubliclyAccessible=True
        )
    )
    with open('./rds.yml', mode='w') as file:
        file.write(template.to_yaml())


if __name__ == '__main__':
    create_rds_template()
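# --- A hypothetical deployment sketch (not part of the original sample): the generated
# --- rds.yml can be submitted to CloudFormation with boto3. Parameter values below are
# --- placeholders. Note that the template opens 3306 to 0.0.0.0/0 and sets
# --- PubliclyAccessible=True, which is fine for a throwaway sample but not for production.
import boto3

def deploy_sample_stack():
    with open('./rds.yml') as f:
        body = f.read()
    cfn = boto3.client('cloudformation')
    cfn.create_stack(
        StackName='sample-rds',
        TemplateBody=body,
        Parameters=[
            {'ParameterKey': 'Vpc', 'ParameterValue': 'vpc-xxxxxxxx'},
            {'ParameterKey': 'SubnetA', 'ParameterValue': 'subnet-aaaaaaaa'},
            {'ParameterKey': 'SubnetB', 'ParameterValue': 'subnet-bbbbbbbb'},
            {'ParameterKey': 'DBMasterUserName', 'ParameterValue': 'admin'},
            {'ParameterKey': 'DBMasterUserPassword', 'ParameterValue': 'change-me'},
        ],
    )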
| 25.957265
| 66
| 0.55186
| 246
| 3,037
| 6.609756
| 0.365854
| 0.177122
| 0.098401
| 0.142681
| 0.250923
| 0.211562
| 0
| 0
| 0
| 0
| 0
| 0.01059
| 0.347053
| 3,037
| 116
| 67
| 26.181034
| 0.80938
| 0.013171
| 0
| 0.161616
| 0
| 0
| 0.111185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010101
| false
| 0.030303
| 0.030303
| 0
| 0.040404
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c24663b502469b48e008fb30a563fba0b901fd18
| 7,119
|
py
|
Python
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
spethso/Verleihsystem-TTF
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 1
|
2019-03-17T08:11:14.000Z
|
2019-03-17T08:11:14.000Z
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
spethso/Verleihsystem-TTF
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 60
|
2018-06-12T14:46:50.000Z
|
2020-11-16T00:50:37.000Z
|
total_tolles_ferleihsystem/auth_providers/ldap_auth_provider.py
|
FIUS/ttf-backend
|
39179f9ac5b07f5106e555f82f3c9011d33805bd
|
[
"MIT"
] | 1
|
2019-12-02T19:25:59.000Z
|
2019-12-02T19:25:59.000Z
|
"""
Auth Providers which provides LDAP login
"""
from typing import List, Dict
from ldap3 import Connection, Server, AUTO_BIND_TLS_BEFORE_BIND, SUBTREE
from ldap3.core.exceptions import LDAPSocketOpenError, LDAPBindError
from ..login import LoginProvider
from .. import APP, AUTH_LOGGER
class LDAPAuthProvider(LoginProvider, provider_name="LDAP"):
"""
Login Provider with connection to LDAP Server
"""
ldap_uri: str #The URL of the ldpa server
port: int #The port of the ldap server. Use None for default.
ssl: bool #Whether to use ssl for the connection.
start_tls: bool #Whether to upgrade connection with StartTLS once bound.
user_search_base: str #The search base for users.
group_search_base: str #The search base for groups.
user_rdn: str #The RDN for users.
user_uid_field: str # The field of a user, which is the name, that is i the group_membership_field
group_membership_field: str #The field of a group, which contains the username
moderator_filter: str #A moderator must match this filter
admin_filter: str #A admininstrator must match this filter
moderator_group_filter: str # A moderator must be in at least one of the matched groups
admin_group_filter: str # A admin must be in at least one of the matched groups
server: Server = None
known_users: Dict[str, bool]
def __init__(self):
self.ldap_uri: str = APP.config["LDAP_URI"] #The URL of the ldpa server
self.port: int = APP.config["LDAP_PORT"] #The port of the ldap server. Use None for default.
self.ssl: bool = APP.config["LDAP_SSL"] #Whether to use ssl for the connection.
self.start_tls: bool = APP.config["LDAP_START_TLS"] #Whether to upgrade connection with StartTLS once bound.
self.user_search_base: str = APP.config["LDAP_USER_SEARCH_BASE"] #The search base for users.
self.group_search_base: str = APP.config["LDAP_GROUP_SEARCH_BASE"] #The search base for groups.
self.user_rdn: str = APP.config["LDAP_USER_RDN"] #The RDN for users.
# The field of a user, which is the name, that is i the group_membership_field
self.user_uid_field: str = APP.config["LDAP_USER_UID_FIELD"]
#The field of a group, which contains the username
self.group_membership_field: str = APP.config["LDAP_GROUP_MEMBERSHIP_FIELD"]
self.moderator_filter: str = APP.config["LDAP_MODERATOR_FILTER"] #A moderator must match this filter
self.admin_filter: str = APP.config["LDAP_ADMIN_FILTER"] #A admininstrator must match this filter
# A moderator must be in at least one of the matched groups
self.moderator_group_filter: str = APP.config["LDAP_MODERATOR_GROUP_FILTER"]
# A admin must be in at least one of the matched groups
self.admin_group_filter: str = APP.config["LDAP_ADMIN_GROUP_FILTER"]
self.server: Server = None
self.known_users = {}
def init(self) -> None:
self.server = Server(self.ldap_uri, port=self.port, use_ssl=self.ssl)
def valid_user(self, user_id: str) -> bool:
return True
@classmethod
def combine_filters(cls, filters: List[str]) -> str:
"""
Combines the given filters with a or
"""
non_empty_filters = list(filter(None, filters))
if not non_empty_filters:
return ""
elif len(non_empty_filters) == 1:
return non_empty_filters.pop()
else:
return "(|" + ''.join(non_empty_filters) + ")"
def valid_password(self, user_id: str, password: str) -> bool:
try:
user_str = self.user_rdn + "=" + user_id + "," + self.user_search_base
with Connection(self.server,
user=user_str,
password=password,
auto_bind=AUTO_BIND_TLS_BEFORE_BIND,
read_only=True) as conn:
user_base_filter = "(" + self.user_rdn + "=" + user_id + ")"
user_filter = user_base_filter
all_users_filter = self.combine_filters([self.moderator_filter, self.admin_filter])
if all_users_filter:
user_filter = "(&" + all_users_filter + user_base_filter + ")"
if not conn.search(self.user_search_base,
user_filter,
search_scope=SUBTREE,
attributes=[self.user_uid_field]):
AUTH_LOGGER.info("User %s is not in the user filter", user_id)
return False
user_uid = str(conn.entries.pop()[self.user_uid_field])
group_base_filter = "(" + self.group_membership_field + "=" + user_uid + ")"
group_filter = group_base_filter
all_groups_filter = self.combine_filters([self.moderator_group_filter, self.admin_group_filter])
if all_groups_filter:
group_filter = "(&" + all_groups_filter + group_base_filter + ")"
if not conn.search(self.group_search_base, group_filter, search_scope=SUBTREE):
AUTH_LOGGER.info("User %s is not in any group of the group filter", user_id)
return False
admin_user_filter = user_base_filter
all_admin_users_filter = self.combine_filters([self.admin_filter])
if all_admin_users_filter:
admin_user_filter = "(&" + all_admin_users_filter + user_base_filter + ")"
admin_group_filter = group_base_filter
all_admin_groups_filter = self.combine_filters([self.admin_group_filter])
if all_admin_groups_filter:
admin_group_filter = "(&" + all_admin_groups_filter + group_base_filter + ")"
in_admin_user_filter = conn.search(self.user_search_base,
admin_user_filter,
search_scope=SUBTREE)
in_admin_group_filter = conn.search(self.group_search_base,
admin_group_filter,
search_scope=SUBTREE)
if (in_admin_user_filter and in_admin_group_filter):
self.known_users[user_id] = True
else:
self.known_users[user_id] = False
AUTH_LOGGER.debug("Valid login from user %s. User in admin user filter: %s. User in admin group: %s",
user_id, str(in_admin_user_filter), str(in_admin_group_filter))
return True
except LDAPSocketOpenError as error:
raise ConnectionError("Unable to connect to LDAP Server.") from error
except LDAPBindError:
return False
return False
def is_admin(self, user_id: str) -> bool:
return self.known_users[user_id]
def is_moderator(self, user_id: str) -> bool:
return True
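# --- A quick illustration (not part of the original module) of how `combine_filters`
# --- assembles LDAP filter strings; the filter values below are made up.
def _combine_filters_examples() -> None:
    assert LDAPAuthProvider.combine_filters([]) == ""
    assert LDAPAuthProvider.combine_filters(["(cn=admins)"]) == "(cn=admins)"
    assert LDAPAuthProvider.combine_filters(["", None]) == ""  # empty filters are dropped
    assert LDAPAuthProvider.combine_filters(["(cn=admins)", "(cn=mods)"]) == "(|(cn=admins)(cn=mods))"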
| 47.46
| 118
| 0.61975
| 910
| 7,119
| 4.579121
| 0.141758
| 0.050156
| 0.040557
| 0.038397
| 0.538757
| 0.402208
| 0.201104
| 0.145428
| 0.110391
| 0.091193
| 0
| 0.000605
| 0.303554
| 7,119
| 149
| 119
| 47.778523
| 0.839855
| 0.172215
| 0
| 0.101852
| 0
| 0.009259
| 0.077121
| 0.024218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064815
| false
| 0.018519
| 0.046296
| 0.027778
| 0.361111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c247338889dd4aef3193b428e74aac5424652e3f
| 4,117
|
py
|
Python
|
md2html.py
|
osfans/yancheng
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | 4
|
2017-01-26T03:25:24.000Z
|
2019-04-15T14:11:46.000Z
|
md2html.py
|
osfans/yancheng
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | 1
|
2016-12-02T04:26:31.000Z
|
2016-12-05T05:02:39.000Z
|
md2html.py
|
osfans/xu
|
1f5cec75c8d97006f8b2ee4b1b36b7dc78930ef0
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import re, os, glob

template = """
<!doctype html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0, user-scalable=yes">
<style>
body {
  font-family: PMingLiu, HanaMinA, HanaMinB, Helvetica, arial, sans-serif;
  writing-mode: vertical-rl;
  -webkit-writing-mode: vertical-rl; }
.sm {
  margin: 20px 0 10px;
  padding: 0;
  font-weight: bold;
  font-size: 30px;
  border-left: 1px solid #cccccc;
  margin: 0 5px;
  cursor: text;
  position: static;
  clear: both;
  text-align: right;
}
.sd, .sd2, .zy, .zi, .zi1, .yi {
  font-size: 10px;
  text-align: center;
  cursor: text;
  float: left;
  margin-left: 10px;
  margin-right: 10px;
  line-height: 10px;
  letter-spacing: 0.35em;
}
.sd, .sd2 {
  margin-right: 25px;
  clear: both;
}
.sd2 {
  margin-right: 20px;
}
.zi, .zi1 {
  padding-top: 20px;
  padding-bottom: 10px;
  font-size: 20px;
  line-height: 20px;
}
.zi1 {
  padding-top: 10px;
}
.yi {
  min-height: 40px;
  text-align: left;
  line-height: 12px;
  margin-right: 8px;
}
.clear {
  clear: both;
}
</style>
<title>徐氏類音字彙</title>
</head>
<body>
%s
</body>
</html>
"""

lines = list()


def append(fmt, s):
    # print(s)
    lines.append(fmt % s)


def parse(s):
    s = s.strip().strip("`").replace("〜", "—").replace("~", "—").replace("※", "").replace(" ", "")
    if "(" in s:
        s = re.sub(r"(.[\?=]?)((.+?))", r'<a title="\2">\1</a>', s)
    return s


def break_yi(yi):
    n = len(yi)
    if 0 < n < 4:
        yi = yi + (4 - n) * " "
        n = 4
    if n > 0 and '<' not in yi:
        yi = yi[:(n + 1) // 2] + "<br/>" + yi[(n + 1) // 2:]
    return yi


def md2html(filename):
    sm = ""
    sd = ""
    zi_count = 0
    zi_single = ""
    lines.clear()
    for line in open(filename, encoding="U8"):
        line = line.strip()
        if line:
            if line.startswith(">") or line.startswith("---"):
                continue
            if line.startswith("##"):
                line = line[2:].strip()
                if line == sd:
                    continue
                sd = line
                zi_count = 0
            elif line.startswith("#"):
                line = line[1:].strip()
                if line == sm:
                    continue
                sm = line
                append("<div class=sm>%s</div>", sm)
            else:
                zi, yi = "", ""
                if line.startswith("`"):
                    yi = line  # definition with no headword (無字)
                elif line.count("`") == 2:
                    zi, yi = line.split("`", 1)
                if zi or yi:
                    zi = parse(zi)
                    yi = parse(yi)
                    if not yi:
                        zi_single += zi
                        continue
                    if zi:
                        zi = zi_single + zi
                        zi_single = ""
                    yi = break_yi(yi)
                    zi_count += 1
                    if zi_count == 1:
                        sd_title = sd
                        if not zi:
                            sd_title = yi
                            yi = ""
                        if len(sd_title) == 2:
                            sd_title = sd[0] + "<br/>" + sd[1]
                            append("<div class=sd2>%s</div>", sd_title)
                        else:
                            append("<div class=sd>%s</div>", sd_title)
                        append("<div class=zy><div class=zi1>%s</div><div class=yi>%s</div></div>", (zi, yi))
                    else:
                        append("<div class=zy><div class=zi>%s</div><div class=yi>%s</div></div>", (zi, yi))
    target = open("docs/" + os.path.basename(filename).replace(".md", ".html"), "w", encoding="U8")
    target.write(template % ("\n".join(lines)))
    target.close()


def copy_readme():
    target = open("README.md", "w", encoding="U8")
    target.write(open("wiki/Home.md", encoding="U8").read().replace("/osfans/xu/wiki/", "https://osfans.github.io/xu/"))
    target.close()


copy_readme()
for filename in glob.glob("wiki/??.md"):
    md2html(filename)
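# --- Worked examples (not in the original) of what `break_yi` does to a gloss:
# --- glosses shorter than four characters are padded, then split across a <br/>.
assert break_yi("abcde") == "abc<br/>de"  # 5 chars -> split 3/2, no padding
assert break_yi("") == ""                 # empty gloss is left unchanged
# A 3-char gloss is first padded to 4 characters and then split 2/2, i.e.
# break_yi("abc") -> "ab<br/>c" followed by the padding character appended above.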
| 24.360947
| 120
| 0.459072
| 499
| 4,117
| 3.759519
| 0.304609
| 0.03838
| 0.037313
| 0.022388
| 0.075693
| 0.052239
| 0.026652
| 0.026652
| 0.026652
| 0.026652
| 0
| 0.030928
| 0.363857
| 4,117
| 168
| 121
| 24.505952
| 0.683849
| 0.00753
| 0
| 0.123288
| 0
| 0.020548
| 0.360274
| 0.038942
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034247
| false
| 0
| 0.006849
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c24d4c5a8f9125c9ef834c785c10d1d380869f30
| 8,645
|
py
|
Python
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | 2
|
2020-12-08T20:51:38.000Z
|
2021-01-03T20:42:10.000Z
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | 1
|
2020-12-08T21:09:50.000Z
|
2020-12-08T21:30:35.000Z
|
src/utils/strava.py
|
adrigrillo/endomondo-strava-migrator
|
398ff4a0db4a8a5a3a4f0d8fb53157ffeeb88079
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
utils/strava.py
=================
Utility class to Strava API
"""
import json
import time
from configparser import ConfigParser, NoOptionError
from datetime import datetime
from pathlib import Path
from typing import Tuple

from loguru import logger
from stravalib import Client, exc

from utils.parameters import SECRET
from utils.constants import CONFIG_PATH, CODE_ID_FILE_NAME, TOKEN_FILE_NAME
from utils.files_handler import check_folder
from utils.parameters import STRAVA, CLIENT_ID


def get_client_id(app_config: ConfigParser) -> int:
    """ Obtains the client ID from the configuration file.

    Args:
        app_config (ConfigParser): app configuration.

    Returns:
        int: client id from the configuration file.

    Raises:
        NoOptionError: If the `client_id` key is not
            present in the configuration.
        ValueError: If the client id is not an integer.
    """
    try:
        client_id = app_config.getint(STRAVA, CLIENT_ID)
    except NoOptionError:
        raise ValueError('The client id has not been set in the configuration.')
    except ValueError:
        logger.exception('Invalid client id format.')
        raise
    return client_id


def get_secret(app_config: ConfigParser) -> str:
    """ Obtains the secret from the configuration file.

    Args:
        app_config (ConfigParser): app configuration.

    Returns:
        str: secret from the configuration file.

    Raises:
        NoOptionError: If the `secret` key is not
            present in the configuration.
    """
    try:
        secret = app_config.get(STRAVA, SECRET)
    except NoOptionError:
        raise ValueError('The secret has not been set in the configuration.')
    return secret


def get_strava_token_from_code_id(config: ConfigParser) -> str:
    """ Method that exchanges the temporary authentication code obtained
    when `src/request_auth.py` is executed. The method reads the file
    `config/code_id.txt` that contains the temporal authentication and generates
    the POST request to obtain the final access token, which is saved in
    `config/token.json`.

    This method requires the Strava application `client_id` and `secret`, which
    have to be set in the configuration file (`config/config.ini`).

    Args:
        config (ConfigParser): app configuration.

    Returns:
        str: Strava access token.

    Raises:
        ValueError: If no token is found in the configuration.
    """
    code_id_path = Path(CONFIG_PATH, CODE_ID_FILE_NAME)
    if not code_id_path.is_file():
        raise ValueError('The file with the temporal authentication code (`config/code_id.txt`) '
                         'was NOT found. Execute `request_auth.py` to obtain the temporal access.')
    with open(code_id_path, 'r') as file:
        logger.debug('The file with the temporal authentication code (`config/code_id.txt`) '
                     'was found.')
        code_id = file.read()
    if not code_id:
        raise ValueError('No valid temporal code access found. Rerun `request_auth.py` '
                         'to obtain the temporal access.')
    client = Client()
    token = client.exchange_code_for_token(client_id=get_client_id(config),
                                           client_secret=get_secret(config),
                                           code=code_id)
    logger.debug('Obtained access until {}:\n'
                 '- token: {}.'
                 '- refresh token: {}.',
                 datetime.utcfromtimestamp(int(token['expires_at'])).strftime('%d-%m-%Y %H:%M:%S'),
                 token['access_token'], token['refresh_token'])
    # Save JSON with the response
    save_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
    with open(save_path, 'w') as file:
        logger.info('Writing token information to `{}`.', save_path)
        json.dump(token, file, indent=4)
    return token['access_token']


def get_strava_client(config: ConfigParser) -> Client:
    """ Checks the authentication token and generates the Strava client.

    Args:
        config (ConfigParser): app configuration.

    Returns:
        If it exists, a Strava client configured with the authentication token.
    """
    token_file_path = Path(check_folder(CONFIG_PATH), TOKEN_FILE_NAME)
    if token_file_path.is_file():
        logger.debug('The token info file (`config/token.json`) was found.')
        with open(token_file_path, 'r') as file:
            token_data = json.load(file)
            token = token_data.get('access_token')
        # If the file exists but no access token found, check against the temporary auth
        if not token:
            logger.warning('The token info file (`config/token.json`) was found'
                           ' but the access token could not be read.')
            token = get_strava_token_from_code_id(config)
    else:
        logger.info('The token info file (`config/token.json`) was NOT found. '
                    'Retrieving from the temporal authentication code.')
        token = get_strava_token_from_code_id(config)
    client = Client(access_token=token)
    return client


def upload_activity(client: Client, activity_type: str, file_path: Path) -> bool:
    """ Helper method to upload the activity to Strava. This method will handle
    the different possibilities when uploading an activity.

    Args:
        client (Client): configured Strava client.
        activity_type (str): Strava activity string.
        file_path (Path): Path to the `*.tcx` activity file.

    Returns:
        bool: True if the activity has been uploaded successfully. False otherwise.

    Raises:
        RateLimitExceeded: When the API limits have been reached. Generally when
            more than 1000 petitions have been done during the day.
        ConnectionError: When it has been impossible to connect to the Strava servers.
        Exception: Unknown exceptions that will be logged in detail.
    """
    try:
        with open(file_path, 'r') as activity_file:
            client.upload_activity(
                activity_file=activity_file,
                data_type='tcx',
                activity_type=activity_type,
                private=False
            )
    except exc.ActivityUploadFailed:
        logger.exception('Error uploading the activity `{}`.', file_path.stem)
        return False
    except exc.RateLimitExceeded:
        logger.exception('Exceeded the API rate limit.')
        raise
    except ConnectionError:
        logger.exception('No internet connection.')
        raise
    except Exception:
        logger.exception('Unknown exception')
        raise
    # If no error return true
    logger.debug('Activity `{}` uploaded successfully.', file_path.stem)
    return True


def handle_rate_limit(start_time: float, requests: int) -> Tuple[float, int]:
    """ Method to handle the 15 minutes API limit. This method will check the
    elapsed time since the first request and the number of them. Three cases
    are possible:

    - Less than 15 minutes elapsed from the first request and less than 100
      requests -> continue.
    - More than 15 minutes elapsed from the first request and less than 100
      requests -> reset timer and request number to count from 0 again.
    - Less than 15 minutes elapsed from the first request but more than 100
      requests -> sleep until the 15 minutes block is over and reset timer
      and request number to count from 0 again.

    Args:
        start_time (float): timestamp of the first request of the block.
        requests (int): number of requests made in the block.

    Returns:
        float, int: updated start time and number of requests following the
            possible cases.
    """
    requests += 1
    elapsed_time = time.time() - start_time
    if elapsed_time <= 60 * 15:
        if requests >= 100:
            remaining_time_stopped = 60 * 15 - elapsed_time
            mins, secs = divmod(remaining_time_stopped, 60)
            logger.warning('The number of allowed requests per 15 minutes has '
                           'been reached. Sleeping for {:0.0f} minutes, {:0.1f} seconds.',
                           mins, secs)
            time.sleep(remaining_time_stopped)
            # Reset values. Include petition to be processed
            logger.info('Waiting time elapsed. Continuing with the process.')
            requests = 1
            start_time = time.time()
    else:
        logger.debug('15 minutes have elapsed. Resetting requests and time.')
        # Reset values. Include petition to be processed
        requests = 1
        start_time = time.time()
    return start_time, requests
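# --- A hedged usage sketch (not part of the original module) tying the helpers above
# --- together; the workout folder layout and the 'run' activity type are assumptions.
def migrate_all(config: ConfigParser, workout_dir: str = 'workouts') -> None:
    client = get_strava_client(config)
    start_time, requests = time.time(), 0
    for tcx_file in sorted(Path(workout_dir).glob('*.tcx')):
        # Respect the 100-requests-per-15-minutes block before each upload.
        start_time, requests = handle_rate_limit(start_time, requests)
        upload_activity(client, 'run', tcx_file)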
| 36.020833
| 99
| 0.65587
| 1,096
| 8,645
| 5.058394
| 0.221715
| 0.021645
| 0.019481
| 0.017316
| 0.271104
| 0.268939
| 0.2307
| 0.199315
| 0.149531
| 0.111833
| 0
| 0.00803
| 0.265356
| 8,645
| 239
| 100
| 36.171548
| 0.864903
| 0.365992
| 0
| 0.169643
| 0
| 0
| 0.23559
| 0.020696
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.107143
| 0
| 0.223214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
df9a160281e97721997326dd0b0903a52cd73273
| 5,293
|
py
|
Python
|
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | 1
|
2020-12-08T01:24:34.000Z
|
2020-12-08T01:24:34.000Z
|
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | null | null | null |
train_synthText.py
|
skyatmoon/Detailed-Handwriting-detection
|
1eb7ba8087290cbdd3fbc2c092fbdbc2b715fc9c
|
[
"MIT"
] | null | null | null |
"""
Author: brooklyn
train with synthText
"""
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
import os
from net.craft import CRAFT
import sys
from utils.cal_loss import cal_synthText_loss
from dataset.synthDataset import SynthDataset
import argparse
from eval import eval_net
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
parser = argparse.ArgumentParser(description='CRAFT Train Fine-Tuning')
parser.add_argument('--gt_path', default='/media/brooklyn/EEEEE142EEE10425/SynthText/gt.mat', type=str, help='SynthText gt.mat')
parser.add_argument('--synth_dir', default='/media/brooklyn/EEEEE142EEE10425/SynthText', type=str, help='SynthText image dir')
parser.add_argument('--label_size', default=96, type=int, help='target label size')
parser.add_argument('--batch_size', default=16, type=int, help='training data batch size')
parser.add_argument('--test_batch_size', default=16, type=int, help='test data batch size')
parser.add_argument('--test_interval', default=40, type=int, help='test interval')
parser.add_argument('--max_iter', default=50000, type=int, help='max iteration')
parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
parser.add_argument('--epochs', default=500, type=int, help='training epochs')
parser.add_argument('--test_iter', default=10, type=int, help='test iteration')
args = parser.parse_args()
image_transform = transforms.Compose([
transforms.Resize((args.label_size * 2, args.label_size * 2)),
transforms.ToTensor()
])
label_transform = transforms.Compose([
transforms.Resize((args.label_size,args.label_size)),
transforms.ToTensor()
])
def train(net, epochs, batch_size, test_batch_size, lr, test_interval, max_iter, model_save_path, save_weight=True):
train_data = SynthDataset(image_transform=image_transform,
label_transform=label_transform,
file_path=args.gt_path,
image_dir=args.synth_dir)
steps_per_epoch = 1000
#选取SynthText部分数据作为训练集
train_num = batch_size * steps_per_epoch
train_data = torch.utils.data.Subset(train_data, range(train_num))
#划分训练集、验证集
train_num = len(train_data)
test_iter = 10
val_num = test_batch_size * test_iter
train_data, val_data = torch.utils.data.random_split(train_data, [train_num - val_num, val_num])
train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_data, batch_size=test_batch_size, shuffle=False)
criterion = nn.MSELoss(reduction='none')
optimizer = optim.Adam(net.parameters(), lr=lr)
for epoch in range(epochs):
print('epoch = ', epoch)
for i, (images, labels_region, labels_affinity, _) in enumerate(train_loader):
iter = epoch * steps_per_epoch + i
#更新学习率
if iter != 0 and iter % 10000 == 0:
for param in optimizer.param_groups:
param['lr'] *= 0.8
images = images.to(device)
labels_region = labels_region.to(device)
labels_affinity = labels_affinity.to(device)
labels_region = torch.squeeze(labels_region, 1)
labels_affinity = torch.squeeze(labels_affinity, 1)
#前向传播
y, _ = net(images)
score_text = y[:, :, :, 0]
score_link = y[:, :, :, 1]
#联合损失 ohem loss
loss = cal_synthText_loss(criterion, score_text, score_link, labels_region, labels_affinity, device)
#反向传播
optimizer.zero_grad() #梯度清零
loss.backward() #计算梯度
optimizer.step() #更新权重
#打印损失和学习率信息
if i % 10 == 0:
print('i = ', i,': loss = ', loss.item(), ' lr = ', lr)
#计算验证损失
if i != 0 and i % test_interval == 0:
test_loss = eval_net(net, val_loader, criterion, device)
print('test: i = ', i, 'test_loss = ', test_loss, 'lr = ', lr)
if save_weight:
torch.save(net.state_dict(), model_save_path + 'epoch_' + str(epoch) + '_iter' + str(i) + '.pth')
#保存最后训练模型
if iter == max_iter:
if save_weight:
torch.save(net.state_dict(), model_save_path + 'final.pth')
if __name__ == "__main__":
batch_size = args.batch_size
test_batch_size = args.test_batch_size
epochs = args.epochs # 遍历数据集次数
lr = args.lr # 学习率
test_interval = args.test_interval #测试间隔
max_iter = args.max_iter
net = CRAFT(pretrained=True) # craft模型
net = net.to(device)
model_save_prefix = 'checkpoints/craft_netparam_'
try:
train(net=net,
batch_size=batch_size,
test_batch_size=test_batch_size,
lr=lr,
test_interval=test_interval,
max_iter=max_iter,
epochs=epochs,
model_save_path=model_save_prefix)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED1.pth')
print('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
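# --- A short worked example (the helper name is ours; constants follow the defaults
# --- above) of the step decay in the training loop: the learning rate is multiplied
# --- by 0.8 every 10000 iterations.
def effective_lr(base_lr, iteration, step=10000, gamma=0.8):
    return base_lr * gamma ** (iteration // step)

assert abs(effective_lr(0.0001, 0) - 0.0001) < 1e-12
assert abs(effective_lr(0.0001, 25000) - 0.0001 * 0.8 ** 2) < 1e-12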
| 37.807143
| 128
| 0.642736
| 671
| 5,293
| 4.831595
| 0.257824
| 0.055521
| 0.052437
| 0.027761
| 0.186305
| 0.115978
| 0.101172
| 0.062307
| 0.028378
| 0.028378
| 0
| 0.016962
| 0.242585
| 5,293
| 139
| 129
| 38.079137
| 0.791719
| 0.029284
| 0
| 0.07767
| 0
| 0
| 0.107918
| 0.023069
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009709
| false
| 0
| 0.106796
| 0
| 0.116505
| 0.038835
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
df9b4ebedd02514962424a1cc0a1b5aae502b670
| 1,896
|
py
|
Python
|
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | null | null | null |
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | 9
|
2019-12-04T23:15:59.000Z
|
2022-02-10T09:08:38.000Z
|
friendcircle/models.py
|
jossafossa/Project24_backend
|
bb5cc91d21c9f93034b85b3e94e829f7ab33c565
|
[
"MIT"
] | null | null | null |
from django.db import models


class FriendCircle(models.Model):
    name = models.CharField(blank=True, max_length=255)
    description = models.CharField(blank=True, max_length=1000)
    interests = models.ManyToManyField('interests.Interest', blank=True)
    members = models.ManyToManyField(
        'users.CustomUser',
        through='friendcircle.FriendCircleMembership',
        through_fields=('friendcircle', 'user'),
        related_name='memberships',
    )

    def __str__(self):
        return self.name


# Keeps track of FriendCircle memberships
class FriendCircleMembership(models.Model):
    user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
    friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
    startdate = models.DateTimeField(auto_now_add=True)
    enddate = models.DateTimeField(null=True, blank=True)

    def __str__(self):
        return self.user.name + " member at " + self.friendcircle.name

    class Meta:
        unique_together = (('user', 'friendcircle'),)


MATCH_STATUS = (
    ('O', 'Not swiped',),
    ('V', 'Swiped Right',),
    ('X', 'Swiped Left',),
)


# Keeps track of matches. If both parties swiped right, the user can be added to FriendCircleMembership
class FriendCircleMatcher(models.Model):
    user = models.ForeignKey('users.CustomUser', on_delete=models.CASCADE)
    user_match_status = models.CharField(max_length=1,
                                         choices=MATCH_STATUS,
                                         default="O")
    friendcircle = models.ForeignKey('friendcircle.FriendCircle', on_delete=models.CASCADE)
    friendcircle_match_status = models.CharField(max_length=1,
                                                 choices=MATCH_STATUS,
                                                 default="O")

    def __str__(self):
        return self.user.email + " + " + self.friendcircle.name

    class Meta:
        unique_together = (('user', 'friendcircle'),)
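# --- An illustrative helper (ours, not part of the app; the ORM calls are standard
# --- Django) showing how a mutual right-swipe could be promoted to a membership.
def promote_mutual_matches():
    mutual = FriendCircleMatcher.objects.filter(
        user_match_status='V',            # 'V' = Swiped Right
        friendcircle_match_status='V',
    )
    for match in mutual:
        FriendCircleMembership.objects.get_or_create(
            user=match.user,
            friendcircle=match.friendcircle,
        )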
| 35.773585
| 103
| 0.68038
| 203
| 1,896
| 6.192118
| 0.369458
| 0.043755
| 0.044551
| 0.066826
| 0.531424
| 0.505967
| 0.415274
| 0.415274
| 0.415274
| 0.205251
| 0
| 0.005964
| 0.204114
| 1,896
| 52
| 104
| 36.461538
| 0.827038
| 0.074367
| 0
| 0.375
| 0
| 0
| 0.149629
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.025
| 0.075
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
df9d6d03fbed45db8f46a22336474ebb4831783c
| 474
|
py
|
Python
|
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
components/collector/tests/source_collectors/jira/test_issues.py
|
m-zakeri/quality-time
|
531931f0d8d4f5d262ea98445868158e41d268da
|
[
"Apache-2.0"
] | null | null | null |
"""Unit tests for the Jira issues collector."""
from .base import JiraTestCase
class JiraIssuesTest(JiraTestCase):
"""Unit tests for the Jira issue collector."""
METRIC_TYPE = "issues"
async def test_issues(self):
"""Test that the issues are returned."""
issues_json = dict(total=1, issues=[self.issue()])
response = await self.get_response(issues_json)
self.assert_measurement(response, value="1", entities=[self.entity()])
| 29.625
| 78
| 0.679325
| 59
| 474
| 5.355932
| 0.59322
| 0.056962
| 0.075949
| 0.094937
| 0.120253
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005236
| 0.194093
| 474
| 15
| 79
| 31.6
| 0.82199
| 0.172996
| 0
| 0
| 0
| 0
| 0.020528
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
df9e429f72ebf0471ad51a2d2296ecb2934b944d
| 1,485
|
py
|
Python
|
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
cf_xarray/tests/test_coding.py
|
rcaneill/cf-xarray
|
210e997ab5e550e411ec1a4e789aac28e77bacff
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import pandas as pd
import pytest
import xarray as xr

import cf_xarray as cfxr


@pytest.mark.parametrize(
    "mindex",
    [
        pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("lat", "lon")),
        pd.MultiIndex.from_arrays(
            [["a", "b", "c", "d"], [1, 2, 4, 10]], names=("lat", "lon")
        ),
        pd.MultiIndex.from_arrays(
            [["a", "b", "b", "a"], [1, 2, 1, 2]], names=("lat", "lon")
        ),
    ],
)
@pytest.mark.parametrize("idxnames", ["foo", "landpoint", ("landpoint",), None])
def test_compression_by_gathering_multi_index_roundtrip(mindex, idxnames):
    dim = "foo" if idxnames == "foo" else "landpoint"
    dataset = xr.Dataset(
        data_vars={"landsoilt": (dim, np.random.randn(4), {"foo": "bar"})},
        coords={
            dim: (dim, mindex, {"long_name": "land point number"}),
            "coord1": (dim, [1, 2, 3, 4], {"foo": "baz"}),
        },
        attrs={"dataset": "test dataset"},
    )
    dataset.lat.attrs["standard_name"] = "latitude"
    dataset.lon.attrs["standard_name"] = "longitude"
    encoded = cfxr.encode_multi_index_as_compress(dataset, idxnames)
    roundtrip = cfxr.decode_compress_to_multi_index(encoded, idxnames)
    assert "compress" not in roundtrip[dim].encoding
    xr.testing.assert_identical(roundtrip, dataset)

    dataset[dim].attrs["compress"] = "lat lon"
    with pytest.raises(ValueError):
        cfxr.encode_multi_index_as_compress(dataset, idxnames)
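# --- A standalone sketch (dataset contents are illustrative) of the round-trip the
# --- test above exercises: encode a MultiIndex with CF compression-by-gathering so it
# --- can be serialized, then decode it back to an identical dataset.
def _roundtrip_sketch():
    mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2]], names=("lat", "lon"))
    ds = xr.Dataset({"landsoilt": ("landpoint", np.random.randn(4))},
                    coords={"landpoint": mindex})
    encoded = cfxr.encode_multi_index_as_compress(ds, "landpoint")  # netCDF-writable
    roundtrip = cfxr.decode_compress_to_multi_index(encoded, "landpoint")
    xr.testing.assert_identical(roundtrip, ds)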
| 34.534884
| 80
| 0.60404
| 184
| 1,485
| 4.728261
| 0.423913
| 0.011494
| 0.055172
| 0.022989
| 0.201149
| 0.183908
| 0.183908
| 0.183908
| 0.08046
| 0
| 0
| 0.014555
| 0.213468
| 1,485
| 42
| 81
| 35.357143
| 0.730308
| 0
| 0
| 0.108108
| 0
| 0
| 0.145455
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.027027
| false
| 0
| 0.135135
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfa2ba545c720071817fb0691cb4e7c5aad3c2a5
| 8,344
|
py
|
Python
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 2
|
2016-04-04T15:20:50.000Z
|
2020-08-01T19:28:55.000Z
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 1
|
2020-10-02T05:44:45.000Z
|
2020-10-02T05:44:45.000Z
|
project/pfasst/transfer_tools.py
|
amit17133129/pyMG-2016
|
b82a60811bb0a8b91d8793c47177a240221f9176
|
[
"BSD-2-Clause"
] | 11
|
2016-03-26T18:37:06.000Z
|
2020-10-01T19:44:55.000Z
|
# coding=utf-8
import numpy as np
import scipy.interpolate as intpl
import scipy.sparse as sprs


def to_sparse(D, format="csc"):
    """
    Transform dense matrix to sparse matrix of return_type

        bsr_matrix(arg1[, shape, dtype, copy, blocksize])  Block Sparse Row matrix
        coo_matrix(arg1[, shape, dtype, copy])             A sparse matrix in COOrdinate format.
        csc_matrix(arg1[, shape, dtype, copy])             Compressed Sparse Column matrix
        csr_matrix(arg1[, shape, dtype, copy])             Compressed Sparse Row matrix
        dia_matrix(arg1[, shape, dtype, copy])             Sparse matrix with DIAgonal storage
        dok_matrix(arg1[, shape, dtype, copy])             Dictionary Of Keys based sparse matrix.
        lil_matrix(arg1[, shape, dtype, copy])             Row-based linked list sparse matrix

    :param D: Dense matrix
    :param format: how to save the sparse matrix
    :return: sparse version
    """
    if format == "bsr":
        return sprs.bsr_matrix(D)
    elif format == "coo":
        return sprs.coo_matrix(D)
    elif format == "csc":
        return sprs.csc_matrix(D)
    elif format == "csr":
        return sprs.csr_matrix(D)
    elif format == "dia":
        return sprs.dia_matrix(D)
    elif format == "dok":
        return sprs.dok_matrix(D)
    elif format == "lil":
        return sprs.lil_matrix(D)
    else:
        return to_dense(D)


def to_dense(D):
    if sprs.issparse(D):
        return D.toarray()
    elif isinstance(D, np.ndarray):
        return D


def next_neighbors_periodic(p, ps, k, T=None):
    """
    This function gives for a value p the k points next to it which are found
    in the vector ps and the points which are found periodically.

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :return: list with the indices of the k next neighbors and a list with the
        corresponding (periodically shifted) neighbor positions
    """
    if T is None:
        T = ps[-1] - 2 * ps[0] + ps[1]
    p_bar = p - np.floor(p / T) * T
    ps = ps - ps[0]
    distance_to_p = []
    for tk in ps:
        d1 = tk + T - p_bar
        d2 = tk - p_bar
        d3 = tk - T - p_bar
        min_d = min([np.abs(d1), np.abs(d2), np.abs(d3)])
        if np.abs(d1) == min_d:
            distance_to_p.append(d1)
        elif np.abs(d2) == min_d:
            distance_to_p.append(d2)
        else:
            distance_to_p.append(d3)
    distance_to_p = np.asarray(distance_to_p)
    value_index = []
    for d, i in zip(distance_to_p, range(distance_to_p.size)):
        value_index.append((d, i))
    # sort by |distance| (key-based; the Python 2 `cmp` argument no longer exists in Python 3)
    value_index_sorted_by_abs = sorted(value_index, key=lambda s: np.abs(s[0]))
    if k % 2 == 1:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k + 1], key=lambda s: s[0])[:k]
    else:
        value_index_sorted_by_sign = sorted(value_index_sorted_by_abs[0:k], key=lambda s: s[0])
    # return lists (not lazy map objects) so the results can be used for fancy indexing
    return [s[1] for s in value_index_sorted_by_sign], [s[0] + p for s in value_index_sorted_by_sign]


def next_neighbors(p, ps, k):
    """
    This function gives for a value p the k points next to it which are found
    in the vector ps

    :param p: value
    :param ps: ndarray, vector where to find the next neighbors
    :param k: integer, number of neighbours
    :return: ndarray, with the k next neighbors
    """
    distance_to_p = np.abs(ps - p)
    # zip it
    value_index = []
    for d, i in zip(distance_to_p, range(distance_to_p.size)):
        value_index.append((d, i))
    # sort by distance
    value_index_sorted = sorted(value_index, key=lambda s: s[0])
    # take first k indices with least distance and sort them
    return sorted(s[1] for s in value_index_sorted[0:k])


def continue_periodic_array(arr, nn, T):
    nn = np.asarray(nn)
    d_nn = nn[1:] - nn[:-1]
    if np.all(d_nn == np.ones(nn.shape[0] - 1)):
        return arr[nn]
    else:
        cont_arr = [arr[nn[0]]]
        shift = 0.
        for n, d in zip(nn[1:], d_nn):
            if d != 1:
                shift = -T
            cont_arr.append(arr[n] + shift)
        return np.asarray(cont_arr)


def restriction_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
    """
    We construct the restriction matrix between two 1d grids, using lagrange interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the restriction
    :return: a restriction matrix
    """
    M = np.zeros((coarse_grid.size, fine_grid.size))
    n_g = coarse_grid.size
    for i, p in zip(range(n_g), coarse_grid):
        if periodic:
            nn, cont_arr = next_neighbors_periodic(p, fine_grid, k, T)
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = []
            for l in range(k):
                lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
        else:
            nn = next_neighbors(p, fine_grid, k)
            # construct the lagrange polynomials for the k neighbors
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = []
            for l in range(k):
                lag_pol.append(intpl.lagrange(fine_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)


def interpolation_matrix_1d(fine_grid, coarse_grid, k=2, return_type="csc", periodic=False, T=1.0):
    """
    We construct the interpolation matrix between two 1d grids, using lagrange interpolation.

    :param fine_grid: a one dimensional 1d array containing the nodes of the fine grid
    :param coarse_grid: a one dimensional 1d array containing the nodes of the coarse grid
    :param k: order of the interpolation
    :return: an interpolation matrix
    """
    M = np.zeros((fine_grid.size, coarse_grid.size))
    n_f = fine_grid.size
    for i, p in zip(range(n_f), fine_grid):
        if periodic:
            nn, cont_arr = next_neighbors_periodic(p, coarse_grid, k, T)
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = []
            for l in range(k):
                lag_pol.append(intpl.lagrange(cont_arr, np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
        else:
            nn = next_neighbors(p, coarse_grid, k)
            # construct the lagrange polynomials for the k neighbors
            circulating_one = np.asarray([1.0] + [0.0] * (k - 1))
            lag_pol = []
            for l in range(k):
                lag_pol.append(intpl.lagrange(coarse_grid[nn], np.roll(circulating_one, l)))
            M[i, nn] = np.asarray([pol(p) for pol in lag_pol])
    return to_sparse(M, return_type)


def kron_on_list(matrix_list):
    """
    :param matrix_list: a list of sparse matrices
    :return: a matrix
    """
    if len(matrix_list) == 2:
        return sprs.kron(matrix_list[0], matrix_list[1])
    elif len(matrix_list) == 1:
        return matrix_list[0]
    else:
        return sprs.kron(matrix_list[0], kron_on_list(matrix_list[1:]))


def matrixN(tau, rows=-1, last_value=1.0):
    n = tau.shape[0]
    if rows == -1:
        rows = n
    N = np.zeros((rows, n))
    # construct the lagrange polynomials
    circulating_one = np.asarray([1.0] + [0.0] * (n - 1))
    lag_pol = []
    for i in range(n):
        lag_pol.append(intpl.lagrange(tau, np.roll(circulating_one, i)))
        N[:, i] = -np.ones(rows) * lag_pol[-1](last_value)
    return N


def interpolate_to_t_end(nodes_on_unit, values):
    """
    Assuming Gauss-Legendre nodes, we are interested in the value at the end of
    the interval, but we only know the values in the interior of the interval.
    We compute the value by legendre interpolation.

    :param nodes_on_unit: nodes transformed to the unit interval
    :param values: values on those nodes
    :return: interpolation to the end of the interval
    """
    n = nodes_on_unit.shape[0]
    circulating_one = np.asarray([1.0] + [0.0] * (n - 1))
    lag_pol = []
    result = np.zeros(values[0].shape)
    for i in range(n):
        lag_pol.append(intpl.lagrange(nodes_on_unit, np.roll(circulating_one, i)))
        result += values[i] * lag_pol[-1](1.0)
    return result
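# --- A brief usage sketch (grids chosen for illustration): with k=2 the transfer
# --- operators use linear Lagrange stencils, so they reproduce linear functions exactly.
def _interpolation_example():
    coarse = np.linspace(0.0, 1.0, 5)
    fine = np.linspace(0.0, 1.0, 9)
    P = interpolation_matrix_1d(fine, coarse, k=2)  # (9 x 5) sparse csc matrix
    u_coarse = 2.0 * coarse + 1.0
    u_fine = P.dot(u_coarse)
    assert np.allclose(u_fine, 2.0 * fine + 1.0)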
| 36.920354
| 111
| 0.628715
| 1,323
| 8,344
| 3.815571
| 0.141345
| 0.021395
| 0.02397
| 0.027734
| 0.562203
| 0.499802
| 0.48019
| 0.464342
| 0.453249
| 0.44374
| 0
| 0.017192
| 0.254075
| 8,344
| 225
| 112
| 37.084444
| 0.793862
| 0.311841
| 0
| 0.29927
| 0
| 0
| 0.005457
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072993
| false
| 0
| 0.021898
| 0
| 0.248175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfa3a2fa2289a9c892b09c29ede2ebe39a3dd0c8
| 7,266
|
py
|
Python
|
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
python/trees/rbtree_graphviz.py
|
rcanepa/cs-fundamentals
|
b362fc206417501e53a5739df1edf7568901eef8
|
[
"MIT"
] | null | null | null |
"""rbtree_graphviz.py - create a graphviz representation of a LLRBT.
The purpose of this module is to visually show how the shape of a LLRBT
changes when keys are inserted in it. For every insert, sub graph (tree)
is added to the main graph.
`initialization_list` holds the values that are inserted in the tree.
This list can be changed for a list of anything that can be compared
with > == <. For example, with `initialization_list = range(50)` keys
from 0 to 49 will be inserted in the tree.
Consider that for every key, a graph is going to be generated.
"""
from graphviz import Digraph
from trees.rbtree import LLRBT, is_red
NODE_SHAPE = "circle"
NONE_NODE_SHAPE = "point"
TITLE_SHAPE = "box"
RED_COLOR = "#b8000f"
DEFAULT_GRAPH_NODE_ATTR = {
"shape": NODE_SHAPE,
"color": "black",
"style": "filled",
"fillcolor": "#cfd3d6",
}
RED_NODE_ATTR = {
"fontcolor": "white",
"fillcolor": RED_COLOR
}
DEFAULT_GRAPH_EDGE_ATTR = {
"color": "black",
"arrowhead": "vee",
"style": "solid",
}
def add_node(graph, node):
"""Add `node` to `graph`. `node` is a tuple with the
following shape:
(node_id, {<node attributes>}, {<graph's node attributes>})
^ ^ ^
string see graphviz documentation"""
node_id, node_attr, graph_node_attr = node
graph.node(node_id, **node_attr, **graph_node_attr)
return graph
def add_edge(graph, edge):
"""Add edge from `edge[0]` to `edge[1]` to `graph`. `edge` is
a tuple with the following shape:
(source_node_id, destiny_node_id, {<graph's edge attributes>})
^ ^ ^
string string see graphviz documentation"""
source_node_id, destiny_node_id, graph_edge_attr = edge
graph.edge(source_node_id, destiny_node_id, **graph_edge_attr)
return graph
def generate_graph(tree, initialization_list, format="pdf"):
if initialization_list is None or len(initialization_list) == 0:
raise Exception("You can't generate a graph with an empty tree.")
if not isinstance(tree, LLRBT):
raise Exception("You need to provide an instance of a Leaf Leaning Red Black Tree (LLRBT).")
for value in initialization_list:
tree.insert(value)
graph = Digraph(format="pdf",
node_attr=DEFAULT_GRAPH_NODE_ATTR,
edge_attr=DEFAULT_GRAPH_EDGE_ATTR)
# Iterate over all keys and create nodes and edges.
for idx, node in enumerate(tree.pre_order_traversal()):
node_id = str(node.value)
node_label = str(node.value)
if is_red(node):
add_node(graph, (node_id, {"label": node_label}, RED_NODE_ATTR))
else:
add_node(graph, (node_id, {"label": node_label}, {}))
# Create edge between node and its left child.
if node.left:
node_left_id = str(node.left.value)
add_edge(graph, (node_id, node_left_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_value = "left-null-" + str(idx)
add_node(graph, (null_node_value, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(graph, (node_id, null_node_value, {}))
# Create edge between node and its right child.
if node.right:
node_right_id = str(node.right.value)
add_edge(graph, (node_id, node_right_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_value = "right-null-" + str(idx)
add_node(graph, (null_node_value, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(graph, (node_id, null_node_value, {}))
return graph
def generate_graph_per_insert(tree, initialization_list, format="pdf"):
if initialization_list is None or len(initialization_list) == 0:
raise Exception("You can't generate a graph with an empty tree.")
if not isinstance(tree, LLRBT):
raise Exception("You need to provide an instance of a Leaf Leaning Red Black Tree (LLRBT).")
main_graph = Digraph(format=format,
node_attr=DEFAULT_GRAPH_NODE_ATTR,
edge_attr=DEFAULT_GRAPH_EDGE_ATTR)
main_graph.attr(rankdir="TB", newrank="true") # print sub graph from top to bottom
# For every key to be inserted, create a sub graph representing
# the tree after the insertion.
for graph_number, value in enumerate(initialization_list):
tree.insert(value)
# Create sub graph.
sub_graph_name = "cluster_" + str(graph_number)
with main_graph.subgraph(name=sub_graph_name) as sub_graph:
sub_graph.attr(label="Inserting = " + str(value), fontsize="12")
# Iterate over all keys and fill the sub graph.
for idx, node in enumerate(tree.pre_order_traversal()):
node_id = str(graph_number) + "." + str(node.value)
node_label = str(node.value)
if is_red(node):
add_node(sub_graph, (node_id, {"label": node_label}, RED_NODE_ATTR))
else:
add_node(sub_graph, (node_id, {"label": node_label}, {}))
# Create edge between node and its left child.
if node.left:
node_left_id = str(graph_number) + "." + str(node.left.value)
# Paint edge red if the left child is red.
if is_red(node.left):
add_edge(sub_graph, (node_id, node_left_id, {}))
else:
add_edge(sub_graph, (node_id, node_left_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_id = str(graph_number) + "-left-null-" + str(idx)
add_node(sub_graph, (null_node_id, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(sub_graph, (node_id, null_node_id, {}))
# Create edge between node and its right child.
if node.right:
node_right_id = str(graph_number) + "." + str(node.right.value)
# Paint edge red if the right child is red.
if is_red(node.right):
add_edge(sub_graph, (node_id, node_right_id, {}))
else:
add_edge(sub_graph, (node_id, node_right_id, {}))
# Node doesn't have a left child so we put a dot in its place.
else:
null_node_id = str(graph_number) + "-right-null-" + str(idx)
add_node(sub_graph, (null_node_id, {}, {"shape": NONE_NODE_SHAPE}))
add_edge(sub_graph, (node_id, null_node_id, {}))
return main_graph
if __name__ == "__main__":
initialization_list = ["Z", "W", "F", "D", "S", "E", "A", "R", "C", "H", "X", "M", "P", "L"]
# initialization_list = ["A", "B", "C", "D"]
tree = LLRBT()
# graph = generate_graph(tree, initialization_list)
graph = generate_graph_per_insert(tree, initialization_list)
print(graph.source)
graph.render("trees/rbtree.gv", view=True)
| 38.648936
| 100
| 0.597991
| 979
| 7,266
| 4.21144
| 0.181818
| 0.045113
| 0.037351
| 0.027165
| 0.626486
| 0.581858
| 0.555421
| 0.488964
| 0.488964
| 0.447005
| 0
| 0.003315
| 0.294247
| 7,266
| 187
| 101
| 38.855615
| 0.800702
| 0.260666
| 0
| 0.449541
| 0
| 0
| 0.097455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036697
| false
| 0
| 0.018349
| 0
| 0.091743
| 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfad270ef93b37ed6df9bcf779f6cf41ac7ec78e
| 2,499
|
py
|
Python
|
graphtiny/service.py
|
Canicio/pyqtgraph-tiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | 1
|
2018-03-17T12:36:56.000Z
|
2018-03-17T12:36:56.000Z
|
graphtiny/service.py
|
Canicio/pyqtgraph-tiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | 1
|
2017-08-08T18:31:31.000Z
|
2017-08-08T18:31:31.000Z
|
graphtiny/service.py
|
Canicio/graphtiny
|
b88ebe8a2e6ad860ca4857b527adccbbde14851d
|
[
"MIT"
] | null | null | null |
from time import sleep
import pyqtgraph as pg
import threading
from graphtiny.api import IChart, IDataStreamWindow
from graphtiny.domain import DataStreamWindow, Chart


class FuncThread(threading.Thread):
    def __init__(self, t, *a) -> None:
        self._t = t
        self._a = a
        threading.Thread.__init__(self)

    def run(self) -> None:
        self._t(*self._a)


class ChartService(IChart):
    def set_data_stream(self, chart: Chart, x, y) -> None:
        chart.x[chart.ptr] = x
        chart.y[chart.ptr] = y
        chart.ptr += 1


class DataStreamWindowService(IDataStreamWindow):
    def launch_window(self, window: DataStreamWindow) -> None:
        calculating_thread = FuncThread(self.__raise_thread_with_window, window)
        calculating_thread.start()
        sleep(1)

    def __raise_thread_with_window(self, window: DataStreamWindow) -> None:
        window.qapp = pg.mkQApp()
        window.win = pg.GraphicsWindow()  # raise window!
        if window.background_color:
            window.win.setBackground(window.background_color)
        if window.coordinate_system_color:
            pg.setConfigOption('foreground', window.coordinate_system_color)
        i = 0
        for chart in window.charts_list:
            if i % window.columns_display == 0 and i >= window.columns_display:
                window.win.nextRow()
            chart.plot = window.win.addPlot()
            if chart.downsampling:
                chart.plot.setDownsampling(mode=chart.downsampling)
            if chart.clipToView:
                chart.plot.setClipToView(True)
            if chart.left_label:
                if chart.left_label_units:
                    chart.plot.setLabel('left', chart.left_label, chart.left_label_units)
                else:
                    chart.plot.setLabel('left', chart.left_label)
            if chart.bottom_label:
                if chart.bottom_label_units:
                    chart.plot.setLabel('bottom', chart.bottom_label, chart.bottom_label_units)
                else:
                    chart.plot.setLabel('bottom', chart.bottom_label)
            chart.curve = chart.plot.plot()
            if chart.line_color:
                chart.curve.setPen(chart.line_color)
            i += 1
        while window.win.isVisible():
            # refresh data
            for chart in window.charts_list:
                chart.curve.setData(chart.x[:chart.ptr], chart.y[:chart.ptr])
            window.qapp.processEvents()
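# --- A heavily hedged usage sketch (not part of the module): we assume
# --- DataStreamWindow/Chart are plain attribute holders with the fields referenced
# --- above (charts_list, columns_display, preallocated x/y arrays and a ptr cursor);
# --- their real constructors may differ.
def _streaming_sketch():
    import numpy as np
    window = DataStreamWindow()
    chart = Chart()
    chart.x, chart.y, chart.ptr = np.zeros(10000), np.zeros(10000), 0
    window.charts_list = [chart]
    window.columns_display = 1
    DataStreamWindowService().launch_window(window)
    chart_service = ChartService()
    for t in range(1000):
        chart_service.set_data_stream(chart, t, np.sin(t / 25.0))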
| 34.232877
| 95
| 0.612645
| 283
| 2,499
| 5.222615
| 0.29682
| 0.048714
| 0.047361
| 0.018945
| 0.258457
| 0.161028
| 0.106901
| 0.05954
| 0
| 0
| 0
| 0.002828
| 0.292517
| 2,499
| 72
| 96
| 34.708333
| 0.833145
| 0.010404
| 0
| 0.071429
| 0
| 0
| 0.012151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.089286
| false
| 0
| 0.089286
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfb2125e655f351b14d7a2e313cfea92c5b3d51d
| 4,629
|
py
|
Python
|
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | 30
|
2018-12-05T22:02:26.000Z
|
2022-03-13T17:09:51.000Z
|
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | null | null | null |
pcie_bw.py
|
pcie-bench/pcie-model
|
5bb1a71684c51f4bbbab2b9673c6bbc3dcf57b11
|
[
"Apache-2.0"
] | 13
|
2018-12-28T14:31:48.000Z
|
2022-02-25T11:24:36.000Z
|
#! /usr/bin/env python3
#
## Copyright (C) 2015-2018 Rolf Neugebauer. All rights reserved.
## Copyright (C) 2015 Netronome Systems, Inc. All rights reserved.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
"""A simple script to generate data for PCIe and ethernet bandwidth estimates"""
import sys
from optparse import OptionParser
from model import pcie, eth, mem_bw
# pylint: disable=too-many-locals
OUT_FILE = "pcie_bw.dat"
def main():
"""Main"""
usage = """usage: %prog [options]"""
parser = OptionParser(usage)
parser.add_option('--mps', dest='MPS', type="int", action='store',
default=256,
help='Set the maximum payload size of the link')
parser.add_option('--mrrs', dest='MRRS', type="int", action='store',
default=512,
help='Set the maximum read request size of the link')
parser.add_option('--rcb', dest='RCB', type="int", action='store',
default=64,
help='Set the read completion boundary of the link')
parser.add_option('--lanes', dest='lanes', type="string", action='store',
default='x8',
help='Set num lanes (x2, x4, x8, x16, or x32)')
parser.add_option('--gen', dest='gen', type="string", action='store',
default='gen3',
help='Set PCIe version (gen1, gen2, gen3, gen4, or gen5)')
parser.add_option('--addr', dest='addr', type="int", action='store',
default=64,
help='Set the number of address bits (32 or 64)')
parser.add_option('--ecrc', dest='ecrc', type="int", action='store',
default=0,
help='Use ECRC (0 or 1)')
parser.add_option('-o', '--outfile', dest='FILE',
default=OUT_FILE, action='store',
help='File where to write the data to')
(options, _) = parser.parse_args()
pciecfg = pcie.Cfg(version=options.gen,
lanes=options.lanes,
addr=options.addr,
ecrc=options.ecrc,
mps=options.MPS,
mrrs=options.MRRS,
rcb=options.RCB)
print("PCIe Config:")
pciecfg.pp()
ethcfg = eth.Cfg('40GigE')
tlp_bw = pciecfg.TLP_bw
bw_spec = pcie.BW_Spec(tlp_bw, tlp_bw, pcie.BW_Spec.BW_RAW)
dat = open(options.FILE, "w")
dat.write("\"Payload(Bytes)\" "
"\"PCIe Write BW\" "
"\"PCIe Write Trans/s\" "
"\"PCIe Read BW\" "
"\"PCIe Read Trans/s\" "
"\"PCIe Read/Write BW\" "
"\"PCIe Read/Write Trans/s\" "
"\"40G Ethernet BW\" "
"\"40G Ethernet PPS\" "
"\"40G Ethernet Frame time (ns)\" "
"\n")
for size in range(1, 1500 + 1):
wr_bw = mem_bw.write(pciecfg, bw_spec, size)
rd_bw = mem_bw.read(pciecfg, bw_spec, size)
rdwr_bw = mem_bw.read_write(pciecfg, bw_spec, size)
wr_trans = (wr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
rd_trans = (rd_bw.rx_eff * 1000 * 1000 * 1000 / 8) / size
rdwr_trans = (rdwr_bw.tx_eff * 1000 * 1000 * 1000 / 8) / size
if size >= 64:
eth_bw = ethcfg.bps_ex(size) / (1000 * 1000 * 1000.0)
eth_pps = ethcfg.pps_ex(size)
eth_lat = 1.0 * 1000 * 1000 * 1000 / eth_pps
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f %.2f %d %.2f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans,
eth_bw, eth_pps, eth_lat))
else:
dat.write("%d %.2f %.1f %.2f %.1f %.2f %.1f\n" %
(size,
wr_bw.tx_eff, wr_trans,
rd_bw.rx_eff, rd_trans,
rdwr_bw.tx_eff, rdwr_trans))
dat.close()
if __name__ == '__main__':
sys.exit(main())
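# Usage sketch (hypothetical invocation; flag names come from the option parser
# above): generate a gen4 x16 dataset with a larger payload and read request size.
#   ./pcie_bw.py --gen gen4 --lanes x16 --mps 512 --mrrs 1024 -o pcie_bw.dat
# The output file is a whitespace-separated table with one row per payload size
# (1..1500 bytes), suitable for plotting with gnuplot.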
| 38.575
| 80
| 0.534241
| 593
| 4,629
| 4.042159
| 0.333895
| 0.033375
| 0.050063
| 0.037547
| 0.239883
| 0.157697
| 0.139341
| 0.115978
| 0.095953
| 0.047559
| 0
| 0.046527
| 0.331389
| 4,629
| 119
| 81
| 38.89916
| 0.727948
| 0.168935
| 0
| 0.097561
| 0
| 0.012195
| 0.163952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012195
| false
| 0
| 0.036585
| 0
| 0.04878
| 0.012195
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfb71ae9c49c8ec75050dd6031ca98dd54f66f9f
| 18,950
|
py
|
Python
|
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | 4
|
2020-10-31T21:20:12.000Z
|
2022-01-05T16:13:07.000Z
|
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | null | null | null |
BiModNeuroCNN/training/bimodal_classification.py
|
cfcooney/BiModNeuroCNN
|
f79da6150b4186bcbc15d876394f4af8a47076d0
|
[
"MIT"
] | null | null | null |
"""
Description: Class for training CNNs using a nested cross-validation method. Train on the inner_fold to obtain
optimized hyperparameters; train on the outer_fold to obtain classification performance.
"""
from braindecode.datautil.iterators import BalancedBatchSizeIterator
from braindecode.experiments.stopcriteria import MaxEpochs, NoDecrease, Or
from braindecode.torch_ext.util import set_random_seeds, np_to_var, var_to_np
from braindecode.datautil.signal_target import SignalAndTarget
from braindecode.torch_ext.functions import square, safe_log
import torch as th
from sklearn.model_selection import train_test_split
from BiModNeuroCNN.training.training_utils import current_acc, current_loss
from BiModNeuroCNN.data_loader.data_utils import smote_augmentation, multi_SignalAndTarget
from BiModNeuroCNN.results.results import Results as res
from torch.nn.functional import nll_loss, cross_entropy
from BiModNeuroCNN.training.bimodal_training import Experiment
import numpy as np
import itertools as it
import torch
from torch import optim
import logging
from ast import literal_eval
from BiModNeuroCNN.results.metrics import cross_entropy
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
log = logging.getLogger(__name__)
torch.backends.cudnn.deterministic = True
class Classification:
def __init__(self, model, subnet1_params, subnet2_params, hyp_params, parameters, data_params, model_save_path, tag):
self.model = model
self.subnet1_params = subnet1_params
self.subnet2_params = subnet2_params
self.model_save_path = model_save_path
self.tag = tag
self.best_loss = parameters["best_loss"]
self.batch_size = parameters["batch_size"]
self.monitors = parameters["monitors"]
self.cuda = parameters["cuda"]
self.model_constraint = parameters["model_constraint"]
self.max_increase_epochs = parameters['max_increase_epochs']
self.lr_scheduler = parameters['learning_rate_scheduler']
self.lr_step = parameters['lr_step']
self.lr_gamma = parameters['lr_gamma']
self.n_classes = data_params["n_classes"]
self.n_chans_d1 = data_params["n_chans_d1"]
self.input_time_length_d1= data_params["input_time_length_d1"]
self.n_chans_d2 = data_params["n_chans_d2"]
self.input_time_length_d2 = data_params["input_time_length_d2"]
self.hyp_params = hyp_params
self.activation = "elu"
self.learning_rate = 0.001
self.dropout = 0.1
self.epochs = parameters['epochs']
self.window = None
self.structure = 'deep'
self.n_filts = 10  # n_filts used for the first n-1 filter layers
self.first_pool = False
self.loss = nll_loss
for key in hyp_params:
setattr(self, key, hyp_params[key])
self.iterator = BalancedBatchSizeIterator(batch_size=self.batch_size)
self.best_params = None
self.model_number = 1
self.y_pred = np.array([])
self.y_true = np.array([])
self.probabilities = np.array([])
def call_model(self):
self.subnet1_params['structure'] = self.structure
self.subnet2_params['structure'] = self.structure
if self.model.__name__ == 'BiModalNet':
model = self.model(n_classes=self.n_classes, in_chans_1=self.n_chans_d1, input_time_1=self.input_time_length_d1,
SubNet_1_params=self.subnet1_params, in_chans_2=self.n_chans_d2,
input_time_2=self.input_time_length_d2, SubNet_2_params=self.subnet2_params,
linear_dims=100, drop_prob=.2, nonlin=torch.nn.functional.leaky_relu,
fc1_out_features=500, fc2_out_features=500, gru_hidden_size=250, gru_n_layers=1)
th.nn.init.kaiming_uniform_(model.fused_linear.weight)
th.nn.init.constant_(model.fused_linear.bias, 0)
elif self.model.__name__ == 'BiModalNet_w_Pool':
model = self.model(n_classes=self.n_classes, in_chans_1=self.n_chans_d1, input_time_1=self.input_time_length_d1,
SubNet_1_params=self.subnet1_params, in_chans_2=self.n_chans_d2,
input_time_2=self.input_time_length_d2, SubNet_2_params=self.subnet2_params,
linear_dims=100, drop_prob=.2, nonlin=torch.nn.functional.leaky_relu,
fc1_out_features=500, fc2_out_features=500, gru_hidden_size=250, gru_n_layers=1)
th.nn.init.kaiming_uniform_(model.fused_linear.weight)
th.nn.init.constant_(model.fused_linear.bias, 0)
return model
def train_model(self, train_set_1, val_set_1, test_set_1, train_set_2, val_set_2, test_set_2, save_model):
"""
:param train_set_1: (np.array) n_trials*n_channels*n_samples
:param val_set_1: (np.array) n_trials*n_channels*n_samples
:param test_set_1: (np.array) n_trials*n_channels*n_samples - can be None when training on inner-fold
:param train_set_2: (np.array) n_trials*n_channels*n_samples
:param val_set_2: (np.array) n_trials*n_channels*n_samples
:param test_set_2: (np.array) n_trials*n_channels*n_samples - can be None when training on inner-fold
:param save_model: (Bool) True if trained model is to be saved
:return: Accuracy and loss scores for the model trained with a given set of hyper-parameters
"""
model = self.call_model()
predictions = None
set_random_seeds(seed=20190629, cuda=self.cuda)
if self.cuda:
model.cuda()
torch.backends.cudnn.deterministic = True
model = torch.nn.DataParallel(model)
log.info(f"Cuda in use")
log.info("%s model: ".format(str(model)))
optimizer = optim.Adam(model.parameters(), lr=self.learning_rate, weight_decay=0.01, eps=1e-8, amsgrad=False)
stop_criterion = Or([MaxEpochs(self.epochs),
NoDecrease('valid_loss', self.max_increase_epochs)])
model_loss_function = None
#####Setup to run the selected model#####
model_test = Experiment(model, train_set_1, val_set_1, train_set_2, val_set_2, test_set_1=test_set_1, test_set_2=test_set_2,
iterator=self.iterator, loss_function=self.loss, optimizer=optimizer,
lr_scheduler=self.lr_scheduler(optimizer, step_size=self.lr_step, gamma=self.lr_gamma),
model_constraint=self.model_constraint, monitors=self.monitors, stop_criterion=stop_criterion,
remember_best_column='valid_misclass', run_after_early_stop=True, model_loss_function=model_loss_function,
cuda=self.cuda, save_file=self.model_save_path, tag=self.tag, save_model=save_model)
model_test.run()
model_acc = model_test.epochs_df['valid_misclass'].astype('float')
model_loss = model_test.epochs_df['valid_loss'].astype('float')
current_val_acc = 1 - current_acc(model_acc)
current_val_loss = current_loss(model_loss)
test_accuracy = None
if train_set_1 is not None and test_set_2 is not None:
val_metric_index = self.get_model_index(model_test.epochs_df)
test_accuracy = round((1 - model_test.epochs_df['test_misclass'].iloc[val_metric_index]) * 100, 3)
predictions = model_test.model_predictions
probabilities = model_test.model_probabilities
return current_val_acc, current_val_loss, test_accuracy, model_test, predictions, probabilities
def train_inner(self, train_set_1, val_set_1, train_set_2, val_set_2, test_set_1=None, test_set_2=None, augment=False, save_model=False):
"""
:param train_set_1: (np.array) n_trials*n_channels*n_samples
:param val_set_1: (np.array) n_trials*n_channels*n_samples
:param test_set_1: (np.array) n_trials*n_channels*n_samples - can be None when performing HP optimization
:param train_set_2: (np.array) n_trials*n_channels*n_samples
:param val_set_2: (np.array) n_trials*n_channels*n_samples
:param test_set_2: (np.array) n_trials*n_channels*n_samples - can be None when performing HP optimization
:param augment: (Bool) True if data augmentation to be applied - currently only configured for SMOTE augmentation
:param save_model: (Bool) True if trained model is to be saved
:return: Accuracy, loss and cross entropy scores for the model trained with a given set of hyper-parameters
"""
val_acc, val_loss, val_cross_entropy = [], [], []
if augment:
# Only augment training data - never test or validation sets
train_set_1_os, train_labels_1_os = smote_augmentation(train_set_1.X, train_set_1.y, 2)
train_set_2_os, train_labels_2_os = smote_augmentation(train_set_2.X, train_set_2.y, 2)
train_set_1, train_set_2 = multi_SignalAndTarget((train_set_1_os, train_labels_1_os), (train_set_2_os, train_labels_2_os))
names = list(self.hyp_params.keys())
hyp_param_combs = it.product(*(self.hyp_params[Name] for Name in names))
for hyp_combination in hyp_param_combs:
assert len(hyp_combination) == len(self.hyp_params), "HP combination must be of equal length to original set."
for i in range(len(self.hyp_params)):
setattr(self, list(self.hyp_params.keys())[i], hyp_combination[i])
if 'window' in self.hyp_params.keys():
# when using classification window as a hyperparameter - currently data would have to be of same number of samples
train_set_1_w = SignalAndTarget(train_set_1.X[:, :, self.window[0]:self.window[1]], train_set_1.y)
val_set_1_w = SignalAndTarget(val_set_1.X[:, :, self.window[0]:self.window[1]], val_set_1.y)
train_set_2_w = SignalAndTarget(train_set_2.X[:, :, self.window[0]:self.window[1]], train_set_2.y)
val_set_2_w = SignalAndTarget(val_set_2.X[:, :, self.window[0]:self.window[1]], val_set_2.y)
current_val_acc, current_val_loss, _, _, _, probabilities = self.train_model(train_set_1_w, val_set_1_w, test_set_1, train_set_2_w,
val_set_2_w, test_set_2, save_model)
else:
current_val_acc, current_val_loss, _, _, _, probabilities = self.train_model(train_set_1, val_set_1, test_set_1, train_set_2,
val_set_2, test_set_2, save_model)
val_acc.append(current_val_acc)
val_loss.append(current_val_loss)
probabilities = np.array(probabilities).reshape((val_set_1.y.shape[0], self.n_classes))
val_cross_entropy.append(cross_entropy(val_set_1.y, probabilities)) #1 CE value per-HP, repeat for n_folds
return val_acc, val_loss, val_cross_entropy
def train_outer(self, trainsetlist, testsetlist, augment=False, save_model=True, epochs_save_path=None, print_details=False):
"""
:param trainsetlist: (list) data as split by k-folds n_folds*(n_trials*n_channels*n_samples)
:param testsetlist: (list) data as split by k-folds n_folds*(n_trials*n_channels*n_samples)
:param augment: (Bool) True if data augmentation to be applied - currently only configured for SMOTE augmentation
:param save_model: (Bool) True if trained model is to be saved
"""
scores, all_preds, probabilities_list, outer_cross_entropy, fold_models = [],[],[],[],[]
fold_number = 1
for train_set, test_set in zip(trainsetlist, testsetlist):
train_set_1, train_set_2 = train_set[0], train_set[1]
test_set_1, test_set_2 = test_set[0], test_set[1]
train_set_1_X, val_set_1_X, train_set_1_y, val_set_1_y = train_test_split(train_set_1.X, train_set_1.y, test_size=0.2,
shuffle=True, random_state=42, stratify= train_set_1.y)
train_set_2_X, val_set_2_X, train_set_2_y, val_set_2_y = train_test_split(train_set_2.X, train_set_2.y, test_size=0.2,
shuffle=True, random_state=42, stratify= train_set_2.y)
train_set_1, val_set_1, train_set_2, val_set_2 = multi_SignalAndTarget((train_set_1_X, train_set_1_y), (val_set_1_X, val_set_1_y),
(train_set_2_X, train_set_2_y), (val_set_2_X, val_set_2_y))
if augment:
# Only augment training data - never test or validation sets
train_set_1_os, train_labels_1_os = smote_augmentation(train_set_1.X, train_set_1.y, 2)
train_set_2_os, train_labels_2_os = smote_augmentation(train_set_2.X, train_set_2.y, 2)
train_set_1 = SignalAndTarget(train_set_1_os, train_labels_1_os)
train_set_2 = SignalAndTarget(train_set_2_os, train_labels_2_os)
print(train_set_1.X.shape)
if 'window' in self.hyp_params.keys():
# when using classification window as a hyperparameter - currently data would have to be of same number of samples
if isinstance(self.window, str):
self.window = literal_eval(self.window) # extract tuple of indices
train_set_1_w = SignalAndTarget(train_set_1.X[:,:,self.window[0]:self.window[1]], train_set_1.y)
val_set_1_w = SignalAndTarget(val_set_1.X[:,:,self.window[0]:self.window[1]], val_set_1.y)
test_set_1_w = SignalAndTarget(test_set_1.X[:,:,self.window[0]:self.window[1]], test_set_1.y)
train_set_2_w = SignalAndTarget(train_set_2.X[:,:,self.window[0]:self.window[1]], train_set_2.y)
val_set_2_w = SignalAndTarget(val_set_2.X[:,:,self.window[0]:self.window[1]], val_set_2.y)
test_set_2_w = SignalAndTarget(test_set_2.X[:, :, self.window[0]:self.window[1]], test_set_2.y)
_, _, test_accuracy, optimised_model, predictions, probabilities = self.train_model(train_set_1_w, val_set_1_w, test_set_1_w,
train_set_2_w, val_set_2_w, test_set_2_w, save_model)
if print_details:
print(f"Data 1 train set: {train_set_1.y.shape} | Data 1 val_set: {val_set_1.y.shape} | Data 1 test_set: {test_set_1.y.shape}")
print(f"Data 2 train set: {train_set_2.y.shape} | Data 2 val_set: {val_set_2.y.shape} | Data 2 test_set: {test_set_2.y.shape}")
else:
_, _, test_accuracy, optimised_model, predictions, probabilities = self.train_model(train_set_1, val_set_1, test_set_1,
train_set_2, val_set_2, test_set_2, save_model)
if epochs_save_path is not None:
try:
optimised_model.epochs_df.to_excel(f"{epochs_save_path}/epochs{fold_number}.xlsx")
except FileNotFoundError:
optimised_model.epochs_df.to_excel(f"{epochs_save_path}/epochs{fold_number}.xlsx", engine='xlsxwriter')
fold_models.append(optimised_model)
probs_array = []
for lst in probabilities:
for trial in lst:
probs_array.append(trial) # all probabilities for this test-set
probabilities_list.append(probs_array) #outer probabilities to be used for cross-entropy
print(f"/"*20)
scores.append(test_accuracy)
self.concat_y_pred(predictions)
self.concat_y_true(test_set_1.y)
fold_number += 1
for y_true, y_probs in zip(testsetlist, probabilities_list):
outer_cross_entropy.append(cross_entropy(y_true[0].y, y_probs))
return scores, fold_models, self.y_pred, probabilities_list, outer_cross_entropy, self.y_true
def set_best_params(self):
"""
Set optimal hyperparameter values selected from optimization - Best parameter values can be
accessed with BiModNeuroCNN.results.Results.get_best_params() and the list assigned to self.best_params.
"""
assert type(self.best_params) is list, "list of selected parameters required"
for i in range(len(self.hyp_params)):
setattr(self, list(self.hyp_params.keys())[i], self.best_params[i])
@staticmethod
def get_model_index(df):
"""
Returns the row index of a pandas dataframe used for storing epoch-by-epoch results.
:param df: pandas.DataFrame
:return: int index of the selected epoch based on validation metric
"""
valid_metric_index = df['valid_misclass'].idxmin()
best_val_acc = df.index[df['valid_misclass'] == df['valid_misclass'].iloc[valid_metric_index]]
previous_best = 1.0
i = 0
for n, index in enumerate(best_val_acc):
value = df['test_misclass'][index]
if value < previous_best:
previous_best = value
i = n
return best_val_acc[i]
def concat_y_pred(self, y_pred_fold):
"""
Method for combining all outer-fold predicted values.
:param y_pred_fold: array of single-fold predicted values.
:return: all outer-fold predicted values in a single array
"""
self.y_pred = np.concatenate((self.y_pred, np.array(y_pred_fold)))
def concat_y_true(self, y_true_fold):
"""
Method for combining all outer-fold ground-truth values.
:param y_true_fold: array of single-fold true values.
:return: all outer-fold true values in a single array
"""
self.y_true = np.concatenate((self.y_true, np.array(y_true_fold)))
def concat_probabilities(self, probabilities_fold):
"""
Method for combining all outer-fold class probabilities.
:param probabilities_fold: array of single-fold class probabilities.
:return: all outer-fold probabilities in a single array
"""
self.probabilities = np.concatenate((self.probabilities, probabilities_fold))
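# Hedged usage sketch (the model class, parameter dicts, and fold lists below are
# illustrative placeholders, not part of this module): run the inner-fold
# hyperparameter search, fix the best parameters, then estimate performance on
# the outer folds.
#   clf = Classification(BiModalNet, subnet1_params, subnet2_params, hyp_params,
#                        parameters, data_params, "saved_models/", tag="subj01")
#   val_acc, val_loss, val_ce = clf.train_inner(tr1, va1, tr2, va2)
#   clf.best_params = best_param_list  # e.g. from Results.get_best_params()
#   clf.set_best_params()
#   scores, models, y_pred, probs, outer_ce, y_true = clf.train_outer(trainsetlist, testsetlist)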
| 57.95107
| 154
| 0.644063
| 2,615
| 18,950
| 4.327342
| 0.130402
| 0.028279
| 0.031813
| 0.019795
| 0.510339
| 0.459791
| 0.445564
| 0.435136
| 0.426211
| 0.420467
| 0
| 0.02253
| 0.269235
| 18,950
| 326
| 155
| 58.128834
| 0.794627
| 0.174354
| 0
| 0.156682
| 0
| 0.009217
| 0.054925
| 0.010139
| 0
| 0
| 0
| 0
| 0.009217
| 1
| 0.046083
| false
| 0
| 0.092166
| 0
| 0.165899
| 0.02765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfb9067db6876e985a83eb3d9d6219b06ce32b30
| 1,197
|
py
|
Python
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | 2
|
2019-08-28T19:30:32.000Z
|
2020-03-28T16:17:01.000Z
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | 5
|
2021-03-18T22:53:57.000Z
|
2022-03-11T23:42:38.000Z
|
setup.py
|
adadesions/sfcpy
|
d395218ae9f72fed378c30ad604923373b7fbf3f
|
[
"MIT"
] | null | null | null |
"""Setup script for sfcpy"""
import os.path
from setuptools import setup
# The directory containing this file
HERE = os.path.abspath(os.path.dirname(__file__))
# The text of the README file
with open(os.path.join(HERE, "README.md"), encoding='utf-8') as fid:
README = fid.read()
# This call to setup() does all the work
setup(
name="sfcpy",
version="1.2.3",
description="Space-Filling Curve library for image-processing tasks",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/adadesions/sfcpy",
author="adadesions",
author_email="adadesions@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
packages=["sfcpy"],
include_package_data=True,
tests_require=['pytest'],
install_requires=[
"numpy", "matplotlib", "Pillow"
],
entry_points={"console_scripts": ["sfcpy=sfcpy.__main__:main"]},
)
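# Hedged usage note: after a local install (e.g. `pip install .`), the
# entry_points declaration above exposes the package as a console script:
#   $ sfcpy    # dispatches to sfcpy.__main__:main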
| 29.925
| 73
| 0.652464
| 144
| 1,197
| 5.291667
| 0.618056
| 0.124672
| 0.164042
| 0.136483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011482
| 0.199666
| 1,197
| 39
| 74
| 30.692308
| 0.783925
| 0.104428
| 0
| 0.064516
| 0
| 0
| 0.418233
| 0.023496
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfb94390c72e2b9eb210dfba78b3240cd00784e2
| 7,921
|
py
|
Python
|
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
make_DigitalCommons_spreadsheet.py
|
lsulibraries/CWBR_DigitalCommons
|
6eb994d08d6de088075cde82f6dc2b3aed15bdda
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import csv
import os
from collections import namedtuple
import string
from nameparser import HumanName
def csv_to_dict(filename):
file_dict = dict()
with open(filename, 'r', newline='', encoding='utf-8') as csvfile:
csvreader = csv.reader(csvfile, delimiter='\t', quotechar='"')
headers = next(csvreader)
CWBR = namedtuple('CWBR', headers)
for row in csvreader:
item = CWBR(*row)
if file_dict.get(item.ID):
print('**** Two examples of {} in these spreadsheets ****'.format(item.ID))
exit()
file_dict[item.ID] = item
return file_dict
def make_paragraphs_text(issue):
return '\n\t'.join([i for i in
issue.Review.replace('<br>', '<p>')
.replace('</br>', '</p>')
.replace('</p>', '')
.split('<p>')
if i])
def make_announcement_block(issue):
record_type = issue.Record_type
if record_type.lower() == 'classics':
return 'Feature Essay'
elif record_type.lower() in ('interview', 'editorial', 'review', ):
return record_type
def format_title_parts(string_segment):
string_segment = string.capwords(string_segment, ' ')
string_segment = string_segment.lstrip().replace("'S ", "'s ").replace('’', "'")
string_segment = string_segment.replace('“', '"')
string_segment = string_segment.replace('</p>', '').replace('<p>', '')
return string_segment
def make_title_block(issue):
title_parts, subtitle_parts = find_title_lines(issue)
title_string = ''.join([format_title_parts(title_part)
for title_part in title_parts if title_part])
subtitle_string = ''.join([format_title_parts(subtitle_part)
for subtitle_part in subtitle_parts if subtitle_part])
if title_string and subtitle_string:
return ': '.join([title_string, subtitle_string])
else:
return ''.join([title_string, subtitle_string])
def pull_title_from_Title(issue):
title = strip_bolds_breaks(issue.Title).replace('EDITORIAL:', '').replace('INTERVIEW:', '')
title_parts = [item for item in title.split('<p>') if item]
subtitle_parts = ''
return title_parts, subtitle_parts
def pull_title_from_Headline(issue):
title_parts = [item for item in issue.Headline.split('<p>') if item]
subtitle_parts = [item for item in issue.Sub_headline.split('<p>') if item]
return title_parts, subtitle_parts
def find_title_lines(issue):
if issue.Record_type not in ('Editorial', 'Interview'):
title_parts, subtitle_parts = pull_title_from_Headline(issue)
else:
title_parts, subtitle_parts = pull_title_from_Title(issue)
if not (title_parts or subtitle_parts):
title_parts, subtitle_parts = pull_title_from_Title(issue)
return title_parts, subtitle_parts
def strip_bolds_breaks(text):
for i in ('<br>', '</br>', '<BR>', '</BR>', '<b>', '</b>', '<B>', '</B>', ):
text = text.replace(i, '')
return text
def pick_authors(issue):
author_list = []
if issue.Record_type not in ('Review', 'Classics'):
for author in (issue.Auth_1, issue.Auth_2, issue.Auth_3):
if author:
author = author.replace('<br>', '<p>').replace('</br>', '</p>')
author_list.append(author)
return author_list
else:
if issue.Reviewer:
author_list.append(issue.Reviewer)
return author_list
def parse_name(name):
parsed_name = HumanName(name)
first = parsed_name.first
middle = parsed_name.middle
last = parsed_name.last
suffix = parsed_name.suffix
return (first, middle, last, suffix)
def reformat_issue_type(issue_type):
internal_external_dict = {'Editorial': 'editorial',
'Classics': 'feature_essay',
'Interview': 'author_interview',
'Review': 'review',
}
return internal_external_dict[issue_type]
def make_publication_date(issue_date):
season, year = issue_date.split(' ')
seasons_month_dict = {'Spring': '03',
'Summer': '06',
'Fall': '09',
'Winter': '12'}
month = seasons_month_dict[season]
return '{}-{}-01'.format(year, month)
def make_season(issue_date):
return issue_date.split(' ')[0]
def make_url(issue_id):
return 'https://s3-us-west-2.amazonaws.com/cwbr-publicshare/{}.pdf'.format(issue_id)
def make_csv_data(issues_dict):
csv_data = []
csv_data.append(['title',
'book_id',
'fulltext_url',
'isbn',
'price',
'publication_date',
'season',
'document_type',
'publisher',
'book_pub_date',
'author1_fname',
'author1_mname',
'author1_lname',
'author1_suffix',
'author2_fname',
'author2_mname',
'author2_lname',
'author2_suffix',
'author3_fname',
'author3_mname',
'author3_lname',
'author3_suffix',
'abstract',
])
for k, issue in sorted(issues_dict.items()):
authors_list = pick_authors(issue)
author1_fname, author1_mname, author1_lname, author1_suffix = '', '', '', ''
author2_fname, author2_mname, author2_lname, author2_suffix = '', '', '', ''
author3_fname, author3_mname, author3_lname, author3_suffix = '', '', '', ''
if authors_list:
author1_fname, author1_mname, author1_lname, author1_suffix = parse_name(authors_list[0])
if len(authors_list) > 1:
author2_fname, author2_mname, author2_lname, author2_suffix = parse_name(authors_list[1])
if len(authors_list) > 2:
author3_fname, author3_mname, author3_lname, author3_suffix = parse_name(authors_list[2])
csv_data.append([make_title_block(issue),
issue.ID,
make_url(issue.ID),
issue.ISBN,
issue.Price,
make_publication_date(issue.Issue_date),
make_season(issue.Issue_date),
reformat_issue_type(issue.Record_type),
issue.Publisher,
issue.Pub_date,
author1_fname,
author1_mname,
author1_lname,
author1_suffix,
author2_fname,
author2_mname,
author2_lname,
author2_suffix,
author3_fname,
author3_mname,
author3_lname,
author3_suffix,
make_paragraphs_text(issue),
])
csv_writer(csv_data)
def csv_writer(data):
output_dir = 'uploadSpreadsheet'
os.makedirs(output_dir, exist_ok=True)
with open(os.path.join(output_dir, 'DigitalCommonsSpreadsheet.csv'), "w", newline='', encoding='utf-8') as csv_file:
writer = csv.writer(csv_file, delimiter='\t', quotechar='"')
for line in data:
writer.writerow(line)
if __name__ == '__main__':
issues_dict = csv_to_dict('3rdStageSourceCSVs/Interviews.csv')
make_csv_data(issues_dict)
| 36.004545
| 112
| 0.550562
| 826
| 7,921
| 4.986683
| 0.211864
| 0.033989
| 0.03496
| 0.039087
| 0.369507
| 0.284535
| 0.177227
| 0.168488
| 0.132799
| 0.110464
| 0
| 0.013849
| 0.334554
| 7,921
| 219
| 113
| 36.16895
| 0.767596
| 0.002651
| 0
| 0.067416
| 0
| 0
| 0.102418
| 0.010128
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095506
| false
| 0
| 0.02809
| 0.016854
| 0.224719
| 0.005618
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfb9a74f5e09588db5c20e479a0c85f0735ce76b
| 7,524
|
py
|
Python
|
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
pip_services3_redis/cache/RedisCache.py
|
pip-services-python/pip-services-redis-python
|
ecb2e667ab266af0274b0891a19e802cb256766a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from typing import Optional, Any
import redis
from pip_services3_commons.config import IConfigurable, ConfigParams
from pip_services3_commons.errors import ConfigException, InvalidStateException
from pip_services3_commons.refer import IReferenceable, IReferences
from pip_services3_commons.run import IOpenable
from pip_services3_components.auth import CredentialResolver
from pip_services3_components.cache import ICache
from pip_services3_components.connect import ConnectionResolver
class RedisCache(ICache, IConfigurable, IReferenceable, IOpenable):
"""
Distributed cache that stores values in Redis in-memory database.
### Configuration parameters ###
- connection(s):
- discovery_key: (optional) a key to retrieve the connection from :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>`
- host: host name or IP address
- port: port number
- uri: resource URI or connection string with all parameters in it
- credential(s):
- store_key: key to retrieve parameters from credential store
- username: user name (currently is not used)
- password: user password
- options:
- retries: number of retries (default: 3)
- timeout: default caching timeout in milliseconds (default: 1 minute)
- max_size: maximum number of values stored in this cache (default: 1000)
### References ###
- `*:discovery:*:*:1.0` (optional) :class:`IDiscovery <pip_services3_components.connect.IDiscovery.IDiscovery>` services to resolve connection
- `*:credential-store:*:*:1.0` (optional) Credential stores to resolve credential
Example:
.. code-block:: python
cache = RedisCache()
cache.configure(ConfigParams.from_tuples(
"host", "localhost",
"port", 6379
))
cache.open("123")
cache.store("123", "key1", "ABC", None)
value = cache.retrieve("123", "key1") # Result: "ABC"
"""
def __init__(self):
"""
Creates a new instance of this cache
"""
self.__connection_resolver: ConnectionResolver = ConnectionResolver()
self.__credential_resolver: CredentialResolver = CredentialResolver()
self.__timeout: int = 30000
self.__retries: int = 3
self.__client: redis.Redis = None
def configure(self, config: ConfigParams):
"""
Configures component by passing configuration parameters.
:param config: configuration parameters to be set.
"""
self.__connection_resolver.configure(config)
self.__credential_resolver.configure(config)
self.__timeout = config.get_as_integer_with_default('options.timeout', self.__timeout)
self.__retries = config.get_as_integer_with_default('options.retries', self.__retries)
def set_references(self, references: IReferences):
"""
Sets references to dependent components.
:param references: references to locate the component dependencies.
"""
self.__connection_resolver.set_references(references)
self.__credential_resolver.set_references(references)
def is_open(self) -> bool:
"""
Checks if the component is opened.
:return: true if the component has been opened and false otherwise.
"""
return self.__client is not None
def open(self, correlation_id: Optional[str]):
"""
Opens the component.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
connection = self.__connection_resolver.resolve(correlation_id)
if connection is None:
raise ConfigException(
correlation_id,
'NO_CONNECTION',
'Connection is not configured'
)
credential = self.__credential_resolver.lookup(correlation_id)
options = {
# connect_timeout: self.__timeout,
# max_attempts: self.__retries,
'retry_on_timeout': True,
# 'retry_strategy': lambda options: self.__retry_strategy(options) # TODO add reconnect callback
}
if connection.get_uri():
options['url'] = connection.get_uri()
else:
options['host'] = connection.get_host() or 'localhost'
options['port'] = connection.get_port() or 6379
if credential is not None:
options['password'] = credential.get_password()
self.__client = redis.Redis(**options)
def close(self, correlation_id: Optional[str]):
"""
Closes component and frees used resources.
:param correlation_id: (optional) transaction id to trace execution through call chain.
"""
if self.__client is None: return
self.__client.close()
self.__client = None
def __check_opened(self, correlation_id: Optional[str]):
if not self.is_open():
raise InvalidStateException(
correlation_id,
'NOT_OPENED',
'Connection is not opened'
)
def __retry_strategy(self, options: dict) -> Any:
if options['error'] and options['error']['code'] == 'ECONNREFUSED':
# End reconnecting on a specific error and flush all commands with
# an individual error
return Exception('The server refused the connection')
if options['total_retry_time'] > self.__timeout:
# End reconnecting after a specific timeout and flush all commands
# with an individual error
return Exception('Retry time exhausted')
if options['attempt'] > self.__retries:
# End reconnecting with built in error
return None
return min(int(options['attempt']) * 100, 3000)
def retrieve(self, correlation_id: Optional[str], key: str) -> Any:
"""
Retrieves cached value from the cache using its key.
If value is missing in the cache or expired it returns `None`.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:return: the retrieved cached value or `None` if nothing was found.
"""
self.__check_opened(correlation_id)
return self.__client.get(key)
def store(self, correlation_id: Optional[str], key: str, value: Any, timeout: int) -> Any:
"""
Stores value in the cache with expiration time.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:param value: a value to store.
:param timeout: expiration timeout in milliseconds.
:return: the stored value.
"""
self.__check_opened(correlation_id)
return self.__client.set(name=key, value=value, px=timeout)
def remove(self, correlation_id: Optional[str], key: str) -> Any:
"""
Removes a value from the cache by its key.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param key: a unique value key.
:return: the removed value.
"""
self.__check_opened(correlation_id)
return self.__client.delete(key)
| 37.064039
| 158
| 0.636895
| 819
| 7,524
| 5.666667
| 0.25641
| 0.05042
| 0.049774
| 0.032321
| 0.25167
| 0.23357
| 0.214178
| 0.191338
| 0.138332
| 0.117216
| 0
| 0.009568
| 0.277645
| 7,524
| 202
| 159
| 37.247525
| 0.844342
| 0.427432
| 0
| 0.094595
| 0
| 0
| 0.067328
| 0
| 0
| 0
| 0
| 0.004951
| 0
| 1
| 0.148649
| false
| 0.013514
| 0.121622
| 0
| 0.391892
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfbade8328cd7332030b49fd40ed470582f05c91
| 7,392
|
py
|
Python
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 1
|
2018-10-26T13:33:20.000Z
|
2018-10-26T13:33:20.000Z
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | 652
|
2018-10-26T12:28:08.000Z
|
2021-08-02T09:13:48.000Z
|
main/model/property.py
|
lipis/gae-init-magic
|
6b1e0b50f8e5200cb2dacebca9ac65e796b241a9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from __future__ import absolute_import
from google.appengine.ext import ndb
from api import fields
import model
import util
class Property(model.Base):
name = ndb.StringProperty(required=True)
rank = ndb.IntegerProperty(default=0)
verbose_name = ndb.StringProperty(default='')
show_on_view = ndb.BooleanProperty(default=True, verbose_name='Show on View')
show_on_update = ndb.BooleanProperty(default=True, verbose_name='Show on Update')
show_on_list = ndb.BooleanProperty(default=True, verbose_name='Show on List')
show_on_admin_update = ndb.BooleanProperty(default=True, verbose_name='Show on Admin Update')
show_on_admin_list = ndb.BooleanProperty(default=True, verbose_name='Show on Admin List')
ndb_property = ndb.StringProperty(default='', verbose_name='NDB Property')
kind = ndb.StringProperty()
default = ndb.StringProperty()
required = ndb.BooleanProperty(default=False)
repeated = ndb.BooleanProperty(default=False)
tags = ndb.BooleanProperty(default=False)
indexed = ndb.BooleanProperty(default=True)
auto_now = ndb.BooleanProperty(default=False)
auto_now_add = ndb.BooleanProperty(default=False)
compressed = ndb.BooleanProperty(default=False)
ndb_choices = ndb.StringProperty(verbose_name='Choices')
field_property = ndb.StringProperty(default='')
wtf_property = ndb.StringProperty(default='', verbose_name='WTF Property')
description = ndb.StringProperty(default='')
strip_filter = ndb.BooleanProperty(default=False)
email_filter = ndb.BooleanProperty(default=False)
sort_filter = ndb.BooleanProperty(default=False)
choices = ndb.StringProperty()
forms_property = ndb.StringProperty(default='')
placeholder = ndb.StringProperty(default='')
autofocus = ndb.BooleanProperty(default=False)
readonly = ndb.BooleanProperty(default=False)
def ndb_field(self, include_babel=False):
args = [
'kind=%s' % self.kind if self.kind else '',
'default=%s' % self.default if self.default else '',
'required=True' if self.required else '',
'repeated=%s' % self.repeated if self.repeated else '',
'indexed=False' if not self.indexed else '',
'compressed=True' if self.compressed else '',
'choices=[%s]' % self.ndb_choices if self.ndb_choices else '',
]
if include_babel:
args.append("verbose_name=_(u'%s')" % self.verbose_name_)
else:
args.append("verbose_name=u'%s'" % self.verbose_name if self.verbose_name else '')
return '%s = %s(%s)' % (
self.name,
self.ndb_property,
', '.join([arg for arg in args if arg]),
)
@ndb.ComputedProperty
def api_field(self):
if not self.field_property:
return ''
if self.repeated:
return "'%s': fields.List(%s)," % (self.name, self.field_property)
return "'%s': %s," % (self.name, self.field_property)
@ndb.ComputedProperty
def wtf_field(self):
validators = ['wtforms.validators.%s()' % ('required' if self.required else 'optional')]
if self.ndb_property == 'ndb.StringProperty' and self.wtf_property in ['wtforms.TextAreaField', 'wtforms.StringField']:
validators.append('wtforms.validators.length(max=500)')
filters = [
'util.strip_filter' if self.strip_filter else '',
'util.email_filter' if self.email_filter else '',
'util.sort_filter' if self.sort_filter else '',
]
filters = [f for f in filters if f]
filters = ' filters=[%s],\n' % ', '.join(filters) if filters else ''
description = " description='%s',\n" % self.description if self.description else ''
choices = ''
if self.wtf_property in ['wtforms.RadioField', 'wtforms.SelectField', 'wtforms.SelectMultipleField']:
choices = ' choices=%s,\n' % (self.choices if self.choices else '[]')
date_format = ''
if self.wtf_property == 'wtforms.DateTimeField':
date_format = " format='%Y-%m-%dT%H:%M',\n"
title = '%r' % self.verbose_name_
if self.ndb_property:
title = 'model.%s.%s._verbose_name' % (self.key.parent().get().name, self.name)
if self.wtf_property == 'wtforms.GeoPtField':
validators += ['wtforms.validators.NumberRange(min=-90, max=90)']
validatorss = '[%s]' % ', '.join(validators)
lat = (
'%s_lat = wtforms.FloatField(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, title + " + ' Latitude'", validatorss, filters, choices, description, date_format))
validators.pop()
validators += ['wtforms.validators.NumberRange(min=-180, max=180)']
validatorss = '[%s]' % ', '.join(validators)
lon = (
'\n %s_lon = wtforms.FloatField(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, title + " + ' Longtitute'", validatorss, filters, choices, description, date_format))
return '%s %s' % (lat, lon)
validators = '[%s]' % ', '.join(validators)
return (
'%s = %s(\n'
' %s,\n'
' %s,\n%s%s%s%s'
' )'
% (self.name, self.wtf_property, title, validators, filters, choices, description, date_format))
@ndb.ComputedProperty
def forms_field(self):
autofocus = ', autofocus=True' if self.autofocus else ''
readonly = ', readonly=True' if self.readonly else ''
placeholder = ", placeholder='%s'" % self.placeholder if self.placeholder else ''
if self.forms_property == 'forms.geo_pt_field':
lat = "{{forms.number_field(form.%s_lat%s%s%s)}}" % (self.name, autofocus, readonly, placeholder)
lon = "{{forms.number_field(form.%s_lon%s%s%s)}}" % (self.name, autofocus, readonly, placeholder)
return ('<div class="row">\n'
' <div class="col-sm-6">%s</div>\n <div class="col-sm-6">%s</div>\n </div>' %(lat, lon))
return "{{%s(form.%s%s%s%s)}}" % (self.forms_property, self.name, autofocus, readonly, placeholder)
@ndb.ComputedProperty
def default_verbose_name(self):
return util.snake_to_verbose(self.name)
@ndb.ComputedProperty
def verbose_name_(self):
return self.verbose_name or self.default_verbose_name
def get_title_name(self):
if self.ndb_property != 'ndb.KeyProperty' or not self.kind:
return None
if self.kind == 'model.User':
return 'name'
model_qry = model.Model.query(ancestor=self.key.parent().parent())
model_qry = model_qry.filter(model.Model.name == self.kind.split('.')[1])
model_db = model_qry.get()
if model_db and model_db.title_property_key:
return model_db.title_property_key.get().name
return None
FIELDS = {
'auto_now': fields.Boolean,
'auto_now_add': fields.Boolean,
'autofocus': fields.Boolean,
'choices': fields.String,
'default': fields.String,
'description': fields.String,
'email_filter': fields.Boolean,
'field_property': fields.String,
'forms_property': fields.String,
'kind': fields.String,
'name': fields.String,
'ndb_property': fields.String,
'placeholder': fields.String,
'rank': fields.Integer,
'readonly': fields.Boolean,
'repeated': fields.Boolean,
'required': fields.Boolean,
'sort_filter': fields.Boolean,
'strip_filter': fields.Boolean,
'verbose_name': fields.String,
'wtf_property': fields.String,
}
FIELDS.update(model.Base.FIELDS)
| 38.701571
| 123
| 0.652056
| 911
| 7,392
| 5.155873
| 0.148189
| 0.031935
| 0.090483
| 0.070258
| 0.285288
| 0.170321
| 0.117309
| 0.117309
| 0.090909
| 0.029168
| 0
| 0.003033
| 0.197105
| 7,392
| 190
| 124
| 38.905263
| 0.788374
| 0.001759
| 0
| 0.111111
| 0
| 0.006173
| 0.203741
| 0.064525
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04321
| false
| 0
| 0.030864
| 0.012346
| 0.358025
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfbc302b59b318fa83066ffc6aa91c4caa2533da
| 1,189
|
py
|
Python
|
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | 1
|
2019-09-15T12:30:44.000Z
|
2019-09-15T12:30:44.000Z
|
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | null | null | null |
tests/test_request.py
|
pauleveritt/wired_components
|
a9072d5fc48680d5ff895887842ffd0f06bc0081
|
[
"MIT"
] | null | null | null |
import pytest
from wired import ServiceContainer
@pytest.fixture
def request_container(registry, simple_root) -> ServiceContainer:
from wired_components.request import wired_setup as request_setup
from wired_components.resource import IRoot
from wired_components.url import IUrl, Url
# Outside system puts some things in the registry
registry.register_singleton(simple_root, IRoot)
request_setup(registry)
# Make a container and return it
container: ServiceContainer = registry.create_container(
context=simple_root
)
url = Url(path='somepath')
container.register_singleton(url, IUrl)
return container
def test_request_wired_setup(registry):
from wired_components.request import wired_setup
assert wired_setup(registry) is None
def test_request_instance(registry, request_container, simple_root):
# Get the request from the container
from wired_components.request import IRequest, Request
request: Request = request_container.get(IRequest)
# See if we're constructed correctly
assert request.context.title == 'My Site'
assert request.path == 'somepath'
assert request.root == simple_root
| 31.289474
| 69
| 0.764508
| 148
| 1,189
| 5.966216
| 0.358108
| 0.061155
| 0.107588
| 0.088335
| 0.13137
| 0.09513
| 0.09513
| 0
| 0
| 0
| 0
| 0
| 0.175778
| 1,189
| 37
| 70
| 32.135135
| 0.90102
| 0.124474
| 0
| 0
| 0
| 0
| 0.022201
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.125
| false
| 0
| 0.291667
| 0
| 0.458333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfbd03cf9bf0d42acbc4621a1653916d133bdb8e
| 958
|
py
|
Python
|
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | null | null | null |
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | null | null | null |
Charts and Graphs/LollipopCharts.py
|
aprakash7/Buildyourown
|
58f0530ea84bf9e91f258d947610ea1e93d7d456
|
[
"MIT"
] | 1
|
2021-05-31T04:20:54.000Z
|
2021-05-31T04:20:54.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 21:24:53 2021
@author: Akshay Prakash
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
table = pd.read_csv(r'\1617table.csv')
table.head()
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = 'skyblue')
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.show()
teamColours = ['#034694','#001C58','#5CBFEB','#D00027',
'#EF0107','#DA020E','#274488','#ED1A3B',
'#000000','#091453','#60223B','#0053A0',
'#E03A3E','#1B458F','#000000','#53162f',
'#FBEE23','#EF6610','#C92520','#BA1F1A']
plt.hlines(y= np.arange(1, 21), xmin = 0, xmax = table['Pts'], color = teamColours)
plt.plot(table['Pts'], np.arange(1,21), "o")
plt.yticks(np.arange(1,21), table['team'])
plt.xlabel('Points')
plt.ylabel('Teams')
plt.title("Premier league 16/17")
| 30.903226
| 84
| 0.583507
| 134
| 958
| 4.164179
| 0.544776
| 0.086022
| 0.096774
| 0.11828
| 0.365591
| 0.365591
| 0.365591
| 0.365591
| 0.365591
| 0.365591
| 0
| 0.16092
| 0.182672
| 958
| 30
| 85
| 31.933333
| 0.551724
| 0.086639
| 0
| 0.2
| 0
| 0
| 0.256287
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfbf2ca5c949daa624f3881dc6dcb4567701067b
| 1,126
|
py
|
Python
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:44:58.000Z
|
2019-11-23T10:44:58.000Z
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 8
|
2020-07-16T07:14:12.000Z
|
2020-10-14T17:25:33.000Z
|
python/merge-kml-files/merge-kml-files.py
|
bmaupin/graveyard
|
71d52fe6589ce13dfe7433906d1aa50df48c9f94
|
[
"MIT"
] | 1
|
2019-11-23T10:45:00.000Z
|
2019-11-23T10:45:00.000Z
|
#!/usr/bin/env python
import sys
import lxml.etree
def main():
if len(sys.argv) < 3:
sys.stderr.write('ERROR: Must provide at least 2 KML files to merge\n')
sys.exit('Usage: {} FILE1 FILE2 ...'.format(sys.argv[0]))
first_kml_root = lxml.etree.parse(sys.argv[1]).getroot()
first_kml_ns = first_kml_root.nsmap[None]
first_kml_document = first_kml_root.find('{{{}}}Document'.format(
first_kml_ns))
for filename in sys.argv[2:]:
kml_root = lxml.etree.parse(filename).getroot()
kml_ns = kml_root.nsmap[None]
kml_document = kml_root.find('{{{}}}Document'.format(kml_ns))
# Add the Document node's child elements to the first KML file
for element in kml_document.iterchildren():
first_kml_document.append(element)
print(lxml.etree.tostring(
first_kml_root,
encoding='utf-8',
xml_declaration=True,
pretty_print=True,
# .decode('utf-8') is required for Python 3
).decode('utf-8'))
if __name__ == '__main__':
main()
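# Usage sketch (matches the Usage string printed above; the merged KML is
# written to stdout):
#   ./merge-kml-files.py first.kml second.kml > merged.kml
# The Document children of every later file are appended to the first file's
# Document node, so the first file supplies the merged document's header data.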
| 31.277778
| 79
| 0.599467
| 151
| 1,126
| 4.251656
| 0.456954
| 0.11215
| 0.074766
| 0.049844
| 0.143302
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013398
| 0.27087
| 1,126
| 35
| 80
| 32.171429
| 0.768575
| 0.109236
| 0
| 0
| 0
| 0
| 0.122
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.125
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfbf59c5b26596753447f4f968efc9068d24fa0b
| 3,829
|
py
|
Python
|
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
tccli/services/partners/v20180321/help.py
|
tarnover/tencentcloud-cli
|
5b0537913a33884a20d7663405a8aa1c2276b41a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
DESC = "partners-2018-03-21"
INFO = {
"AgentPayDeals": {
"params": [
{
"name": "OwnerUin",
"desc": "订单所有者uin"
},
{
"name": "AgentPay",
"desc": "代付标志,1:代付;0:自付"
},
{
"name": "DealNames",
"desc": "订单号数组"
}
],
"desc": "代理商支付订单接口,支持自付/代付"
},
"DescribeAgentBills": {
"params": [
{
"name": "SettleMonth",
"desc": "支付月份,如2018-02"
},
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "PayMode",
"desc": "支付方式,prepay/postpay"
},
{
"name": "OrderId",
"desc": "预付费订单号"
},
{
"name": "ClientRemark",
"desc": "客户备注名称"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己及名下代客所有业务明细"
},
"AgentTransferMoney": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "Amount",
"desc": "转账金额,单位分"
}
],
"desc": "为合作伙伴提供转账给客户能力。仅支持合作伙伴为自己名下客户转账。"
},
"DescribeRebateInfos": {
"params": [
{
"name": "RebateMonth",
"desc": "返佣月份,如2018-02"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己名下全部返佣信息"
},
"ModifyClientRemark": {
"params": [
{
"name": "ClientRemark",
"desc": "客户备注名称"
},
{
"name": "ClientUin",
"desc": "客户账号ID"
}
],
"desc": "代理商可以对名下客户添加备注、修改备注"
},
"DescribeAgentClients": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "ClientName",
"desc": "客户名称。由于涉及隐私,名称打码显示,故名称仅支持打码后的模糊搜索"
},
{
"name": "ClientFlag",
"desc": "客户类型,a/b,类型定义参考代理商相关政策文档"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC, 不区分大小写,按申请时间排序"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
}
],
"desc": "代理商可查询自己名下待审核客户列表"
},
"DescribeClientBalance": {
"params": [
{
"name": "ClientUin",
"desc": "客户(代客)账号ID"
}
],
"desc": "为合作伙伴提供查询客户余额能力。调用者必须是合作伙伴,只能查询自己名下客户余额"
},
"DescribeAgentAuditedClients": {
"params": [
{
"name": "ClientUin",
"desc": "客户账号ID"
},
{
"name": "ClientName",
"desc": "客户名称。由于涉及隐私,名称打码显示,故名称仅支持打码后的模糊搜索"
},
{
"name": "ClientFlag",
"desc": "客户类型,a/b,类型定义参考代理商相关政策文档"
},
{
"name": "OrderDirection",
"desc": "ASC/DESC, 不区分大小写,按审核通过时间排序"
},
{
"name": "ClientUins",
"desc": "客户账号ID列表"
},
{
"name": "HasOverdueBill",
"desc": "是否欠费。0:不欠费;1:欠费"
},
{
"name": "ClientRemark",
"desc": "客户备注"
},
{
"name": "Offset",
"desc": "偏移量"
},
{
"name": "Limit",
"desc": "限制数目"
},
{
"name": "ClientType",
"desc": "客户类型:可以为new(新拓)/assign(指定)/old(存量)/空"
},
{
"name": "ProjectType",
"desc": "项目类型:可以为self(自拓项目)/platform(合作项目)/repeat(复算项目 )/空"
}
],
"desc": "查询已审核客户列表"
},
"AuditApplyClient": {
"params": [
{
"name": "ClientUin",
"desc": "待审核客户账号ID"
},
{
"name": "AuditResult",
"desc": "审核结果,可能的取值:accept/reject"
},
{
"name": "Note",
"desc": "申请理由,B类客户审核通过时必须填写申请理由"
}
],
"desc": "代理商可以审核其名下申请中代客"
}
}
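# Illustrative lookup (assumption: tccli renders CLI help from this mapping):
# each action name maps to its parameter list and description, e.g.
#   INFO["AgentTransferMoney"]["params"][1]["name"]  # -> "Amount"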
| 19.049751
| 68
| 0.404022
| 256
| 3,829
| 6.042969
| 0.457031
| 0.058177
| 0.076923
| 0.074337
| 0.365869
| 0.312217
| 0.290886
| 0.290886
| 0.268908
| 0.195217
| 0
| 0.010841
| 0.397754
| 3,829
| 201
| 69
| 19.049751
| 0.660017
| 0.005484
| 0
| 0.305
| 0
| 0
| 0.420804
| 0.094563
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfc3450cc6a455bca7329de3130cbc552b8baa62
| 747
|
py
|
Python
|
2019/10 October/dp10302019.py
|
vishrutkmr7/DailyPracticeProblemsDIP
|
d1bfbc75f2024736c22c05385f753a90ddcfa0f5
|
[
"MIT"
] | 5
|
2019-08-06T02:34:41.000Z
|
2022-01-08T03:03:16.000Z
|
2019/10 October/dp10302019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 15
|
2021-06-01T14:04:16.000Z
|
2022-03-08T21:17:22.000Z
|
2019/10 October/dp10302019.py
|
ourangzeb/DailyPracticeProblemsDIP
|
66c07af88754e5d59b243e3ee9f02db69f7c0a77
|
[
"MIT"
] | 4
|
2019-09-19T20:00:05.000Z
|
2021-08-16T11:31:51.000Z
|
# This problem was recently asked by LinkedIn:
# Given a non-empty array where each element represents a digit of a non-negative integer, add one to the integer.
# The most significant digit is at the front of the array and each element in the array contains only one digit.
# Furthermore, the integer does not have leading zeros, except in the case of the number '0'.
class Solution:
def plusOne(self, digits):
# Fill this in.
num = ""
for i in range(0, len(digits)):
num = num + str(digits[i])
sol = int(num) + 1
sol = list(str(sol))
for j in range(0, len(sol)):
sol[j] = int(sol[j])
return sol
num = [2, 9, 9]
print(Solution().plusOne(num))
# [3, 0, 0]
| 28.730769
| 114
| 0.619813
| 120
| 747
| 3.858333
| 0.55
| 0.017279
| 0.034557
| 0.047516
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018553
| 0.278447
| 747
| 25
| 115
| 29.88
| 0.840445
| 0.514056
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfc40c993839966190091cb6ae4333cb9d7b2cc3
| 1,122
|
py
|
Python
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | 1
|
2021-02-02T09:47:40.000Z
|
2021-02-02T09:47:40.000Z
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | 1
|
2021-08-04T13:00:00.000Z
|
2021-08-04T13:00:00.000Z
|
kbr/run_utils.py
|
brugger/kbr-tools
|
95c8f8274e28b986e7fd91c8404026433488c940
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
import os
class ExecutionInfo:
def __init__(self, p_status: int, stdout: str, stderr: str):
self.p_status = p_status
self.stdout = stdout
self.stderr = stderr
def exit_fail(msg: str = "") -> None:
print(msg)
sys.exit(-1)
def exit_ok(msg: str = "") -> None:
print(msg)
sys.exit(0)
def launch_cmd(cmd: str, cwd: str = "", use_shell_env: bool = False) -> ExecutionInfo:
effective_command = cmd
d = None
if use_shell_env:
d = dict(os.environ)
if cwd == '':
p = subprocess.Popen(effective_command, stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, env=d)
else:
p = subprocess.Popen(effective_command, stdout=subprocess.PIPE, shell=True, stderr=subprocess.PIPE, cwd=cwd, env=d)
stdout, stderr = p.communicate()
p_status = p.wait()
return ExecutionInfo(p_status, stdout, stderr)
def print_outputs(e:ExecutionInfo) -> None:
if e.stdout != b'':
print(e.stdout.decode('utf-8').rstrip("\n"))
if e.stderr != b'':
print(e.stderr.decode('utf-8').rstrip("\n"))
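# Minimal usage sketch (assumes a POSIX shell, since launch_cmd runs with
# shell=True):
#   info = launch_cmd("echo hello", use_shell_env=True)
#   print_outputs(info)          # prints "hello"
#   if info.p_status != 0:
#       exit_fail("command failed")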
| 23.375
| 123
| 0.632799
| 156
| 1,122
| 4.423077
| 0.333333
| 0.050725
| 0.031884
| 0.043478
| 0.356522
| 0.307246
| 0.307246
| 0.234783
| 0.234783
| 0.234783
| 0
| 0.004582
| 0.221925
| 1,122
| 47
| 124
| 23.87234
| 0.785796
| 0
| 0
| 0.064516
| 0
| 0
| 0.012489
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.096774
| 0
| 0.322581
| 0.16129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfc40d4989f8ef494b36888ba91588827d76ffc5
| 2,614
|
py
|
Python
|
tests/client/test_files.py
|
philopon/datapane
|
d7d69865d4def0cbe6eb334acd9edeb829dd67e6
|
[
"Apache-2.0"
] | 481
|
2020-04-25T05:40:21.000Z
|
2022-03-30T22:04:35.000Z
|
tests/client/test_files.py
|
tig/datapane
|
defae6776e73b07191c0a5804a50b284ec3c9a63
|
[
"Apache-2.0"
] | 74
|
2020-04-28T10:47:35.000Z
|
2022-03-14T15:50:55.000Z
|
tests/client/test_files.py
|
admariner/datapane
|
c440eaf07bd1c1f2de3ff952e0fd8c78d636aa8f
|
[
"Apache-2.0"
] | 41
|
2020-07-21T16:30:21.000Z
|
2022-02-21T22:50:27.000Z
|
from pathlib import Path
import altair as alt
import folium
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.graph_objects as p_go
import pytest
from bokeh.layouts import column
from bokeh.models import ColumnDataSource
from bokeh.plotting import figure
from pandas.io.formats.style import Styler
from datapane.client.api.files import save
data = pd.DataFrame({"x": np.random.randn(20), "y": np.random.randn(20)})
def test_save_base(tmp_path: Path, monkeypatch):
# absolute filename tests
# test with no filename
save(data)
save(data)
# relative filename tests
monkeypatch.chdir(tmp_path)
save(data)
def test_save_matplotlib(tmp_path: Path):
pd.set_option("plotting.backend", "matplotlib")
fig, ax = plt.subplots()
data.plot.scatter("x", "y", ax=ax)
# test svg default
save(fig)
# test save axes only
save(ax)
# test save ndarray
save(data.hist())
def test_save_bokeh(tmp_path: Path):
source = ColumnDataSource(data)
p = figure()
p.circle(x="x", y="y", source=source)
f = save(p)
assert f.mime == "application/vnd.bokeh.show+json"
def test_save_bokeh_layout(tmp_path: Path):
source = ColumnDataSource(data)
p = figure()
p.circle(x="x", y="y", source=source)
f = save(column(p, p))
assert f.mime == "application/vnd.bokeh.show+json"
def test_save_altair(tmp_path: Path):
plot = alt.Chart(data).mark_bar().encode(y="y", x="x")
save(plot)
def test_save_folium(tmp_path: Path):
map = folium.Map(location=[45.372, -121.6972], zoom_start=12, tiles="Stamen Terrain")
save(map)
def test_save_plotly(tmp_path: Path):
fig = p_go.Figure()
fig.add_trace(p_go.Scatter(x=[0, 1, 2, 3, 4, 5], y=[1.5, 1, 1.3, 0.7, 0.8, 0.9]))
save(fig)
# NOTE - test disabled until pip release of altair_pandas - however should work if altair test passes
@pytest.mark.skip(reason="altair_pandas not yet supported")
def test_save_altair_pandas(tmp_path: Path):
pd.set_option("plotting.backend", "altair") # Installing altair_pandas registers this.
plot = data.plot.scatter("x", "y")
save(plot)
# NOTE - test disabled updated pip release of pdvega that tracks git upstream - however should work if altair test passes
@pytest.mark.skip(reason="pdvega not yet supported")
def test_save_pdvega(tmp_path: Path):
import pdvega # noqa: F401
plot = data.vgplot.scatter("x", "y")
save(plot)
def test_save_table(tmp_path: Path):
# tests saving a DF directly to a html file
save(data)
# save styled table
save(Styler(data))
| 26.948454 | 121 | 0.694721 | 411 | 2,614 | 4.309002 | 0.350365 | 0.054207 | 0.062112 | 0.01694 | 0.321287 | 0.271033 | 0.241671 | 0.241671 | 0.199887 | 0.199887 | 0 | 0.017757 | 0.181331 | 2,614 | 96 | 122 | 27.229167 | 0.809813 | 0.174445 | 0 | 0.278689 | 0 | 0 | 0.089977 | 0.028904 | 0 | 0 | 0 | 0 | 0.032787 | 1 | 0.163934 | false | 0 | 0.229508 | 0 | 0.393443 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfc5ea1ec35f681b24bc22174c17b45b8de95235 | 1,417 | py | Python | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | ["Unlicense"] | 51 | 2020-05-23T22:31:53.000Z | 2022-03-08T19:14:04.000Z | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | ["Unlicense"] | 20 | 2020-05-15T10:20:38.000Z | 2022-02-06T23:21:56.000Z | twirp/logging.py | batchar2/twirpy | e5940a2a038926844098def09748953287071747 | ["Unlicense"] | 10 | 2020-05-29T09:55:49.000Z | 2021-10-16T00:14:04.000Z |
import os
import logging
import sys
import structlog
from structlog.stdlib import LoggerFactory, add_log_level
_configured = False
def configure(force = False):
"""
Configures logging & structlog modules
Keyword Arguments:
force: Force to reconfigure logging.
"""
global _configured
if _configured and not force:
return
# Check whether debug flag is set
debug = os.environ.get('DEBUG_MODE', False)
# Set appropriate log level
if debug:
log_level = logging.DEBUG
else:
log_level = logging.INFO
# Set logging config
logging.basicConfig(
level = log_level,
format = "%(message)s",
)
# Configure structlog
structlog.configure(
logger_factory = LoggerFactory(),
processors = [
add_log_level,
# Add timestamp
structlog.processors.TimeStamper('iso'),
# Add stack information
structlog.processors.StackInfoRenderer(),
# Set exception field using exec info
structlog.processors.format_exc_info,
# Render event_dict as JSON
structlog.processors.JSONRenderer()
]
)
_configured = True
def get_logger(**kwargs):
"""
Get the structlog logger
"""
# Configure logging modules
configure()
# Return structlog
return structlog.get_logger(**kwargs)
| 22.140625 | 57 | 0.628088 | 143 | 1,417 | 6.097902 | 0.468531 | 0.055046 | 0.025229 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.299224 | 1,417 | 63 | 58 | 22.492063 | 0.878147 | 0.253352 | 0 | 0 | 0 | 0 | 0.023762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.151515 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
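A minimal usage sketch for the twirp/logging.py helper above (the import path and the service kwarg are illustrative assumptions, not part of the file):
from twirp.logging import configure, get_logger
configure()                       # idempotent: the _configured flag makes a second call a no-op
log = get_logger(service="demo")  # kwargs become values bound into every log entry
log.info("server_started", port=8080)  # rendered as one JSON line with level and timestamp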
dfc68640fe94c25498745f6373d4a8f15e6f9a5f | 878 | py | Python | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | ["MIT"] | null | null | null | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | ["MIT"] | null | null | null | setup.py | Arkq/pyexec | ec90b0aaff80996155d033bd722ff59c9259460e | ["MIT"] | null | null | null |
# setup.py
# Copyright (c) 2015-2017 Arkadiusz Bokowy
#
# This file is a part of pyexec.
#
# This project is licensed under the terms of the MIT license.
from setuptools import setup
import pyexec
with open("README.rst") as f:
long_description = f.read()
setup(
name="pyexec",
version=pyexec.__version__,
author="Arkadiusz Bokowy",
author_email="arkadiusz.bokowy@gmail.com",
url="https://github.com/Arkq/pyexec",
description="Signal-triggered process reloader",
long_description=long_description,
license="MIT",
py_modules=["pyexec"],
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
],
)
| 25.085714 | 62 | 0.654897 | 100 | 878 | 5.66 | 0.65 | 0.079505 | 0.088339 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014577 | 0.218679 | 878 | 34 | 63 | 25.823529 | 0.810496 | 0.160592 | 0 | 0 | 0 | 0 | 0.450685 | 0.035616 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfc7144f2268699316911b76b5597b6509452a54 | 4,898 | py | Python | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | ["MIT"] | null | null | null | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | ["MIT"] | 21 | 2022-02-14T10:58:52.000Z | 2022-03-28T14:04:40.000Z | data-sources/kbr/authority-persons-marc-to-csv.py | kbrbe/beltrans-data-integration | 951ae3941b22a6fe0a8d30079bdf6f4f0a55f092 | ["MIT"] | null | null | null |
#
# (c) 2022 Sven Lieber
# KBR Brussels
#
#import xml.etree.ElementTree as ET
import lxml.etree as ET
import os
import json
import itertools
import enchant
import hashlib
import csv
from optparse import OptionParser
import utils
import stdnum
NS_MARCSLIM = 'http://www.loc.gov/MARC21/slim'
ALL_NS = {'marc': NS_MARCSLIM}
# -----------------------------------------------------------------------------
def addAuthorityFieldsToCSV(elem, writer, natWriter, stats):
"""This function extracts authority relevant data from the given XML element 'elem' and writes it to the given CSV file writer."""
#
# extract relevant data from the current record
#
authorityID = utils.getElementValue(elem.find('./marc:controlfield[@tag="001"]', ALL_NS))
namePerson = utils.getElementValue(elem.find('./marc:datafield[@tag="100"]/marc:subfield[@code="a"]', ALL_NS))
nameOrg = utils.getElementValue(elem.find('./marc:datafield[@tag="110"]/marc:subfield[@code="a"]', ALL_NS))
nationalities = utils.getElementValue(elem.findall('./marc:datafield[@tag="370"]/marc:subfield[@code="c"]', ALL_NS))
gender = utils.getElementValue(elem.find('./marc:datafield[@tag="375"]/marc:subfield[@code="a"]', ALL_NS))
birthDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="f"]', ALL_NS))
deathDateRaw = utils.getElementValue(elem.find('./marc:datafield[@tag="046"]/marc:subfield[@code="g"]', ALL_NS))
isniRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and (text()="isni" or text()="ISNI")]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
viafRaw = utils.getElementValue(elem.xpath('./marc:datafield[@tag="024"]/marc:subfield[@code="2" and text()="viaf"]/../marc:subfield[@code="a"]', namespaces=ALL_NS))
countryCode = utils.getElementValue(elem.find('./marc:datafield[@tag="043"]/marc:subfield[@code="c"]', ALL_NS))
(familyName, givenName) = utils.extractNameComponents(namePerson)
birthDate = ''
deathDate = ''
datePatterns = ['%Y', '(%Y)', '[%Y]', '%Y-%m-%d', '%d/%m/%Y', '%Y%m%d']
if birthDateRaw:
birthDate = utils.parseDate(birthDateRaw, datePatterns)
if deathDateRaw:
deathDate = utils.parseDate(deathDateRaw, datePatterns)
name = f'{namePerson} {nameOrg}'.strip()
if nationalities:
nationalityURIString = utils.createURIString(nationalities, ';', 'http://id.loc.gov/vocabulary/countries/')
for n in nationalityURIString.split(';'):
natWriter.writerow({'authorityID': authorityID, 'nationality': n})
newRecord = {
'authorityID': authorityID,
'name': name,
'family_name': familyName,
'given_name': givenName,
'gender': gender,
'birth_date': birthDate,
'death_date': deathDate,
'isni_id': utils.extractIdentifier(authorityID, f'ISNI {isniRaw}', pattern='ISNI'),
'viaf_id': utils.extractIdentifier(authorityID, f'VIAF {viafRaw}', pattern='VIAF'),
'country_code': countryCode
}
writer.writerow(newRecord)
# -----------------------------------------------------------------------------
def main():
"""This script reads an XML file in MARC slim format and extracts several fields to create a CSV file."""
parser = OptionParser(usage="usage: %prog [options]")
parser.add_option('-i', '--input-file', action='store', help='The input file containing MARC SLIM XML records')
parser.add_option('-o', '--output-file', action='store', help='The output CSV file containing selected MARC fields')
parser.add_option('-n', '--nationality-csv', action='store', help='The output CSV file containing the IDs of authorities and their nationality')
(options, args) = parser.parse_args()
#
# Check if we got all required arguments
#
if( (not options.input_file) or (not options.output_file) or (not options.nationality_csv) ):
parser.print_help()
exit(1)
#
# Instead of loading everything to main memory, stream over the XML using iterparse
#
with open(options.output_file, 'w') as outFile, \
open(options.nationality_csv, 'w') as natFile:
stats = {}
outputFields = ['authorityID', 'name', 'family_name', 'given_name', 'gender', 'birth_date', 'death_date', 'isni_id', 'viaf_id', 'country_code']
outputWriter = csv.DictWriter(outFile, fieldnames=outputFields, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
outputWriter.writeheader()
nationalityWriter = csv.DictWriter(natFile, fieldnames=['authorityID', 'nationality'], delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
nationalityWriter.writeheader()
for event, elem in ET.iterparse(options.input_file, events=('start', 'end')):
# The parser finished reading one authority record, get information and then discard the record
if event == 'end' and elem.tag == ET.QName(NS_MARCSLIM, 'record'):
addAuthorityFieldsToCSV(elem, outputWriter, nationalityWriter, stats)
main()
| 43.732143 | 186 | 0.682115 | 589 | 4,898 | 5.602716 | 0.339559 | 0.016667 | 0.053333 | 0.059394 | 0.276364 | 0.23697 | 0.179394 | 0.106667 | 0.081818 | 0.081818 | 0 | 0.009119 | 0.126786 | 4,898 | 111 | 187 | 44.126126 | 0.76245 | 0.144957 | 0 | 0 | 0 | 0.028571 | 0.30882 | 0.142033 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028571 | false | 0 | 0.142857 | 0 | 0.171429 | 0.014286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
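The script above streams MARC records with lxml's iterparse; a common companion idiom (not used in the file, shown here as a hedged sketch with an invented file name) is to clear each record element once processed so memory stays flat on large exports:
import lxml.etree as ET
NS = 'http://www.loc.gov/MARC21/slim'
for event, elem in ET.iterparse('records.xml', events=('end',)):
    # same QName match as in main() above
    if elem.tag == ET.QName(NS, 'record'):
        # extract fields here with elem.find(...), then release the subtree
        elem.clear()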
dfc7e5a8bbc57e53f20590d631fe2b87c31a1671 | 3,886 | py | Python | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | ["MIT"] | null | null | null | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | ["MIT"] | null | null | null | promoterz/evaluationPool.py | emillj/gekkoJaponicus | d77c8c7a303b97a3643eb3f3c8b995b8b393f3f7 | ["MIT"] | 1 | 2021-11-29T20:18:25.000Z | 2021-11-29T20:18:25.000Z |
#!/bin/python
import time
import random
from multiprocessing import Pool, Process, Pipe, TimeoutError
from multiprocessing.pool import ThreadPool
class EvaluationPool():
def __init__(self, EvaluationTool, Urls, poolsize):
self.EvaluationTool = EvaluationTool
self.Urls = Urls
self.lasttimes = [0 for x in Urls]
self.lasttimesperind = [0 for x in Urls]
self.poolsizes = [5 for x in Urls]
def ejectURL(self, Index):
self.Urls.pop(Index)
self.lasttimes.pop(Index)
self.lasttimesperind.pop(Index)
self.poolsizes.pop(Index)
def evaluateBackend(self, DateRange, I, inds):
stime = time.time()
Q = [ (DateRange, ind, self.Urls[I]) for ind in inds ]
P = Pool(self.poolsizes[I])
fitnesses = P.starmap(self.EvaluationTool, Q )
P.close()
P.join()
delta_time=time.time()-stime
return fitnesses, delta_time
def evaluatePopulation(self, locale):
individues_to_simulate = [ind for ind in locale.population\
if not ind.fitness.valid]
props=self.distributeIndividuals(individues_to_simulate)
args = [ [locale.DateRange, I, props[I]]\
for I in range(len(self.Urls))]
pool = ThreadPool(len(self.Urls))
results=[]
for A in args:
results.append(pool.apply_async(self.evaluateBackend, A))
pool.close()
TimedOut=[]
for A in range(len(results)):
try:
perindTime = 3 * self.lasttimesperind[A] if self.lasttimesperind[A] else 12
timeout = perindTime*len(props[A]) if A else None # no timeout for local machine;
results[A] = results[A].get(timeout=timeout)
except TimeoutError: # Timeout: remote machine is dead, et al
print("Machine timeouts!")
args[A][1] = 0 # Set to evaluate @ local machine
results[A] = self.evaluateBackend(*args[A])
TimedOut.append(A)
pool.join()
for PoolIndex in range(len(results)):
for i, fit in zip(range(len(results[PoolIndex][0])), results[PoolIndex][0]):
props[PoolIndex][i].fitness.values = fit
self.lasttimes[PoolIndex] = results[PoolIndex][1]
L = len(props[PoolIndex])
self.lasttimesperind[PoolIndex] = self.lasttimes[PoolIndex] / L if L else 5
F = [x.fitness.valid for x in individues_to_simulate]
assert(all(F))
for T in TimedOut:
self.ejectURL(T)
return len(individues_to_simulate)
def distributeIndividuals(self, tosimulation):
nb_simulate = len(tosimulation)
sumtimes = sum(self.lasttimes)
#stdtime = sum(self.lasttimes)/len(self.lasttimes)
std = nb_simulate/len(self.Urls)
#stdTPI = sum(self.lasttimesperind)/len(self.lasttimesperind)
#print(stdTPI)
if sumtimes:
vels = [ 1/x for x in self.lasttimes ]
constant = nb_simulate/sum(vels)
proportions = [ max(1, x*constant) for x in vels ]
else:
proportions = [std for x in self.Urls]
proportions = [int(round(x)) for x in proportions]
pC = lambda x:random.randrange(0,len(x))
pB = lambda x: x.index(min(x))
pM = lambda x: x.index(max(x))
while sum(proportions) < nb_simulate:
proportions[pB(proportions)] +=1
print('+')
while sum(proportions) > nb_simulate:
proportions[pM(proportions)] -=1
print('-')
print(proportions)
assert(sum(proportions) == nb_simulate)
distribution = []
L=0
for P in proportions:
distribution.append(tosimulation[L:L+P])
L=L+P
return distribution
| 33.213675 | 97 | 0.587494 | 457 | 3,886 | 4.949672 | 0.258206 | 0.045977 | 0.02122 | 0.013263 | 0.04863 | 0.04863 | 0 | 0 | 0 | 0 | 0 | 0.006662 | 0.304683 | 3,886 | 116 | 98 | 33.5 | 0.830496 | 0.060473 | 0 | 0 | 0 | 0 | 0.005214 | 0 | 0 | 0 | 0 | 0 | 0.022989 | 1 | 0.057471 | false | 0 | 0.045977 | 0 | 0.149425 | 0.045977 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
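evaluateBackend above fans work out with Pool.starmap; a self-contained sketch of that pattern (toy fitness function and sizes, purely illustrative):
from multiprocessing import Pool

def evaluate(date_range, individual, url):
    # stand-in for a real fitness computation
    return len(individual) + len(url)

if __name__ == '__main__':
    tasks = [('2020-01', 'ind%d' % i, 'http://localhost') for i in range(4)]
    with Pool(2) as pool:
        print(pool.starmap(evaluate, tasks))  # one result per (DateRange, ind, url) tuple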
dfc9bea7af7becf02c3cd0e4f00d6640fee9f247 | 3,001 | py | Python | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 19 | 2015-11-10T17:36:20.000Z | 2021-04-12T07:36:00.000Z | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 1 | 2021-06-09T03:45:34.000Z | 2021-06-09T03:45:34.000Z | website/drawquest/apps/following/models.py | bopopescu/drawquest-web | 8d8f9149b6efeb65202809a5f8916386f58a1b3b | ["BSD-3-Clause"] | 6 | 2015-11-11T00:38:38.000Z | 2020-07-25T20:10:08.000Z |
from canvas.cache_patterns import CachedCall
from drawquest import knobs
from drawquest.apps.drawquest_auth.models import User
from drawquest.apps.drawquest_auth.details_models import UserDetails
from drawquest.pagination import FakePaginator
def _sorted(users):
return sorted(users, key=lambda user: user.username.lower())
def _for_viewer(users, viewer=None):
if viewer is None or not viewer.is_authenticated():
return users
following = [int(id_) for id_ in viewer.redis.new_following.zrange(0, -1)]
for user in users:
user.viewer_is_following = user.id in following
return users
def _paginate(redis_obj, offset, request=None):
'''
items should already start at the proper offset.
'''
if offset == 'top':
items = redis_obj.zrevrange(0, knobs.FOLLOWERS_PER_PAGE, withscores=True)
else:
items = redis_obj.zrevrangebyscore('({}'.format(offset), '-inf',
start=0,
num=knobs.FOLLOWERS_PER_PAGE,
withscores=True)
try:
next_offset = items[-1][1]
next_offset = next_offset.__repr__()
except IndexError:
next_offset = None
items = [item for item, ts in items]
pagination = FakePaginator(items, offset=offset, next_offset=next_offset)
return items, pagination
def followers(user, viewer=None, offset='top', direction='next', request=None):
""" The users who are following `user`. """
if direction != 'next':
raise ValueError("Follwers only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_followers.zrevrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_followers, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
def following(user, viewer=None, offset='top', direction='next', request=None):
""" The users that `user` is following. """
if direction != 'next':
raise ValueError("Following only supports 'next' - scrolling in one direction.")
if request is None or (request.idiom == 'iPad' and request.app_version_tuple <= (3, 1)):
user_ids = user.redis.new_following.zrange(0, -1)
pagination = None
else:
user_ids, pagination = _paginate(user.redis.new_following, offset, request=request)
users = UserDetails.from_ids(user_ids)
if request is None or request.app_version_tuple < (3, 0):
users = _sorted(users)
return _for_viewer(users, viewer=viewer), pagination
def counts(user):
return {
'followers': user.redis.new_followers.zcard(),
'following': user.redis.new_following.zcard(),
}
| 34.102273 | 92 | 0.65978 | 375 | 3,001 | 5.117333 | 0.245333 | 0.029182 | 0.03752 | 0.031266 | 0.517978 | 0.455446 | 0.398124 | 0.398124 | 0.398124 | 0.398124 | 0 | 0.007829 | 0.233922 | 3,001 | 87 | 93 | 34.494253 | 0.826881 | 0.04032 | 0 | 0.322034 | 0 | 0 | 0.062105 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.084746 | 0.033898 | 0.305085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
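The _paginate helper above pages a sorted set by score; the equivalent call with a plain redis-py client looks like this (key name, page size, and client setup are assumptions):
import redis

r = redis.Redis()
offset = 100.0  # score of the last item on the previous page
# '(' makes the max bound exclusive, so the page starts strictly below `offset`
page = r.zrevrangebyscore('followers', '({}'.format(offset), '-inf',
                          start=0, num=25, withscores=True)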
dfcaf8188821bfe0448579c92b86161cf07a8cb5 | 3,674 | py | Python | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | ["MIT"] | null | null | null | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | ["MIT"] | null | null | null | Python 3/PyGame/Matrix_based_3D/entities.py | DarkShadow4/python | 4cd94e0cf53ee06c9c31e9272572ca9656697c30 | ["MIT"] | 1 | 2020-08-19T17:25:22.000Z | 2020-08-19T17:25:22.000Z |
import numpy as np
def translationMatrix(dx=0, dy=0, dz=0):
return np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[dx, dy, dz, 1]])
def scalingMatrix(sx=1, sy=1, sz=1):
return np.array([[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]])
def rotateXmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[1, 0, 0, 0],
[0, c, -s, 0],
[0, s, c, 0],
[0, 0, 0, 1]])
def rotateYmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[ c, 0, s, 0],
[ 0, 1, 0, 0],
[-s, 0, c, 0],
[ 0, 0, 0, 1]])
def rotateZmatrix(radians):
c = np.cos(radians)
s = np.sin(radians)
return np.array([[c, -s, 0 ,0],
[s, c, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
class Entity(object):
"""docstring for Entity."""
def __init__(self, name="", type="", node_color=(0, 0, 0), edge_color=(255, 255, 255), node_radius=4):
super(Entity, self).__init__()
self.name = name
self.type = type
self.nodes = np.zeros((0, 4))
self.node_color = node_color
self.edge_color = edge_color
self.node_radius = node_radius
self.edges = []
####
self.initial_nodes = np.zeros((0, 4))
self.totalTransformations = {
"T":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RX":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RY":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"RZ":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ],
"S":[ [1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1] ]
}
####
def addNodes(self, nodes):
ones = np.ones((len(nodes), 1))
nodes = np.hstack((nodes, ones))
####
self.initial_nodes = np.vstack((self.initial_nodes, nodes))
self.nodes = np.dot(self.initial_nodes, self.totalTransformations["RY"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RX"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RZ"])
self.nodes = np.dot(self.nodes, self.totalTransformations["T"])
self.nodes = np.dot(self.nodes, self.totalTransformations["S"])
# centerX = sum(node[0] for node in self.nodes)/len(self.nodes)
# centerY = sum(node[1] for node in self.nodes)/len(self.nodes)
# centerZ = sum(node[2] for node in self.nodes)/len(self.nodes)
# self.center = (centerX, centerY, centerZ)
####
# self.nodes = np.vstack((self.nodes, nodes))
def addEdges(self, edges):
self.edges += edges
def transform(self, matrix, type):
self.totalTransformations[type] = np.dot(self.totalTransformations[type], matrix)
self.nodes = np.dot(self.initial_nodes, self.totalTransformations["RY"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RX"])
self.nodes = np.dot(self.nodes, self.totalTransformations["RZ"])
self.nodes = np.dot(self.nodes, self.totalTransformations["T"])
self.nodes = np.dot(self.nodes, self.totalTransformations["S"])
| 33.099099 | 106 | 0.459717 | 484 | 3,674 | 3.444215 | 0.136364 | 0.097181 | 0.091782 | 0.059988 | 0.557888 | 0.554889 | 0.526695 | 0.519496 | 0.454709 | 0.454709 | 0 | 0.072284 | 0.363636 | 3,674 | 110 | 107 | 33.4 | 0.640719 | 0.080022 | 0 | 0.481928 | 0 | 0 | 0.007156 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108434 | false | 0 | 0.012048 | 0.024096 | 0.192771 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
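A quick check of the row-vector convention used by entities.py above: nodes are homogeneous rows, so a transform multiplies from the right (self-contained, numpy only):
import numpy as np

def translationMatrix(dx=0, dy=0, dz=0):
    return np.array([[1, 0, 0, 0],
                     [0, 1, 0, 0],
                     [0, 0, 1, 0],
                     [dx, dy, dz, 1]])

point = np.array([[2.0, 3.0, 4.0, 1.0]])        # one node as a homogeneous row vector
print(np.dot(point, translationMatrix(dx=10)))  # [[12.  3.  4.  1.]]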
dfcda9e0f1ad0a543490dfbdc63f6f36b102ec00 | 1,258 | py | Python | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | ["MIT"] | 7 | 2021-02-26T14:35:17.000Z | 2021-02-26T21:21:58.000Z | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | ["MIT"] | 7 | 2021-02-26T14:44:30.000Z | 2021-06-02T14:27:17.000Z | setup.py | utix/django-json-api | 938f78f664a4ecbabb9e678595926d1a580f9d0c | ["MIT"] | 1 | 2021-02-26T20:10:42.000Z | 2021-02-26T20:10:42.000Z |
#!/usr/bin/env python
from os.path import join
from setuptools import find_packages, setup
# DEPENDENCIES
def requirements_from_pip(filename):
with open(filename, "r") as pip:
return [line.strip() for line in pip if not line.startswith("#") and line.strip()]
core_deps = requirements_from_pip("requirements.txt")
dev_deps = requirements_from_pip("requirements_dev.txt")
# DESCRIPTION
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
author="Sharework",
author_email="root@sharework.co",
description="JSON API specification for Django services",
extras_require={"all": dev_deps, "dev": dev_deps},
install_requires=core_deps,
long_description=long_description,
long_description_content_type="text/markdown",
name="django-json-api",
package_data={"django_json_api": ["resources/VERSION"]},
packages=find_packages(),
python_requires=">=3.8",
url="https://github.com/share-work/django-json-api",
version=open(join("django_json_api", "resources", "VERSION")).read().strip(),
classifiers=[
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| 29.952381 | 90 | 0.692369 | 160 | 1,258 | 5.2625 | 0.53125 | 0.041568 | 0.061758 | 0.054632 | 0.152019 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004739 | 0.161367 | 1,258 | 41 | 91 | 30.682927 | 0.793365 | 0.035771 | 0 | 0 | 0 | 0 | 0.31157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.068966 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfcf9bc6b50b9274d2e45ff7e0b6d1af9920cab0 | 1,632 | py | Python | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | ["Unlicense"] | 16 | 2020-12-01T15:26:58.000Z | 2022-02-24T23:12:14.000Z | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | ["Unlicense"] | 5 | 2021-02-20T10:30:00.000Z | 2021-06-01T21:12:31.000Z | youtube_dl/extractor/businessinsider.py | MOODesign/Youtube-videos-Download | 730c0d12a06f349907481570f1f2890251f7a181 | ["Unlicense"] | 7 | 2020-12-01T15:27:04.000Z | 2022-01-09T23:21:53.000Z |
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .jwplatform import JWPlatformIE
class BusinessInsiderIE(InfoExtractor):
_VALID_URL = r'https?://(?:[^/]+\.)?businessinsider\.(?:com|nl)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://uk.businessinsider.com/how-much-radiation-youre-exposed-to-in-everyday-life-2016-6',
'md5': 'ca237a53a8eb20b6dc5bd60564d4ab3e',
'info_dict': {
'id': 'hZRllCfw',
'ext': 'mp4',
'title': "Here's how much radiation you're exposed to in everyday life",
'description': 'md5:9a0d6e2c279948aadaa5e84d6d9b99bd',
'upload_date': '20170709',
'timestamp': 1499606400,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.businessinsider.nl/5-scientifically-proven-things-make-you-less-attractive-2017-7/',
'only_matching': True,
}, {
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
jwplatform_id = self._search_regex(
(r'data-media-id=["\']([a-zA-Z0-9]{8})',
r'id=["\']jwplayer_([a-zA-Z0-9]{8})',
r'id["\']?\s*:\s*["\']?([a-zA-Z0-9]{8})'),
webpage, 'jwplatform id')
return self.url_result(
'jwplatform:%s' % jwplatform_id, ie=JWPlatformIE.ie_key(),
video_id=video_id)
| 37.953488 | 112 | 0.571078 | 183 | 1,632 | 4.928962 | 0.540984 | 0.031042 | 0.01663 | 0.019956 | 0.080931 | 0.022173 | 0.022173 | 0 | 0 | 0 | 0 | 0.065375 | 0.240809 | 1,632 | 42 | 113 | 38.857143 | 0.662631 | 0.007966 | 0 | 0.108108 | 0 | 0.081081 | 0.411874 | 0.089054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.081081 | 0 | 0.216216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
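The extractor above tries three regexes for an 8-character jwplatform id; a standalone check of the first pattern (the sample markup is invented):
import re

webpage = '<div data-media-id="hZRllCfw"></div>'
match = re.search(r'data-media-id=["\']([a-zA-Z0-9]{8})', webpage)
print(match.group(1))  # hZRllCfw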
dfcfb445e47c75ccbf0dd0f1527b09b9571a8702 | 578 | py | Python | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | ["MIT"] | 1 | 2022-03-27T23:55:37.000Z | 2022-03-27T23:55:37.000Z | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | ["MIT"] | null | null | null | map_house.py | renankalfa/python-0-ao-Data_Scientist | 2f61e1cbb1c5565da53cc1cd9aa5c3f5d1cacc88 | ["MIT"] | null | null | null |
import plotly.express as px
import pandas as pd
data = pd.read_csv('kc_house_data.csv')
data_mapa = data[['id', 'lat', 'long', 'price']]
grafico1 = px.scatter_mapbox(data_mapa, lat='lat', lon='long',
hover_name='id', hover_data=['price'],
color_discrete_sequence=['fuchsia'],
zoom=3, height=300)
grafico1.update_layout(mapbox_style='open-street-map')
grafico1.update_layout(height=600, margin={'r': 0, 't': 0, 'l': 0, 'b': 0})
grafico1.show()
grafico1.write_html('map_house_rocket.html')
| 36.125 | 75 | 0.610727 | 79 | 578 | 4.265823 | 0.607595 | 0.047478 | 0.118694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.035635 | 0.223183 | 578 | 15 | 76 | 38.533333 | 0.714922 | 0 | 0 | 0 | 0 | 0 | 0.15917 | 0.036332 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfcfba95af54686ffe34f16d2ea3725de4ec6aa5 | 1,561 | py | Python | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | ["Apache-2.0"] | null | null | null | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | ["Apache-2.0"] | null | null | null | scripts/api-timeboard.py | ryhennessy/hiring-engineers | f151fb593a016b38b92767ce48d217c3d57c492a | ["Apache-2.0"] | 1 | 2019-02-06T00:09:36.000Z | 2019-02-06T00:09:36.000Z |
#!/usr/bin/python
from datadog import initialize, api
options = {
'api_key': '17370fa45ebc4a8184d3dde9f8189c38',
'app_key': 'b0d652bbd1d861656723c1a93bc1a2f22d493d57'
}
initialize(**options)
title = "Ryan Great Timeboard"
description = "My Timeboard that is super awesome"
graphs = [
{
"title": "My Metric over my host",
"definition": {
"requests": [
{
"q": "avg:my_metric{host:secondaryhost.hennvms.net}",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true",
"viz": "timeseries"
}
},
{
"title": "MySQL Anomaly Function Applied",
"definition": {
"viz": "timeseries",
"requests": [
{
"q": "anomalies(avg:mysql.performance.user_time{*}, 'basic', 2)",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true"
}
},
{
"title": "My Metric Rollup Function",
"definition": {
"viz": "query_value",
"requests": [
{
"q": "avg:my_metric{*}.rollup(sum, 60)",
"type": "line",
"style": {
"palette": "dog_classic",
"type": "solid",
"width": "normal"
},
"conditional_formats": [],
"aggregator": "avg"
}
],
"autoscale": "true"
}
}]
api.Timeboard.create(title=title, description=description, graphs=graphs)
| 20.539474 | 73 | 0.547726 | 139 | 1,561 | 6.064748 | 0.47482 | 0.03796 | 0.046263 | 0.071174 | 0.381969 | 0.33452 | 0.33452 | 0.33452 | 0.33452 | 0.33452 | 0 | 0.042278 | 0.257527 | 1,561 | 75 | 74 | 20.813333 | 0.685073 | 0.01025 | 0 | 0.457143 | 0 | 0 | 0.486399 | 0.123057 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.014286 | 0 | 0.014286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfd10fcf278a06e3edb0f59aed0bddac1ebc200d | 732 | py | Python | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | ["MIT"] | 27 | 2020-01-16T08:20:54.000Z | 2022-03-29T20:40:15.000Z | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | ["MIT"] | 10 | 2022-01-07T14:07:27.000Z | 2022-03-19T18:13:44.000Z | Playground/Spin.py | fountainment/cherry-soda | 3dd0eb7d0b5503ba572ff2104990856ef7a87495 | ["MIT"] | 6 | 2019-12-27T10:04:07.000Z | 2021-12-15T17:29:24.000Z |
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_spin(radius_scale, height_scale, rounds):
xs, ys, zs = [], [], []
theta = 0.0
delta = 0.1
twopi = math.pi * 2.0
for i in range(int(rounds * twopi / delta)):
theta += delta
radius = theta / twopi * radius_scale
x = np.cos(theta) * radius
y = np.sin(theta) * radius
xs.append(x)
ys.append(y)
zs.append(theta / twopi * height_scale)
return xs, ys, zs
def main():
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(*get_spin(1.0, 3.0, 5.0))
ax.plot(*get_spin(1.05, 3.15, 5.0))
plt.show()
if __name__ == '__main__':
main()
| 23.612903 | 49 | 0.579235 | 114 | 732 | 3.578947 | 0.464912 | 0.051471 | 0.029412 | 0.063725 | 0.068627 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043643 | 0.280055 | 732 | 30 | 50 | 24.4 | 0.73055 | 0 | 0 | 0 | 0 | 0 | 0.010929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.153846 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfd2a91be88a84783b35bd946e501cc258160953 | 1,911 | py | Python | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | ["0BSD"] | 1 | 2017-06-06T03:07:01.000Z | 2017-06-06T03:07:01.000Z | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | ["0BSD"] | 3 | 2016-12-19T21:09:53.000Z | 2017-02-14T03:32:18.000Z | bin/get_latest_rotation.py | rhots/automation | cfa97656885f4ff91e1c79af5eb8fa38a85c35a8 | ["0BSD"] | null | null | null |
import os.path
from bs4 import BeautifulSoup
import requests
# Location of file to store latest known page number
LAST_KNOWN_PAGE_FILE = "/tmp/rotation_checker_latest"
# URL of forum thread where latest rotations are posted
ROTATION_FORUM_THREAD = "https://us.battle.net/forums/en/heroes/topic/17936383460"
def write_last_known_page(page_num):
with open(LAST_KNOWN_PAGE_FILE, "w") as f:
f.write(str(page_num))
def read_last_known_page():
try:
with open(LAST_KNOWN_PAGE_FILE, "r") as f:
return int(f.read())
except OSError:
return 0
def is_404(html):
return "Page Not Found" in html
def load_page(page_num):
return requests.get(
ROTATION_FORUM_THREAD,
params={"page": page_num}
)
def load_latest_page(last_known_page=0):
if is_404(load_page(last_known_page+1).text):
return load_page(last_known_page)
else:
return load_latest_page(last_known_page+1)
def remove_slot_text(s):
    # Strip the trailing " (Slot unlocked at ...)" annotation when present.
    if "Slot unlocked at" not in s:
        return s
    return s.split(" (Slot unlocked at")[0]
def rotation_info_from_source(html):
soup = BeautifulSoup(html, 'html.parser')
latest_post_content = soup.select(".TopicPost-bodyContent")[-1]
header = latest_post_content.span.text
date = header.split("Rotation: ")[-1]
heroes = [remove_slot_text(li.text) for li in latest_post_content.find_all("li")]
return date, heroes
if __name__ == "__main__":
# read last known page number if we have it
last_known = read_last_known_page()
# load latest page, starting from last known page number
resp = load_latest_page(last_known)
# extract date and hero rotation
date, heroes = rotation_info_from_source(resp.text)
# write latest page number for future
page_num = int(resp.url.split("=")[-1])
write_last_known_page(page_num)
print(date)
print(heroes)
| 27.695652 | 85 | 0.697541 | 286 | 1,911 | 4.384615 | 0.34965 | 0.107656 | 0.134769 | 0.054226 | 0.176236 | 0.122807 | 0 | 0 | 0 | 0 | 0 | 0.017117 | 0.205128 | 1,911 | 68 | 86 | 28.102941 | 0.808427 | 0.140241 | 0 | 0 | 0 | 0 | 0.117359 | 0.030562 | 0 | 0 | 0 | 0 | 0 | 1 | 0.155556 | false | 0 | 0.066667 | 0.044444 | 0.422222 | 0.044444 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
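A minimal, self-contained version of the selector logic in rotation_info_from_source above (the HTML snippet is made up):
from bs4 import BeautifulSoup

html = '<div class="TopicPost-bodyContent"><span>Rotation: 2017-02-14</span><ul><li>Valla</li><li>Raynor</li></ul></div>'
soup = BeautifulSoup(html, 'html.parser')
post = soup.select('.TopicPost-bodyContent')[-1]
print(post.span.text.split('Rotation: ')[-1])   # 2017-02-14
print([li.text for li in post.find_all('li')])  # ['Valla', 'Raynor']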
dfd2e5bbf8ec59072195c98d519d767f6b535cb9 | 2,485 | py | Python | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | ["Apache-2.0"] | null | null | null | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | ["Apache-2.0"] | null | null | null | fidelis/credentials.py | semperstew/fidelis | 8766b1bfa5bac342faf61bf4302a0e822d0a0ec9 | ["Apache-2.0"] | null | null | null |
# fidelis/credentials.py
import datetime
import requests
import threading
from dateutil.tz import tzlocal
from collections import namedtuple
def _local_now():
return datetime.datetime.now(tzlocal())
class FidelisCredentials(object):
"""Object to hold authentication credentials"""
_default_token_timeout = 10 * 60
    def __init__(self, username, password, baseURL, token=None, ignoressl=False):
        self._username = username
        self._password = password
        self._token = token
        self._ignoressl = ignoressl
        self._time_fetcher = _local_now
        self._expiration = self._time_fetcher()
        # Assign last: the baseURL setter below calls _update_expiration(),
        # which needs _time_fetcher to exist already.
        self.baseURL = baseURL
        self._refresh_lock = threading.Lock()
        self.refresh()
    @property
    def token(self):
        return self._token
    @token.setter
    def token(self, value):
        self._token = value
    @property
    def baseURL(self):
        return self._baseURL
    @baseURL.setter
    def baseURL(self, value):
        # Store on a backing attribute; assigning to self.baseURL here would recurse.
        self._baseURL = value
        self._update_expiration()
def _refresh_needed(self, refresh_in=None):
"""Check if a token refresh is needed."""
if self._expiration is None:
return False
if refresh_in is None:
refresh_in = self._default_token_timeout
if self._seconds_remaining() >= refresh_in:
return False
return True
def _is_expired(self):
"""Check if token is expired"""
return self._refresh_needed(refresh_in=0)
def refresh(self, new_token=None):
if new_token is not None:
self._token = new_token
self._update_expiration()
if not self._is_expired():
return
else:
with self._refresh_lock:
self._protected_refresh()
def _protected_refresh(self):
"""Refresh bearer token"""
url= self.baseURL + 'authenticate'
body={'username': self._username, 'password': self._password}
headers={'Content-Type':'application/json'}
verify=self._ignoressl
r = requests.post(url=url, headers=headers, json=body, verify=verify)
        self._token = r.json()['token']  # requests.Response exposes .json(), not .data
self._update_expiration()
def _seconds_remaining(self):
"""Calculate remaining seconds until token expiration"""
delta = self._expiration - self._time_fetcher()
return delta.total_seconds()
def _update_expiration(self):
delta = datetime.timedelta(seconds=self._default_token_timeout)
self._expiration = self._time_fetcher() + delta
def __call__(self, r):
self.refresh()
r.headers['Authorization'] = "bearer " + self._token
return r
| 25.10101 | 79 | 0.695775 | 304 | 2,485 | 5.417763 | 0.266447 | 0.046752 | 0.03643 | 0.040073 | 0.052823 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002521 | 0.202012 | 2,485 | 98 | 80 | 25.357143 | 0.828038 | 0.08008 | 0 | 0.101449 | 0 | 0 | 0.035857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.188406 | false | 0.043478 | 0.072464 | 0.043478 | 0.434783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
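Because FidelisCredentials implements __call__(self, r), it can be handed to requests as an auth object; a usage sketch (import path, host, and credentials are placeholders):
import requests
from fidelis.credentials import FidelisCredentials

creds = FidelisCredentials('user', 'secret', 'https://fidelis.example.com/api/')
# __call__ refreshes the bearer token if needed and injects the Authorization header
resp = requests.get('https://fidelis.example.com/api/alerts', auth=creds)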
dfd45fc42c8fe07d08d4459f4ff51b022c580213 | 6,254 | py | Python | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | ["MIT"] | null | null | null | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | ["MIT"] | null | null | null | pos_wechat/tests/test_wechat_order.py | nahualventure/pos-addons | 3c911c28c259967fb74e311ddcc8e6ca032c005d | ["MIT"] | 3 | 2021-06-15T05:45:42.000Z | 2021-07-27T12:28:53.000Z |
# Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
import logging
from odoo.addons.point_of_sale.tests.common import TestPointOfSaleCommon
try:
from unittest.mock import patch
except ImportError:
from mock import patch
_logger = logging.getLogger(__name__)
DUMMY_AUTH_CODE = "134579302432164181"
DUMMY_POS_ID = 1
class TestWeChatOrder(TestPointOfSaleCommon):
at_install = True
post_install = True
def setUp(self):
super(TestWeChatOrder, self).setUp()
# create wechat journals
self.pos_config.init_pos_wechat_journals()
self.Order = self.env["wechat.order"]
self.Refund = self.env["wechat.refund"]
self.product1 = self.env["product.product"].create({"name": "Product1"})
self.product2 = self.env["product.product"].create({"name": "Product2"})
def _patch_post(self, post_result):
def post(url, data):
self.assertIn(url, post_result)
_logger.debug("Request data for %s: %s", url, data)
return post_result[url]
# patch wechat
patcher = patch("wechatpy.pay.base.BaseWeChatPayAPI._post", wraps=post)
patcher.start()
self.addCleanup(patcher.stop)
def _create_pos_order(self):
def compute_tax(product, price, qty=1, taxes=None):
if taxes is None:
taxes = product.taxes_id.filtered(
lambda t: t.company_id.id == self.env.user.id
)
currency = self.pos_config.pricelist_id.currency_id
res = taxes.compute_all(price, currency, qty, product=product)
untax = res["total_excluded"]
return untax, sum(tax.get("amount", 0.0) for tax in res["taxes"])
# I click on create a new session button
self.pos_config.open_session_cb()
# I create a PoS order with 2 units of PCSC234 at 450 EUR
# and 3 units of PCSC349 at 300 EUR.
untax1, atax1 = compute_tax(self.product3, 450, 2)
untax2, atax2 = compute_tax(self.product4, 300, 3)
order = self.PosOrder.create(
{
"company_id": self.company_id,
"pricelist_id": self.partner1.property_product_pricelist.id,
"partner_id": self.partner1.id,
"lines": [
(
0,
0,
{
"name": "OL/0001",
"product_id": self.product3.id,
"price_unit": 450,
"discount": 0.0,
"qty": 2.0,
"tax_ids": [(6, 0, self.product3.taxes_id.ids)],
"price_subtotal": untax1,
"price_subtotal_incl": untax1 + atax1,
},
),
(
0,
0,
{
"name": "OL/0002",
"product_id": self.product4.id,
"price_unit": 300,
"discount": 0.0,
"qty": 3.0,
"tax_ids": [(6, 0, self.product4.taxes_id.ids)],
"price_subtotal": untax2,
"price_subtotal_incl": untax2 + atax2,
},
),
],
"amount_tax": atax1 + atax2,
"amount_total": untax1 + untax2 + atax1 + atax2,
"amount_paid": 0,
"amount_return": 0,
}
)
return order
def _create_wechat_order(self):
post_result = {
"pay/unifiedorder": {
"code_url": "weixin://wxpay/s/An4baqw",
"trade_type": "NATIVE",
"result_code": "SUCCESS",
}
}
self.lines = [
{
"product_id": self.product1.id,
"name": "Product 1 Name",
"quantity": 2,
"price": 450,
"category": "123456",
"description": "翻译服务器错误",
},
{
"product_id": self.product2.id,
"name": "Product 2 Name",
"quantity": 3,
"price": 300,
"category": "123456",
"description": "網路白目哈哈",
},
]
self._patch_post(post_result)
order, code_url = self.Order._create_qr(self.lines, total_fee=300)
self.assertEqual(order.state, "draft", "Just created order has wrong state")
return order
def test_refund(self):
# Order are not really equal because I'm lazy
# Just imagine that they are correspond each other
order = self._create_pos_order()
wechat_order = self._create_wechat_order()
order.wechat_order_id = wechat_order.id
# patch refund api request
post_result = {
"secapi/pay/refund": {"trade_type": "NATIVE", "result_code": "SUCCESS"}
}
self._patch_post(post_result)
# I create a refund
refund_action = order.refund()
refund = self.PosOrder.browse(refund_action["res_id"])
wechat_journal = self.env["account.journal"].search([("wechat", "=", "native")])
payment_context = {"active_ids": refund.ids, "active_id": refund.id}
refund_payment = self.PosMakePayment.with_context(**payment_context).create(
{
"amount": refund.amount_total,
"journal_id": wechat_journal.id,
"wechat_order_id": wechat_order.id,
}
)
# I click on the validate button to register the payment.
refund_payment.with_context(**payment_context).check()
self.assertEqual(refund.state, "paid", "The refund is not marked as paid")
self.assertEqual(
wechat_order.state,
"refunded",
"Wechat Order state is not changed after making refund payment",
)
| 35.942529 | 88 | 0.512312 | 635 | 6,254 | 4.861417 | 0.32126 | 0.035633 | 0.016845 | 0.014577 | 0.097182 | 0.068675 | 0.023324 | 0 | 0 | 0 | 0 | 0.035088 | 0.380237 | 6,254 | 173 | 89 | 36.150289 | 0.761352 | 0.077071 | 0 | 0.115942 | 0 | 0 | 0.162876 | 0.011113 | 0 | 0 | 0 | 0 | 0.028986 | 1 | 0.050725 | false | 0 | 0.036232 | 0 | 0.137681 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfd6670490ad28a09d2ea2ea84c8564b4b85c4b8 | 582 | py | Python | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | ["Apache-2.0"] | null | null | null | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | ["Apache-2.0"] | 53 | 2017-01-28T00:03:57.000Z | 2022-03-23T21:57:13.000Z | docker/settings.py | uw-it-aca/course-roster-lti | 599dad70e06bc85d3d862116c00e8ecf0e2e9c8c | ["Apache-2.0"] | null | null | null |
from .base_settings import *
INSTALLED_APPS += [
'course_roster.apps.CourseRosterConfig',
'compressor',
]
COMPRESS_ROOT = '/static/'
COMPRESS_PRECOMPILERS = (('text/less', 'lessc {infile} {outfile}'),)
COMPRESS_OFFLINE = True
STATICFILES_FINDERS += ('compressor.finders.CompressorFinder',)
if os.getenv('ENV', 'localdev') == 'localdev':
DEBUG = True
RESTCLIENTS_DAO_CACHE_CLASS = None
else:
RESTCLIENTS_DAO_CACHE_CLASS = 'course_roster.cache.IDCardPhotoCache'
COURSE_ROSTER_PER_PAGE = 50
IDCARD_PHOTO_EXPIRES = 60 * 60 * 2
IDCARD_TOKEN_EXPIRES = 60 * 60 * 2
| 26.454545 | 72 | 0.735395 | 68 | 582 | 5.985294 | 0.676471 | 0.088452 | 0.093366 | 0.117936 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023952 | 0.139175 | 582 | 21 | 73 | 27.714286 | 0.788423 | 0 | 0 | 0 | 0 | 0 | 0.305842 | 0.185567 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfdf748f02b9b943852a62e3f8521187d01d62ea | 2,175 | py | Python | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | ["MIT"] | 1 | 2018-03-15T17:08:11.000Z | 2018-03-15T17:08:11.000Z | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | ["MIT"] | 1 | 2018-02-28T21:26:04.000Z | 2018-03-01T07:19:05.000Z | app/__init__.py | muthash/Weconnect-api | d3434c99b96a911258dfb8e3ff68696a2021a64b | ["MIT"] | 1 | 2018-03-09T03:45:22.000Z | 2018-03-09T03:45:22.000Z |
""" The create_app function wraps the creation of a new Flask object, and
returns it after it's loaded up with configuration settings
using app.config
"""
from flask import jsonify
from flask_api import FlaskAPI
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
from flask_jwt_extended import JWTManager
from flask_mail import Mail
from instance.config import app_config
db = SQLAlchemy()
jwt = JWTManager()
mail = Mail()
def create_app(config_name):
"""Function wraps the creation of a new Flask object, and returns it after it's
loaded up with configuration settings
"""
app = FlaskAPI(__name__, instance_relative_config=True)
cors = CORS(app)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
db.init_app(app)
jwt.init_app(app)
mail.init_app(app)
from app.auth.views import auth
from app.business.views import biz
from app.reviews.views import rev
from app.search.views import search
from app.models import BlacklistToken
@app.errorhandler(400)
def bad_request(error):
"""Error handler for a bad request"""
return jsonify(dict(error='The Server did not understand' +
'the request')), 400
@app.errorhandler(404)
def not_found(error):
"""Error handler for not found page"""
return jsonify(dict(error='The Resource is not available')), 404
@app.errorhandler(405)
def method_not_allowed(error):
"""Error handler for wrong method to an endpoint"""
return jsonify(dict(error='The HTTP request Method' +
' is not allowed')), 405
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
"""Check if token is blacklisted"""
jti = decrypted_token['jti']
blacklist = BlacklistToken.query.filter_by(token=jti).first()
if blacklist is None:
return False
return blacklist.revoked
app.register_blueprint(auth)
app.register_blueprint(biz)
app.register_blueprint(rev)
app.register_blueprint(search)
return app
| 31.521739 | 83 | 0.686897 | 291 | 2,175 | 4.993127 | 0.333333 | 0.037164 | 0.055059 | 0.041294 | 0.181005 | 0.129387 | 0.129387 | 0.129387 | 0.129387 | 0.129387 | 0 | 0.010772 | 0.231724 | 2,175 | 68 | 84 | 31.985294 | 0.858767 | 0.184828 | 0 | 0 | 0 | 0 | 0.069226 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.26087 | 0 | 0.5 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
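A usage sketch for the application factory above (the config name is an assumption; it must be a key defined in instance.config.app_config):
from app import create_app

app = create_app('development')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)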
dfe01ac7f7adb258b0362ce750c15bb90b3ecb5f | 520 | py | Python | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | ["MIT"] | 25 | 2016-02-26T17:35:19.000Z | 2021-08-17T10:30:14.000Z | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | ["MIT"] | 5 | 2016-04-27T16:52:46.000Z | 2021-04-24T10:06:16.000Z | run.py | bayusetiawan01/poj | 9c205ce298a2b3ca0d9c00b7d4a3fd05fecf326a | ["MIT"] | 6 | 2016-04-27T16:50:13.000Z | 2021-04-03T06:27:41.000Z |
import sys
import subprocess
if __name__ == "__main__":
try:
executable = sys.argv[1]
input_filename = sys.argv[2]
output_filename = sys.argv[3]
tl = sys.argv[4]
except IndexError:
sys.exit(-1)
input_file = open(input_filename, "r")
output_file = open(output_filename, "w")
returncode = subprocess.call(["timeout", tl, "./{}".format(executable)], stdin = input_file, stdout = output_file)
print(returncode)
input_file.close()
output_file.close()
| 27.368421 | 118 | 0.634615 | 64 | 520 | 4.875 | 0.5 | 0.089744 | 0.096154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0125 | 0.230769 | 520 | 18 | 119 | 28.888889 | 0.7675 | 0 | 0 | 0 | 0 | 0 | 0.040385 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
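Given the argv parsing above, the runner takes an executable name, input and output paths, and a time limit in seconds; an illustrative invocation (file names are placeholders):
# python run.py solution input.txt output.txt 2
# wraps roughly:  timeout 2 ./solution < input.txt > output.txt
# and prints the child's return code (124 is GNU timeout's "timed out" code)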
dfe159b3edd0ee9b633d2a90e3ddecd214d799b8 | 4,580 | py | Python | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | ["CC0-1.0"] | null | null | null | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | ["CC0-1.0"] | null | null | null | Version Autonome/main.py | chiudidier/ProjetBloc5 | 214f401e5b35bc5894ecc3d20f338762b689f2ca | ["CC0-1.0"] | null | null | null |
from taquin import *
from random import *
from math import *
# main
'''
old: former way of shuffling, which manipulates the puzzle directly. More for the students to code, and useful for experimenting with search depth
montxt='012345678'# initial position = solution
montaquin=Taquin(montxt)# create the puzzle
# shuffle by playing 15 random moves from the initial position, to guarantee that the resulting position is solvable.
while montaquin.gagnant():
    montaquin.melanger(15)
'''
continuer=True
while continuer:
    '''
    #old: former way of shuffling, which manipulates the puzzle directly. More for the students to code, and useful for experimenting with search depth
    montxt='012345678'# initial position = solution
    montaquin=Taquin(montxt)# create the puzzle
    # shuffle by playing 15 random moves from the initial position, to guarantee that the resulting position is solvable.
    while montaquin.estGagnant():
        montaquin.melanger(15)
    '''
    # random creation of the initial puzzle; only use this together with IDA
    montxt=random_init('012345678')# initial position built from a random layout whose solvability can be checked
    montaquin=Taquin(montxt)# create the puzzle
    print(montaquin)
    # arbitrarily chosen value: a larger one produces harder puzzles
    if nbcoup(montxt) > 8 :
        print('sorry, we cannot solve this puzzle in a reasonable amount of time')
    else:
        while not montaquin.estGagnant():# main game loop; exits once the puzzle is solved
            chemin=[]
            '''
            #BFS version
            # caution: do not use this version with random puzzle generation; use the shuffle based on random moves from the solution instead.
            reste=bfs(montaquin.etat)# compute the minimum depth of the solution
            print(reste,' moves at least to finish.')# display the hint
            #end of BFS version
            '''
            '''
            #DLS version = BFS+DFS
            # caution: do not use this version with random puzzle generation; use the shuffle based on random moves from the solution instead.
            reste=bfs(montaquin.etat)# compute the minimum depth of the solution
            dls(reste,montaquin.etat,0,chemin) #DLS version = DFS + BFS; same caution as above about random puzzle generation.
            #end of DLS version
            '''
            '''
            #IDS version = iterated DLS
            # caution: do not use this version with random puzzle generation; use the shuffle based on random moves from the solution instead.
            #ids(montaquin.etat,chemin)
            #end of IDS version
            '''
            '''
            #IDA version: computes the minimum depth of the solution; the parameters are not indispensable but improve the readability of the code
            '''
            ida(montaquin.etat,chemin)
            # this part is usable with the IDS, DFS and IDA versions
            print('solution = ', chemin)# display the successive states of the solution
            print('number of moves to the solution',len(chemin))
            nextmove=chemin.pop()
            nexttaquin=Taquin(nextmove)
            print('best next move:')
            print(comparetaquins(montaquin,nexttaquin))# display the next move
            #end of the solution part
            # record the player's move (the keys h,b,d,g come from the French haut/bas/droite/gauche = up/down/right/left)
            move=input('\n which move do you want to play (h,b,d,g): ')# ask for the move to play and apply it
            if move=='h':
                montaquin.haut()
            elif move=='b':
                montaquin.bas()
            elif move=='d':
                montaquin.droite()
            elif move=='g':
                montaquin.gauche()
            print(montaquin)
            # end of the player's move
        print('Well done, you won!')
    reponse=input('Do you want to play again? o/n (o = yes): ')
    if reponse == 'n':
        continuer=False
print('Goodbye')
| 24.491979 | 224 | 0.606332 | 533 | 4,580 | 5.208255 | 0.322702 | 0.032421 | 0.020173 | 0.025937 | 0.477666 | 0.477666 | 0.45317 | 0.45317 | 0.45317 | 0.45317 | 0 | 0.012092 | 0.331878 | 4,580 | 186 | 225 | 24.623656 | 0.895098 | 0.129039 | 0 | 0.057143 | 0 | 0 | 0.164326 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.085714 | 0 | 0.085714 | 0.257143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
dfe30fcd6927ef89f0f16539956bef3a4837e607 | 1,336 | py | Python | tests/filestack_helpers_test.py | SanthoshBala18/filestack-python | db55f3a27a4d073e1ba33d3d09a3def8da1a25e4 | ["Apache-2.0"] | 47 | 2017-01-28T12:27:18.000Z | 2021-07-02T16:29:04.000Z | tests/filestack_helpers_test.py | malarozi/filestack-python | 7109a9c20225532c95f0204d12649137c0de01a1 | ["Apache-2.0"] | 36 | 2017-01-25T23:48:33.000Z | 2022-01-29T22:33:12.000Z | tests/filestack_helpers_test.py | malarozi/filestack-python | 7109a9c20225532c95f0204d12649137c0de01a1 | ["Apache-2.0"] | 24 | 2017-01-24T23:57:32.000Z | 2022-01-29T22:34:34.000Z |
import pytest
from filestack.helpers import verify_webhook_signature
@pytest.mark.parametrize('signature, expected_result', [
('57cbb25386c3d6ff758a7a75cf52ba02cf2b0a1a2d6d5dfb9c886553ca6011cb', True),
('incorrect-signature', False),
])
def test_webhook_verification(signature, expected_result):
secret = 'webhook-secret'
body = b'{"text": {"filename": "filename.jpg", "key": "kGaeljnga9wkysK6Z_filename.jpg"}}'
headers = {
'FS-Signature': signature,
'FS-Timestamp': 123456789999
}
result, details = verify_webhook_signature(secret, body, headers)
assert result is expected_result
if expected_result is False:
assert 'Signature mismatch' in details['error']
@pytest.mark.parametrize('secret, body, headers, err_msg', [
('hook-secret', b'body', 'should be a dict', 'value is not a dict'),
(1, b'body', {'FS-Signature': 'abc', 'FS-Timestamp': 123}, 'value is not a string'),
('hook-secret', b'', {'FS-Timestamp': 123}, 'fs-signature header is missing'),
('hook-secret', ['incorrect'], {'FS-Signature': 'abc', 'FS-Timestamp': 123}, 'Invalid webhook body'),
])
def test_argument_validation(secret, body, headers, err_msg):
result, details = verify_webhook_signature(secret, body, headers)
assert result is False
assert err_msg in details['error']
| 40.484848 | 105 | 0.695359 | 156 | 1,336 | 5.839744 | 0.358974 | 0.054885 | 0.074643 | 0.05708 | 0.256861 | 0.206367 | 0.144896 | 0.144896 | 0.144896 | 0.144896 | 0 | 0.053333 | 0.157934 | 1,336 | 32 | 106 | 41.75 | 0.756444 | 0 | 0 | 0.148148 | 0 | 0 | 0.378743 | 0.073353 | 0 | 0 | 0 | 0 | 0.148148 | 1 | 0.074074 | false | 0 | 0.074074 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
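The signature in the test above has the shape of an HMAC-SHA256 hex digest over the raw body; a generic sketch of such a check (this mirrors the idea only, not necessarily filestack's exact signing input):
import hashlib
import hmac

secret = 'webhook-secret'
body = b'{"text": {"filename": "filename.jpg", "key": "kGaeljnga9wkysK6Z_filename.jpg"}}'
digest = hmac.new(secret.encode(), body, hashlib.sha256).hexdigest()
print(hmac.compare_digest(digest, digest))  # constant-time comparison -> True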
dfe42600497b94099e0a72123b092ceef56b943a | 4,558 | py | Python | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | ["MIT"] | null | null | null | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | ["MIT"] | null | null | null | pattern/check_multiples.py | Lostefra/TranslationCoherence | b7b09c475cc78842d9724161a8cbee372d41da08 | ["MIT"] | null | null | null |
import rdflib
from rdflib.term import URIRef
from utilities.utility_functions import prefix
from utilities import constants
def has_equivalent(node, graph):
equivalents = list(graph.subjects(predicate=constants.EQUIVALENCE_PREDICATE, object=node)) + \
list(graph.subjects(predicate=constants.SYNONYMY_PREDICATE, object=node)) + \
list(graph.objects(subject=node, predicate=constants.EQUIVALENCE_PREDICATE)) + \
list(graph.objects(subject=node, predicate=constants.SYNONYMY_PREDICATE))
if equivalents:
return True
return False
# def multiple_classified(node1, node2, n, result_graph):
# expressions = result_graph.subject_objects(predicate=n.differentExpression)
# exprs_1, exprs_2 = [], []
# list_exprs = list(map(list, zip(*expressions)))
# if list_exprs:
# exprs_1, exprs_2 = list_exprs[0], list_exprs[1]
# # print(f"{exprs_1}, {exprs_2}")
# return any([(expr_1, n.involves_node, node1) in result_graph for expr_1 in exprs_1 + exprs_2]) or \
# any([(expr_2, n.involves_node, node2) in result_graph for expr_2 in exprs_2 + exprs_1])
# return False
def check_multiples(g1, g2, n, result_graph, indexes, lemmas, frontiers, new_frontiers):
# Check for pattern "several"
# fred:number_1 fred:numberOf | bunchOf | seriesOf, quant:hasQuantifier quant:multiple | quant:some, hasQuality fred:Several
multiples = ['number', 'bunch', 'series', 'array', 'collection', 'group', 'amount']
quantifiers = ['multiple', 'some', 'many']
quant_predicate = URIRef(constants.NAMESPACES['quant'] + 'hasQuantifier')
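# The two passes below are symmetric: first g2 carries the quantifier and g1
# the collective noun ("a number of X"), then the roles are swapped.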
for node1, node2 in frontiers:
objs = list(g2.objects(subject=node2, predicate=quant_predicate))
if any([q in obj for q in quantifiers for obj in objs]):# and not multiple_classified(node1, node2, n, result_graph):
# print(f"OBJS: {[prefix(o2, g2) for o2 in objs]}")
for s1, p1 in g1.subject_predicates(object=node1):
if not has_equivalent(s1, result_graph):
for m in multiples:
if m in prefix(p1, g1):# and any([q in prefix(o2,g2) for q in quantifiers for o2 in objs]):
# Create a hierarchy relationship
# "multiples_i" is a reification of a N-ary relationship
expr_1 = "expression_" + next(indexes["expressions"])
expr_2 = "expression_" + next(indexes["expressions"])
result_graph.add((n[expr_1], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_2], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_1], n.involvesNoun, node1))
result_graph.add((n[expr_1], n.involvesMultiple, s1))
result_graph.add((n[expr_2], n.involvesNoun, node2))
for obj in objs:
result_graph.add((n[expr_2], quant_predicate, obj))
result_graph.add((n[expr_1], n.differentExpression, n[expr_2]))
# print("FOUND", prefix(node1, g1), prefix(p1, g1), prefix(node2, g2), [prefix(o2, g2) for o2 in objs])
objs = list(g1.objects(subject=node1, predicate=quant_predicate))
if any([q in obj for q in quantifiers for obj in objs]):# and not multiple_classified(node1, node2, n, result_graph):
# print(f"OBJS: {[prefix(o1, g1) for o1 in objs]}")
for s2,p2 in g2.subject_predicates(object=node2):
if not has_equivalent(s2, result_graph):
for m in multiples:
if m in prefix(p2, g2):# and any([q in prefix(o1,g1) for q in quantifiers for o1 in objs]):
# Create a hierarchy relationship
# "multiples_i" is a reification of a N-ary relationship
expr_1 = "expression_" + next(indexes["expressions"])
expr_2 = "expression_" + next(indexes["expressions"])
result_graph.add((n[expr_1], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_2], constants.TYPE_PREDICATE,
rdflib.term.URIRef(constants.NAMESPACES["translation_coherence_vocabulary"] + "Expression")))
result_graph.add((n[expr_1], n.involvesNoun, node1))
for obj in objs:
result_graph.add((n[expr_1], quant_predicate, obj))
result_graph.add((n[expr_2], n.involvesNoun, node2))
result_graph.add((n[expr_2], n.involvesMultiple, s2))
result_graph.add((n[expr_1], n.differentExpression, n[expr_2]))
# print("FOUND", prefix(node2, g2), prefix(p2, g2), prefix(node1, g1), [prefix(o1, g1) for o1 in objs])
| 54.915663
| 125
| 0.698113
| 634
| 4,558
| 4.856467
| 0.173502
| 0.08217
| 0.063657
| 0.068204
| 0.640468
| 0.569665
| 0.555375
| 0.476129
| 0.465086
| 0.420266
| 0
| 0.027213
| 0.169592
| 4,558
| 82
| 126
| 55.585366
| 0.786262
| 0.306713
| 0
| 0.45283
| 0
| 0
| 0.106254
| 0.040842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.075472
| 0
| 0.150943
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfe4a5779ee044b60ee7d70e0fc7668e972bffae
| 5,875
|
py
|
Python
|
app/main/views.py
|
Ammoh-Moringa/pitches
|
2551f2c8e323066ebdde3f92046368d7c7759fa6
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Ammoh-Moringa/pitches
|
2551f2c8e323066ebdde3f92046368d7c7759fa6
|
[
"MIT"
] | null | null | null |
app/main/views.py
|
Ammoh-Moringa/pitches
|
2551f2c8e323066ebdde3f92046368d7c7759fa6
|
[
"MIT"
] | null | null | null |
from flask import render_template, request, redirect, url_for, abort
from flask_login import login_required, current_user
from . forms import PitchForm, CommentForm, CategoryForm
from .import main
from .. import db
from ..models import User, Pitch, Comments, PitchCategory, Votes
#display categories on the landing page
@main.route('/')
def index():
"""
View root page function that returns index page
"""
all_category = PitchCategory.get_categories()
all_pitches = Pitch.query.order_by('id').all()
print(all_pitches)
title = 'Home- Welcome'
return render_template('index.html', title = title, categories=all_category, all_pitches=all_pitches)
#Route for adding a new pitch
@main.route('/pitch/newpitch',methods= ['POST','GET'])
@login_required
def newPitch():
pitch = PitchForm()
if pitch.validate_on_submit():
title = pitch.pitch_title.data
category = pitch.pitch_category.data
yourPitch = pitch.pitch_comment.data
#update pitch instance
newPitch = Pitch(pitch_title = title,pitch_category = category,pitch_comment = yourPitch,user= current_user)
#save pitch
newPitch.save_pitch()
return redirect(url_for('.index'))
title = 'NEW PITCH'
return render_template('new_pitch.html',title = title,pitchform = pitch)
@main.route('/categories/<int:id>')
def category(id):
category = PitchCategory.query.get(id)
if category is None:
abort(404)
pitches=Pitch.get_pitches(id)
return render_template('category.html', pitches=pitches, category=category)
@main.route('/add/category', methods=['GET','POST'])
@login_required
def new_category():
"""
View new group route function that returns a page with a form to create a category
"""
form = CategoryForm()
if form.validate_on_submit():
name = form.name.data
new_category = PitchCategory(name = name)
new_category.save_category()
return redirect(url_for('.index'))
title = 'New category'
return render_template('new_category.html', category_form = form, title = title)
#view single pitch alongside its comments
@main.route('/comment/<int:id>',methods= ['POST','GET'])
@login_required
def viewPitch(id):
onepitch = Pitch.getPitchId(id)
comments = Comments.get_comments(id)
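# "like"/"dislike" query arguments bump the counters and redirect back to this pitch.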
if request.args.get("like"):
onepitch.likes = onepitch.likes + 1
db.session.add(onepitch)
db.session.commit()
return redirect("/comment/{pitch_id}".format(pitch_id=category.id))
elif request.args.get("dislike"):
onepitch.dislikes = onepitch.dislikes + 1
db.session.add(onepitch)
db.session.commit()
return redirect("/comment/{pitch_id}".format(pitch_id=category.id))
commentForm = CommentForm()
if commentForm.validate_on_submit():
opinion = commentForm.opinion.data
newComment = Comments(opinion = opinion,user = current_user,pitches_id= id)
newComment.save_comment()
return render_template('comments.html',commentForm = commentForm,comments = comments,pitch = onepitch)
#adding a comment
@main.route('/write_comment/<int:id>', methods=['GET', 'POST'])
@login_required
def post_comment(id):
"""
Function to post comments
"""
form = CommentForm()
title = 'post comment'
pitches = Pitch.query.filter_by(id=id).first()
if pitches is None:
abort(404)
if form.validate_on_submit():
opinion = form.opinion.data
new_comment = Comments(opinion = opinion, user_id = current_user.id, pitches_id = pitches.id)
new_comment.save_comment()
return redirect(url_for('.viewPitch', id = pitches.id))
return render_template('post_comment.html', comment_form = form, title = title)
@main.route('/category/interview',methods= ['GET'])
def displayInterviewCategory():
interviewPitches = Pitch.get_pitches('interview')
return render_template('interviews.html',interviewPitches = interviewPitches)
@main.route('/category/product',methods= ['POST','GET'])
def displayProductCategory():
productPitches = Pitch.get_pitches('product')
return render_template('product.html',productPitches = productPitches)
@main.route('/category/promotion',methods= ['POST','GET'])
def displayPromotionCategory():
promotionPitches = Pitch.get_pitches('promotion')
return render_template('promotion.html',promotionPitches = promotionPitches)
@main.route('/category/pickup',methods= ['POST','GET'])
def displayPickupCategory():
pickupPitches = Pitch.get_pitches('pickup')
return render_template('pickup.html',pickupPitches = pickupPitches)
#Routes upvoting/downvoting pitches
@main.route('/pitch/upvote/<int:id>&<int:vote_type>')
@login_required
def upvote(id,vote_type):
"""
View function that adds one to the vote_number column in the votes table
"""
# Query for user
votes = Votes.query.filter_by(user_id=current_user.id).all()
print(f'The new vote is {votes}')
to_str=f'{vote_type}:{current_user.id}:{id}'
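# Comparing f'{vote}' with to_str assumes Votes.__repr__ renders as
# "vote:user_id:pitches_id"; duplicate votes are rejected on that basis.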
print(f'The current vote is {to_str}')
if not votes:
new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
new_vote.save_vote()
# print(len(count_likes))
print('YOU HAVE new VOTED')
for vote in votes:
if f'{vote}' == to_str:
print('YOU CANNOT VOTE MORE THAN ONCE')
break
else:
new_vote = Votes(vote=vote_type, user_id=current_user.id, pitches_id=id)
new_vote.save_vote()
print('YOU HAVE VOTED')
break
# count_likes = Votes.query.filter_by(pitches_id=id, vote=1).all()
# upvotes=len(count_likes)
# count_dislikes = Votes.query.filter_by(pitches_id=id, vote=2).all()
return redirect(url_for('.viewPitch', id=id))
| 30.759162
| 116
| 0.683574
| 735
| 5,875
| 5.308844
| 0.191837
| 0.039467
| 0.051256
| 0.020502
| 0.201179
| 0.170169
| 0.139416
| 0.099436
| 0.082522
| 0.082522
| 0
| 0.002106
| 0.19166
| 5,875
| 190
| 117
| 30.921053
| 0.819541
| 0.104681
| 0
| 0.198276
| 0
| 0
| 0.135172
| 0.018319
| 0
| 0
| 0
| 0
| 0
| 1
| 0.094828
| false
| 0
| 0.051724
| 0
| 0.284483
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfe61cbf2bb3b5a52f9141b8b81d778c054609e4
| 10,299
|
py
|
Python
|
networks/classes/centernet/models/ModelCenterNet.py
|
ALIENK9/Kuzushiji-recognition
|
a18c1fbfa72b6bbbcfe4004148cd0e90531acf6b
|
[
"MIT"
] | 2
|
2019-09-15T08:52:38.000Z
|
2019-09-15T08:58:58.000Z
|
networks/classes/centernet/models/ModelCenterNet.py
|
MatteoRizzo96/CognitiveServices
|
a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3
|
[
"MIT"
] | null | null | null |
networks/classes/centernet/models/ModelCenterNet.py
|
MatteoRizzo96/CognitiveServices
|
a5efeb8f585ae2ee0465ab25e587c4db0e2b32b3
|
[
"MIT"
] | 2
|
2020-11-06T07:29:56.000Z
|
2020-11-06T07:33:27.000Z
|
import glob
import os
import pandas as pd
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from typing import Dict, List, Union, Tuple
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.callbacks import ModelCheckpoint, TensorBoard, LearningRateScheduler
from networks.classes.centernet.datasets.ClassificationDataset import ClassificationDataset
class ModelCenterNet:
def __init__(self, logs: Dict):
self.__logs = logs
self.__input_width: int = None
self.__input_height: int = None
def build_model(self,
model_generator,
input_shape: Tuple[int, int, int], mode: str,
n_category: int = 1) -> tf.keras.Model:
"""
Builds the network.
:param model_generator: a generator for the network
:param input_shape: the shape of the input images
:param mode: the type of model that must be generated
:param n_category: the number of categories (possible classes). Defaults to 1 in order to detect the
presence or absence of an object only (and not its label).
:return: a Keras model
"""
self.__input_width = input_shape[0]
self.__input_height = input_shape[1]
self.__logs['execution'].info('Building {} model...'.format(mode))
return model_generator.generate_model(input_shape, mode, n_category)
@staticmethod
def setup_callbacks(weights_log_path: str, batch_size: int, lr: float) -> List[
tf.keras.callbacks.Callback]:
"""
Sets up the callbacks for the training of the model.
"""
# Setup callback to save the best weights after each epoch
checkpointer = ModelCheckpoint(filepath=os.path.join(weights_log_path,
'weights.{epoch:02d}-{val_loss:.2f}.hdf5'),
verbose=0,
save_best_only=True,
save_weights_only=True,
monitor='val_loss',
mode='min')
tensorboard_log_dir = os.path.join(weights_log_path, 'tensorboard')
# update_freq is set to batch_size * 10: per-epoch logging would be too
# infrequent and per-batch logging too frequent
tensorboard = TensorBoard(log_dir=tensorboard_log_dir,
write_graph=True,
histogram_freq=0,
write_grads=True,
write_images=False,
batch_size=batch_size,
update_freq=batch_size * 10)
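# Step decay: full lr for epochs 0-6, lr/5 for epochs 7-10, lr/10 afterwards.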
def lrs(epoch):
if epoch > 10:
return lr / 10
elif epoch > 6:
return lr / 5
else:
return lr
lr_schedule = LearningRateScheduler(lrs, verbose=1)
return [tensorboard, checkpointer, lr_schedule]
def restore_weights(self,
model: tf.keras.Model,
init_epoch: int,
weights_folder_path: str) -> None:
"""
Restores the weights from an existing weights file
:param model:
:param init_epoch:
:param weights_folder_path:
"""
init_epoch_str = '0' + str(init_epoch) if init_epoch < 10 else str(init_epoch)
restore_path_reg = os.path.join(weights_folder_path, 'weights.{}-*.hdf5'.format(init_epoch_str))
list_files = glob.glob(restore_path_reg)
assert len(list_files) > 0, \
'ERR: No weights file match provided name {}'.format(restore_path_reg)
# Take real filename
restore_filename = list_files[0].split('/')[-1]
restore_path = os.path.join(weights_folder_path, restore_filename)
assert os.path.isfile(restore_path), \
'ERR: Weight file in path {} seems not to be a file'.format(restore_path)
self.__logs['execution'].info("Restoring weights in file {}...".format(restore_filename))
model.load_weights(restore_path)
def train(self,
dataset: Union[tf.data.Dataset, ClassificationDataset],
model: tf.keras.Model,
init_epoch: int,
epochs: int,
batch_size: int,
callbacks: List[tf.keras.callbacks.Callback],
class_weights=None,
augmentation: bool = False):
"""
Compiles and trains the model for the specified number of epochs.
"""
self.__logs['training'].info('Training the model...\n')
# Display the architecture of the model
self.__logs['training'].info('Architecture of the model:')
model.summary()
# Train the model
self.__logs['training'].info('Starting the fitting procedure:')
self.__logs['training'].info('* Total number of epochs: ' + str(epochs))
self.__logs['training'].info('* Initial epoch: ' + str(init_epoch) + '\n')
training_set, training_set_size = dataset.get_training_set()
validation_set, validation_set_size = dataset.get_validation_set()
training_steps = training_set_size // batch_size + 1
validation_steps = validation_set_size // batch_size + 1
if augmentation:
x_train, y_train = dataset.get_xy_training()
x_val, y_val = dataset.get_xy_validation()
train_image_data_generator = ImageDataGenerator(brightness_range=[0.7, 1.0],
rotation_range=10,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=.1)
val_image_data_generator = ImageDataGenerator()
train_generator = train_image_data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_train, 'class': y_train}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
val_generator = val_image_data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_val, 'class': y_val}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
model.fit_generator(train_generator,
epochs=epochs,
steps_per_epoch=training_steps,
validation_data=val_generator,
validation_steps=validation_steps,
callbacks=callbacks,
initial_epoch=init_epoch,
class_weight=class_weights)
else:
model.fit(training_set,
epochs=epochs,
steps_per_epoch=training_steps,
validation_data=validation_set,
validation_steps=validation_steps,
callbacks=callbacks,
initial_epoch=init_epoch,
class_weight=class_weights)
self.__logs['training'].info('Training procedure performed successfully!\n')
def evaluate(self,
model: tf.keras.Model,
evaluation_set: Union[tf.data.Dataset, ClassificationDataset],
evaluation_steps: Union[int, None] = None,
batch_size: Union[int, None] = None,
augmentation: bool = False) -> Union[float, List[float], None]:
"""
Evaluate the model on provided set.
:return: the loss value if model has no other metrics, otw returns array with loss and metrics
values.
"""
self.__logs['training'].info('Evaluating the model...')
if augmentation:
x_eval, y_eval = evaluation_set.get_xy_evaluation()
data_generator = ImageDataGenerator()
evaluation_set = data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': x_eval, 'class': y_eval}),
directory='',
x_col='image',
y_col='class',
class_mode='other',
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size)
else:
if evaluation_steps is not None and evaluation_steps == 0:
self.__logs['training'].warn('Skipping evaluation since provided set is empty')
return None
return model.evaluate(evaluation_set, verbose=1, steps=evaluation_steps)
def predict(self,
model: tf.keras.Model,
dataset: Union[tf.data.Dataset, List[str]], # List is for submission
verbose: int = 1,
steps: Union[int, None] = None,
batch_size: Union[int, None] = None,
augmentation: bool = False) -> Union[np.ndarray, List[np.ndarray]]:
"""
Performs a prediction on a given dataset
"""
self.__logs['test'].info("Predicting...")
if augmentation:
data_generator = ImageDataGenerator()
generator = data_generator.flow_from_dataframe(
dataframe=pd.DataFrame({'image': dataset}),
directory='',
x_col='image',
class_mode=None,
target_size=(self.__input_width, self.__input_height),
batch_size=batch_size,
shuffle=False)
steps = 1
return model.predict_generator(generator, steps=steps, verbose=verbose)
else:
return model.predict(dataset, verbose=verbose, steps=steps)
| 40.869048
| 108
| 0.556656
| 1,073
| 10,299
| 5.086673
| 0.214352
| 0.03133
| 0.023452
| 0.02565
| 0.286918
| 0.225724
| 0.196775
| 0.186149
| 0.186149
| 0.157017
| 0
| 0.006806
| 0.357996
| 10,299
| 251
| 109
| 41.031873
| 0.818663
| 0.100204
| 0
| 0.304094
| 0
| 0
| 0.073057
| 0.004317
| 0
| 0
| 0
| 0
| 0.011696
| 1
| 0.046784
| false
| 0
| 0.052632
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfec725778cb5fb317db1061f7feba9a3d3f7b10
| 554
|
py
|
Python
|
tests/test_imgs2bw.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | 2
|
2019-09-29T08:43:31.000Z
|
2022-01-12T09:46:18.000Z
|
tests/test_imgs2bw.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | null | null | null |
tests/test_imgs2bw.py
|
antsfamily/improc
|
ceab171b0e61187fa2ced7c58540d5ffde79ebac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2018-07-04 09:43:51
# @Author : Zhi Liu (zhiliu.mind@gmail.com)
# @Link : http://iridescent.ink
# @Version : $1.1$
import matplotlib.cm as cm
from matplotlib import pyplot as plt
import improc as imp
datafolder = '/mnt/d/DataSets/oi/nsi/classical/'
imgspathes = [
datafolder + 'BaboonRGB.bmp',
datafolder + 'LenaRGB.bmp',
]
print(imgspathes)
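# imgs2bw thresholds each listed image (here at gray level 50) and stacks the
# resulting binary arrays; the first one is shown below with a gray colormap.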
bws = imp.imgs2bw(imgspathes, 50)
print(bws.dtype, bws.shape)
print(bws)
plt.figure()
plt.imshow(bws[:, :, :, 0], cm.gray)
plt.show()
| 19.103448
| 48
| 0.658845
| 81
| 554
| 4.506173
| 0.716049
| 0.043836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045259
| 0.162455
| 554
| 28
| 49
| 19.785714
| 0.741379
| 0.299639
| 0
| 0
| 0
| 0
| 0.149215
| 0.086387
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfec78daa3bbf2130e5e79b3fbc047fcd7c950b3
| 764
|
py
|
Python
|
Un4/Un4.py
|
tonypithony/forktinypythonprojectsscripts
|
3dae818c822ee7de6de021e9f46d02bfe05f7355
|
[
"MIT"
] | null | null | null |
Un4/Un4.py
|
tonypithony/forktinypythonprojectsscripts
|
3dae818c822ee7de6de021e9f46d02bfe05f7355
|
[
"MIT"
] | null | null | null |
Un4/Un4.py
|
tonypithony/forktinypythonprojectsscripts
|
3dae818c822ee7de6de021e9f46d02bfe05f7355
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Jump the Five"""
import argparse
# --------------------------------------------------
def get_args():
parser = argparse.ArgumentParser(description='Jump the Five',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('text', metavar='str', help='Input text')
return parser.parse_args()
def main():
args = get_args()
jumper = {'1': '9', '2': '8', '3': '7', '4': '6', '5': '0',
'6': '4', '7': '3', '8': '2', '9': '1', '0': '5'}
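# Each digit maps to its "jump the five" partner on a phone keypad:
# 1<->9, 2<->8, 3<->7, 4<->6 and 5<->0; everything else passes through.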
for char in args.text:
print(jumper.get(char, char), end='')
print()
# --------------------------------------------------
if __name__ == '__main__':
main()
# $ ./Un4.py 867-5309
# 243-0751
# $ ./Un4.py 'Call 1-800-329-8044 today!'
# Call 9-255-781-2566 today!
| 25.466667
| 64
| 0.522251
| 98
| 764
| 3.938776
| 0.612245
| 0.036269
| 0.056995
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090214
| 0.143979
| 764
| 30
| 65
| 25.466667
| 0.5
| 0.304974
| 0
| 0
| 0
| 0
| 0.111538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.266667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dff25402be58788805ce4000a620f3bec7823781
| 4,537
|
py
|
Python
|
iocage/main.py
|
krcNAS/iocage
|
13d87e92f8ba186b6c8b7f64a948f26a05586430
|
[
"BSD-2-Clause"
] | null | null | null |
iocage/main.py
|
krcNAS/iocage
|
13d87e92f8ba186b6c8b7f64a948f26a05586430
|
[
"BSD-2-Clause"
] | null | null | null |
iocage/main.py
|
krcNAS/iocage
|
13d87e92f8ba186b6c8b7f64a948f26a05586430
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2014-2017, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""The main CLI for ioc."""
import locale
import os
import re
import signal
import subprocess as su
import sys
import click
# This prevents it from getting in our way.
from click import core
import iocage.lib.ioc_check as ioc_check
core._verify_python3_env = lambda: None
user_locale = os.environ.get("LANG", "en_US.UTF-8")
locale.setlocale(locale.LC_ALL, user_locale)
# @formatter:off
# Sometimes SIGINT won't be installed.
# http://stackoverflow.com/questions/40775054/capturing-sigint-using-keyboardinterrupt-exception-works-in-terminal-not-in-scr/40785230#40785230
signal.signal(signal.SIGINT, signal.default_int_handler)
# If a utility decides to cut off the pipe, we don't care (IE: head)
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
# @formatter:on
try:
su.check_call(["sysctl", "vfs.zfs.version.spa"],
stdout=su.PIPE, stderr=su.PIPE)
except su.CalledProcessError:
sys.exit("ZFS is required to use iocage.\n"
"Try calling 'kldload zfs' as root.")
def print_version(ctx, param, value):
"""Prints the version and then exits."""
if not value or ctx.resilient_parsing:
return
print("Version\t0.9.9.2 RC")
sys.exit()
cmd_folder = os.path.abspath(os.path.join(os.path.dirname(__file__),
'cli'))
class IOCageCLI(click.MultiCommand):
"""
Iterates in the 'cli' directory and will load any module's cli definition.
"""
def list_commands(self, ctx):
rv = []
for filename in os.listdir(cmd_folder):
if filename.endswith('.py') and \
not filename.startswith('__init__'):
rv.append(re.sub(".py$", "", filename))
rv.sort()
return rv
def get_command(self, ctx, name):
try:
mod = __import__(f"iocage.cli.{name}",
None, None, ["cli"])
mod_name = mod.__name__.replace("iocage.cli.", "")
try:
if mod.__rootcmd__ and "--help" not in sys.argv[1:]:
if len(sys.argv) != 1:
if os.geteuid() != 0:
sys.exit("You need to have root privileges to"
f" run {mod_name}")
except AttributeError:
# It's not a root required command.
pass
return mod.cli
except (ImportError, AttributeError):
return
@click.command(cls=IOCageCLI)
@click.option("--version", "-v", is_flag=True, callback=print_version,
help="Display iocage's version and exit.")
def cli(version):
"""A jail manager."""
skip_check = False
skip_check_cmds = ["--help", "activate", "-v", "--version"]
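# Help/version output, 'activate' and 'clean' must work before (or instead of)
# the usual IOCCheck host sanity check.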
try:
if "iocage" in sys.argv[0] and len(sys.argv) == 1:
skip_check = True
for arg in sys.argv[1:]:
if arg in skip_check_cmds:
skip_check = True
elif "clean" in arg:
skip_check = True
ioc_check.IOCCheck(silent=True)
if not skip_check:
ioc_check.IOCCheck()
except RuntimeError as err:
exit(err)
if __name__ == '__main__':
cli(prog_name="iocage")
| 33.858209
| 143
| 0.642054
| 606
| 4,537
| 4.70297
| 0.458746
| 0.022105
| 0.011228
| 0.010526
| 0.072982
| 0.047719
| 0.047719
| 0.047719
| 0.047719
| 0.047719
| 0
| 0.013674
| 0.258541
| 4,537
| 133
| 144
| 34.112782
| 0.833532
| 0.384175
| 0
| 0.123288
| 0
| 0
| 0.118917
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0.013699
| 0.150685
| 0
| 0.273973
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dff4072d877687a20524346adc49201f57ca4cea
| 905
|
py
|
Python
|
svety/tests.py
|
clemsciences/svety
|
44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90
|
[
"MIT"
] | null | null | null |
svety/tests.py
|
clemsciences/svety
|
44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90
|
[
"MIT"
] | null | null | null |
svety/tests.py
|
clemsciences/svety
|
44a0c2ab5453e9d01b71b5a3f0e0e959740c2d90
|
[
"MIT"
] | null | null | null |
"""
"""
import os
import unittest
from lxml import etree
from svety import PACKDIR
from svety import reader
from svety import retriever
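# setUp fetches the Hellqvist dictionary into the working directory once,
# so the reader tests can parse it from disk.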
__author__ = ["Clément Besnier <clemsciences@aol.com>", ]
class TestMain(unittest.TestCase):
"""
"""
def setUp(self) -> None:
self.filename = "hellqvist.xml"
self.path = os.getcwd()
retriever.retrieve_dictionary()
def test_retrieve_text(self):
result = retriever.retrieve_dictionary()
self.assertTrue(result)
self.assertIn(self.filename, os.listdir(self.path))
def test_root(self):
root = reader.get_xml_root(self.filename, self.path)
self.assertEqual(type(root), etree._Element)
def test_lookup_word(self):
root = reader.get_xml_root(self.filename, self.path)
word = reader.read_entry(root, "enkom")
self.assertEqual(word["faksimilID"], '0208')
| 23.205128
| 60
| 0.667403
| 109
| 905
| 5.385321
| 0.440367
| 0.081772
| 0.076661
| 0.057922
| 0.149915
| 0.149915
| 0.149915
| 0.149915
| 0.149915
| 0.149915
| 0
| 0.005626
| 0.214365
| 905
| 38
| 61
| 23.815789
| 0.819972
| 0
| 0
| 0.086957
| 0
| 0
| 0.079096
| 0.024859
| 0
| 0
| 0
| 0
| 0.173913
| 1
| 0.173913
| false
| 0
| 0.26087
| 0
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dff5479d5d3e3729b12a7cdf8fd0b259fd5d0c88
| 5,424
|
py
|
Python
|
tests/internal/processes/test_generator.py
|
clausmichele/openeo-python-client
|
b20af2b24fcb12d0fce0e2acdb8afeeb881ff454
|
[
"Apache-2.0"
] | 1
|
2021-04-01T13:15:35.000Z
|
2021-04-01T13:15:35.000Z
|
tests/internal/processes/test_generator.py
|
clausmichele/openeo-python-client
|
b20af2b24fcb12d0fce0e2acdb8afeeb881ff454
|
[
"Apache-2.0"
] | null | null | null |
tests/internal/processes/test_generator.py
|
clausmichele/openeo-python-client
|
b20af2b24fcb12d0fce0e2acdb8afeeb881ff454
|
[
"Apache-2.0"
] | null | null | null |
from textwrap import dedent
from openeo.internal.processes.generator import PythonRenderer
from openeo.internal.processes.parse import Process
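# Each test feeds a minimal process spec to PythonRenderer and compares the
# generated Python source against a dedent'ed literal.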
def test_render_basic():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [{"name": "x", "description": "value", "schema": {"type": "integer"}}],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x):
"""
Increment a value
:param x: value
:return: incremented value
"""
return process('incr', x=x)''')
def test_render_no_params():
process = Process.from_dict({
"id": "pi",
"description": "Pi",
"summary": "Pi",
"parameters": [],
"returns": {"description": "value of pi", "schema": {"type": "number"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def pi():
"""
Pi
:return: value of pi
"""
return process('pi', )''')
def test_render_with_default():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": "integer"}},
{"name": "i", "description": "increment", "schema": {"type": "integer"}, "default": 1},
],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x, i=1):
"""
Increment a value
:param x: value
:param i: increment
:return: incremented value
"""
return process('incr', x=x, i=i)''')
def test_render_with_optional():
process = Process.from_dict({
"id": "foo",
"description": "Foo",
"summary": "Foo",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": "integer"}},
{"name": "y", "description": "something", "schema": {"type": "integer"}, "optional": True, "default": 1},
],
"returns": {"description": "new value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer(optional_default="UNSET")
src = renderer.render_process(process)
assert src == dedent('''\
def foo(x, y=UNSET):
"""
Foo
:param x: value
:param y: something
:return: new value
"""
return process('foo', x=x, y=y)''')
def test_render_return_type_hint():
process = Process.from_dict({
"id": "incr",
"description": "Increment a value",
"summary": "Increment a value",
"parameters": [{"name": "x", "description": "value", "schema": {"type": "integer"}}],
"returns": {"description": "incremented value", "schema": {"type": "integer"}}
})
renderer = PythonRenderer(return_type_hint="FooBar")
src = renderer.render_process(process)
assert src == dedent('''\
def incr(x) -> FooBar:
"""
Increment a value
:param x: value
:return: incremented value
"""
return process('incr', x=x)''')
def test_render_oo_no_params():
process = Process.from_dict({
"id": "pi",
"description": "Pi",
"summary": "Pi",
"parameters": [],
"returns": {"description": "value of pi", "schema": {"type": "number"}}
})
renderer = PythonRenderer(oo_mode=True)
src = "class Consts:\n" + renderer.render_process(process)
assert src == dedent('''\
class Consts:
def pi(self):
"""
Pi
:return: value of pi
"""
return process('pi', )''')
def test_render_keyword():
process = Process.from_dict({
"id": "or",
"description": "Boolean and",
"summary": "Boolean and",
"parameters": [
{"name": "x", "description": "value", "schema": {"type": ["boolean", "null"]}},
{"name": "y", "description": "value", "schema": {"type": ["boolean", "null"]}}
],
"returns": {"description": "result", "schema": {"type": ["boolean", "null"]}},
})
renderer = PythonRenderer()
src = renderer.render_process(process)
assert src == dedent('''\
def or_(x, y):
"""
Boolean and
:param x: value
:param y: value
:return: result
"""
return process('or', x=x, y=y)''')
oo_renderer = PythonRenderer(oo_mode=True, body_template="return {safe_name}({args})", )
src = oo_renderer.render_process(process)
assert dedent(src) == dedent('''\
def or_(self, y):
"""
Boolean and
:param self: value
:param y: value
:return: result
"""
return or_(x=self, y=y)''')
| 28.851064
| 117
| 0.50295
| 509
| 5,424
| 5.265226
| 0.137525
| 0.078358
| 0.05597
| 0.065672
| 0.726866
| 0.672015
| 0.627985
| 0.571269
| 0.55597
| 0.516791
| 0
| 0.000816
| 0.322456
| 5,424
| 187
| 118
| 29.005348
| 0.728435
| 0
| 0
| 0.688742
| 0
| 0
| 0.499078
| 0
| 0
| 0
| 0
| 0
| 0.05298
| 1
| 0.046358
| false
| 0
| 0.019868
| 0
| 0.119205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dff9aadffba2a29e37c671ac7172c7de73a82cb0
| 14,895
|
py
|
Python
|
hyperion/generators/adapt_sequence_batch_generator.py
|
jsalt2019-diadet/hyperion
|
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
|
[
"Apache-2.0"
] | 9
|
2019-09-22T05:19:59.000Z
|
2022-03-05T18:03:37.000Z
|
hyperion/generators/adapt_sequence_batch_generator.py
|
jsalt2019-diadet/hyperion
|
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
|
[
"Apache-2.0"
] | null | null | null |
hyperion/generators/adapt_sequence_batch_generator.py
|
jsalt2019-diadet/hyperion
|
14a11436d62f3c15cd9b1f70bcce3eafbea2f753
|
[
"Apache-2.0"
] | 4
|
2019-10-10T06:34:05.000Z
|
2022-03-05T18:03:56.000Z
|
"""
Copyright 2018 Jesus Villalba (Johns Hopkins University)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import sys
import os
import argparse
import time
import copy
import numpy as np
from sklearn.utils.class_weight import compute_class_weight
from ..hyp_defs import float_cpu
from ..io import RandomAccessDataReaderFactory as RF
from ..utils.scp_list import SCPList
from ..utils.tensors import to3D_by_seq
from ..transforms import TransformList
from .sequence_batch_generator_v1 import SequenceBatchGeneratorV1 as SBG
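# Each batch mixes (batch_size - r_adapt) sequences from the main key file
# with r_adapt sequences from the adaptation key file.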
class AdaptSequenceBatchGenerator(SBG):
def __init__(self, rspecifier,
key_file, key_file_adapt,
r_adapt=1,
class_list = None,
path_prefix=None,
batch_size=1,
iters_per_epoch='auto',
gen_method='random',
min_seq_length=None, max_seq_length=None,
seq_overlap=0,
prune_min_length=0,
return_class = True,
class_weight = None,
seq_weight = 'balanced',
shuffle_seqs=True,
transform=None,
init_epoch=0,
sg_seed=1024, reset_rng=False,
scp_sep=' ',
part_idx=1, num_parts=1):
self.scp_adapt = SCPList.load(key_file_adapt, sep=scp_sep)
if num_parts > 1:
self.scp_adapt = self.scp_adapt.split(part_idx, num_parts, group_by_key=False)
assert r_adapt < batch_size
self.r_adapt = r_adapt
self._init_seq_lengths_adapt = None
self._seq_lengths_adapt = None
self.init_scp_adapt = self.scp_adapt
self.cur_seq_adapt = 0
self.cur_frame_adapt = None
self.cur_subseq = None
self._init_num_subseqs_adapt = None
self.num_subseqs_adapt = None
super(AdaptSequenceBatchGenerator, self).__init__(
rspecifier, key_file, class_list, path_prefix, batch_size,
iters_per_epoch, gen_method, min_seq_length, max_seq_length, seq_overlap,
prune_min_length, return_class, class_weight, seq_weight,
shuffle_seqs, transform, init_epoch, sg_seed, reset_rng, scp_sep,
part_idx,num_parts)
@property
def num_seqs(self):
return len(self.scp)
@property
def num_seqs_adapt(self):
return len(self.scp_adapt)
@property
def seq_lengths(self):
if self._seq_lengths is None:
self._init_seq_lengths = self.r.read_num_rows(self.scp.file_path)
self._seq_lengths = self._init_seq_lengths
return self._seq_lengths
@property
def seq_lengths_adapt(self):
if self._seq_lengths_adapt is None:
self._init_seq_lengths_adapt = self.r.read_num_rows(self.scp_adapt.file_path)
self._seq_lengths_adapt = self._init_seq_lengths_adapt
return self._seq_lengths_adapt
@property
def total_length(self):
return np.sum(self.seq_lengths)
@property
def total_length_adapt(self):
return np.sum(self.seq_lengths_adapt)
@property
def min_seq_length(self):
if self._min_seq_length is None:
self._min_seq_length = min(np.min(self.seq_lengths), np.min(self.seq_lengths_adapt))
return self._min_seq_length
@property
def max_seq_length(self):
if self._max_seq_length is None:
self._max_seq_length = max(np.max(self.seq_lengths), np.max(self.seq_lengths_adapt))
return self._max_seq_length
@property
def steps_per_epoch(self):
if self._steps_per_epoch is None:
if self.gen_method == 'sequential':
if self.seq_weight == 'balanced':
seqs_per_iter = self.num_seqs*np.max(self.num_subseqs)
else:
seqs_per_iter = np.sum(self.num_subseqs)
else:
seqs_per_iter = self.num_seqs
self._steps_per_epoch = int(np.floor(
self.iters_per_epoch * seqs_per_iter/(self.batch_size-self.r_adapt)))
return self._steps_per_epoch
@property
def num_total_subseqs(self):
return self.steps_per_epoch * self.batch_size
def _prune_min_length(self, min_length):
keep_idx = self.seq_lengths >= min_length
self.scp = self.scp.filter_index(keep_idx)
keep_idx = self.seq_lengths_adapt >= min_length
self.scp_adapt = self.scp_adapt.filter_index(keep_idx)
self._seq_lengths = None
self._seq_lengths_adapt = None
def _prepare_class_info(self, class_list):
if class_list is None:
class_dict = {k:i for i, k in enumerate(np.unique(self.scp.key))}
class_dict.update({k:i for i, k in enumerate(np.unique(self.scp_adapt.key))})
else:
with open(class_list) as f:
class_dict={line.rstrip().split()[0]: i for i, line in enumerate(f)}
self.num_classes = len(class_dict)
self.key2class = {p: class_dict[k] for k, p in zip(self.scp.key, self.scp.file_path)}
self.key2class.update({p: class_dict[k] for k, p in zip(self.scp_adapt.key, self.scp_adapt.file_path)})
def _balance_class_weight(self):
super(AdaptSequenceBatchGenerator, self)._balance_class_weight()
classes, class_ids = np.unique(self.scp_adapt.key, return_inverse=True)
idx = self._balance_class_weigth_helper(class_ids)
self.scp_adapt = self.scp_adapt.filter_index(idx)
assert len(self.scp_adapt) == len(idx)
if self._init_seq_lengths_adapt is not None:
self._init_seq_lengths_adapt = self._init_seq_lengths_adapt[idx]
self._seq_lengths_adapt = self._init_seq_lengths_adapt
def _prepare_full_seqs(self):
pass
def _prepare_random_subseqs(self):
pass
def _prepare_sequential_subseqs(self):
super(AdaptSequenceBatchGenerator, self)._prepare_sequential_subseqs()
seq_lengths = self.seq_lengths_adapt
avg_length = int((self.max_seq_length + self.min_seq_length)/2)
shift = avg_length - self.seq_overlap
self._init_num_subseqs_adapt = np.ceil(seq_lengths/shift).astype(int)
self.num_subseqs_adapt = self._init_num_subseqs_adapt
self.cur_frame_adapt = np.zeros((self.num_seqs_adapt,), dtype=int)
self.cur_subseq_adapt = np.zeros((self.num_seqs_adapt,), dtype=int)
def reset(self):
super(AdaptSequenceBatchGenerator, self).reset()
self.cur_seq_adapt = 0
if self.shuffle_seqs:
if self._init_seq_lengths_adapt is None:
self.seq_lengths_adapt
self.scp_adapt = self.init_scp_adapt.copy()
index = self.scp_adapt.shuffle(rng=self.rng)
self._seq_lengths_adapt = self._init_seq_lengths_adapt[index]
if self._init_num_subseqs_adapt is not None:
self.num_subseqs_adapt = self._init_num_subseqs_adapt[index]
if self.gen_method == 'sequential':
self.cur_subseq_adapt[:] = 0
self.cur_frame_adapt[:] = 0
def _read_full_seqs(self):
batch_size = self.batch_size - self.r_adapt
keys = list(self.scp.file_path[self.cur_seq:self.cur_seq+batch_size])
self.cur_seq += batch_size
if len(keys) < batch_size:
delta = batch_size - len(keys)
keys += self.scp.file_path[:delta]
self.cur_seq = delta
assert len(keys) == batch_size
batch_size = self.r_adapt
keys_adapt = list(self.scp_adapt.file_path[self.cur_seq_adapt:self.cur_seq_adapt+batch_size])
self.cur_seq_adapt += batch_size
if len(keys_adapt) < batch_size:
delta = batch_size - len(keys_adapt)
keys_adapt += self.scp_adapt.file_path[:delta]
self.cur_seq_adapt = delta
assert len(keys_adapt) == batch_size
keys += keys_adapt
return keys, self.r.read(keys)
def _read_random_subseqs(self):
keys = []
seq_lengths =[]
first_frames = []
for i in xrange(self.batch_size-self.r_adapt):
key = self.scp.file_path[self.cur_seq]
full_seq_length = self.seq_lengths[self.cur_seq]
max_seq_length = min(full_seq_length, self.max_seq_length)
min_seq_length = min(full_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
first_frame = self.rng.randint(
low=0, high=full_seq_length-seq_length+1)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
for i in xrange(self.r_adapt):
key = self.scp_adapt.file_path[self.cur_seq_adapt]
full_seq_length = self.seq_lengths_adapt[self.cur_seq_adapt]
max_seq_length = min(full_seq_length, self.max_seq_length)
min_seq_length = min(full_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
first_frame = self.rng.randint(
low=0, high=full_seq_length-seq_length+1)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
return keys, self.r.read(keys, row_offset=first_frames,
num_rows=seq_lengths)
def _read_sequential_subseqs(self):
keys = []
seq_lengths =[]
first_frames = []
count = 0
while count < self.batch_size - self.r_adapt:
key = self.scp.file_path[self.cur_seq]
first_frame = self.cur_frame[self.cur_seq]
full_seq_length = self.seq_lengths[self.cur_seq]
remainder_seq_length = full_seq_length - first_frame
if self.cur_subseq[self.cur_seq] == self.num_subseqs[self.cur_seq]:
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
continue
if self.cur_subseq[self.cur_seq] == self.num_subseqs[self.cur_seq]-1:
seq_length = min(remainder_seq_length, self.max_seq_length)
self.cur_frame[self.cur_seq] = 0
else:
max_seq_length = min(
max(self.min_seq_length,
remainder_seq_length-self.min_seq_length),
self.max_seq_length)
min_seq_length = min(remainder_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
self.cur_frame[self.cur_seq] = min(
full_seq_length - self.min_seq_length,
first_frame + seq_length - self.seq_overlap)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_subseq[self.cur_seq] += 1
if self.seq_weight == 'balanced':
self.cur_subseq[self.cur_seq] %= self.num_subseqs[self.cur_seq]
self.cur_seq = (self.cur_seq + 1) % self.num_seqs
count += 1
while count < self.batch_size:
key = self.scp_adapt.file_path[self.cur_seq_adapt]
first_frame = self.cur_frame_adapt[self.cur_seq_adapt]
full_seq_length = self.seq_lengths_adapt[self.cur_seq_adapt]
remainder_seq_length = full_seq_length - first_frame
if self.cur_subseq_adapt[self.cur_seq_adapt] == self.num_subseqs_adapt[self.cur_seq_adapt]:
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
continue
if self.cur_subseq_adapt[self.cur_seq_adapt] == self.num_subseqs_adapt[self.cur_seq_adapt]-1:
seq_length = min(remainder_seq_length, self.max_seq_length)
self.cur_frame_adapt[self.cur_seq_adapt] = 0
else:
max_seq_length = min(
max(self.min_seq_length,
remainder_seq_length-self.min_seq_length),
self.max_seq_length)
min_seq_length = min(remainder_seq_length, self.min_seq_length)
seq_length = self.rng.randint(low=min_seq_length, high=max_seq_length+1)
self.cur_frame_adapt[self.cur_seq_adapt] = min(
full_seq_length - self.min_seq_length,
first_frame + seq_length - self.seq_overlap)
keys.append(key)
seq_lengths.append(seq_length)
first_frames.append(first_frame)
self.cur_subseq_adapt[self.cur_seq_adapt] += 1
if self.seq_weight == 'balanced':
self.cur_subseq_adapt[self.cur_seq_adapt] %= self.num_subseqs_adapt[self.cur_seq_adapt]
self.cur_seq_adapt = (self.cur_seq_adapt + 1) % self.num_seqs_adapt
count += 1
assert len(keys) == self.batch_size
return keys, self.r.read(keys, row_offset=first_frames,
num_rows=seq_lengths)
@staticmethod
def filter_args(prefix=None, **kwargs):
args = super(AdaptSequenceBatchGenerator,
AdaptSequenceBatchGenerator).filter_args(prefix, **kwargs)
if prefix is None:
p = ''
else:
p = prefix + '_'
valid_args = ('r_adapt',)
new_args = dict((k, kwargs[p+k])
for k in valid_args if p+k in kwargs)
args.update(new_args)
return args
@staticmethod
def add_argparse_args(parser, prefix=None):
args = super(AdaptSequenceBatchGenerator,
AdaptSequenceBatchGenerator).add_argparse_args(parser, prefix)
if prefix is None:
p1 = '--'
p2 = ''
else:
p1 = '--' + prefix + '-'
p2 = prefix + '_'
parser.add_argument(p1+'r-adapt', dest=(p2+'r_adapt'),
default=64, type=int,
help=('batch size of adaptation data.'))
| 34.320276
| 111
| 0.608728
| 1,935
| 14,895
| 4.311628
| 0.101809
| 0.088457
| 0.05993
| 0.046746
| 0.626993
| 0.51684
| 0.428982
| 0.380559
| 0.352991
| 0.314275
| 0
| 0.006006
| 0.306949
| 14,895
| 433
| 112
| 34.399538
| 0.802189
| 0.007586
| 0
| 0.352159
| 0
| 0
| 0.008192
| 0
| 0
| 0
| 0
| 0
| 0.016611
| 1
| 0.076412
| false
| 0.006645
| 0.056478
| 0.016611
| 0.182724
| 0.003322
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dffa822e50735b496917f2c8ca75cc5ca8d78488
| 1,113
|
py
|
Python
|
main.py
|
Lojlvenom/simple-python-blockchain
|
b226f81644daa066156aa5b9581c04cf4d47d0dc
|
[
"MIT"
] | null | null | null |
main.py
|
Lojlvenom/simple-python-blockchain
|
b226f81644daa066156aa5b9581c04cf4d47d0dc
|
[
"MIT"
] | null | null | null |
main.py
|
Lojlvenom/simple-python-blockchain
|
b226f81644daa066156aa5b9581c04cf4d47d0dc
|
[
"MIT"
] | null | null | null |
import fastapi as _fastapi
import blockchain as _blockchain
app_desc = {
'title':'Simple python blockchain API',
'version':'1.0.0',
}
bc = _blockchain.Blockchain()
app = _fastapi.FastAPI(**app_desc)
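# validade_blockchain raises a 400 whenever the stored chain fails validation.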
def validade_blockchain():
if not bc._is_chain_valid():
raise _fastapi.HTTPException(
status_code=400, detail="Blockchain is not valid"
)
@app.get("/", tags=["Endpoints"])
def hello():
return {
"message":"Bem vindo ao simple python blockchain API, para saber mais acesse /docs"
}
# ENDPOINT TO ADD A BLOCK
@app.post("/mine_block/", tags=["Endpoints"])
def mine_block(data: str):
validade_blockchain()
block = bc.mine_block(data)
return block
@app.get("/blockchain/", tags=["Endpoints"])
def get_blockchain():
validade_blockchain()
chain = bc.chain
return chain
@app.get('/check_is_valid', tags=["Endpoints"])
def check_is_valid():
is_valid = bc._is_chain_valid()
if is_valid:
return {
"message": "Is valid"
}
else:
return {
"message": "Not valid"
}
| 22.26
| 91
| 0.629829
| 134
| 1,113
| 5.044776
| 0.402985
| 0.051775
| 0.094675
| 0.073965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007084
| 0.238994
| 1,113
| 49
| 92
| 22.714286
| 0.791027
| 0.02336
| 0
| 0.076923
| 0
| 0
| 0.23318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0.025641
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dffa84ab01f78c539667e6f6871367dc2095eb09
| 1,747
|
py
|
Python
|
setup.py
|
SanjeevaRDodlapati/Chem-Learn
|
2db2e98061ee3dbb00ed20c51ea18b15956e298e
|
[
"MIT"
] | null | null | null |
setup.py
|
SanjeevaRDodlapati/Chem-Learn
|
2db2e98061ee3dbb00ed20c51ea18b15956e298e
|
[
"MIT"
] | null | null | null |
setup.py
|
SanjeevaRDodlapati/Chem-Learn
|
2db2e98061ee3dbb00ed20c51ea18b15956e298e
|
[
"MIT"
] | null | null | null |
from glob import glob
import os
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(name='chemlearn',
version='0.0.0',
description='Deep learning for chemistry',
long_description=read('README.rst'),
author='Sanjeeva Reddy Dodlapati',
author_email='sdodl001@odu.edu',
license="MIT",
url='https://github.com/SanjeevaRDodlapati/Chem-Learn',
packages=find_packages(),
scripts=glob('./scripts/*.py'),
install_requires=['h5py',
'argparse',
'pandas',
'numpy',
'pytest',
'torch',
'rdkit-pypi',
],
keywords=['Deep learning',
'Deep neural networks',
'Molecular graphs',
'Drug discovery',
'Drug target interaction'],
classifiers=['Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Chem-Informatics',
]
)
| 36.395833
| 80
| 0.499714
| 143
| 1,747
| 6.041958
| 0.664336
| 0.109954
| 0.144676
| 0.12037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01476
| 0.379508
| 1,747
| 47
| 81
| 37.170213
| 0.782288
| 0
| 0
| 0
| 0
| 0
| 0.419576
| 0.025186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.071429
| 0.02381
| 0.119048
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfff599aef2fa931d79fa84797d0acce9a216f5a
| 5,407
|
py
|
Python
|
murder.py
|
lgrn/murder
|
1e4582cc5fa8c31c35e70997daebd111f1badf4d
|
[
"Unlicense"
] | null | null | null |
murder.py
|
lgrn/murder
|
1e4582cc5fa8c31c35e70997daebd111f1badf4d
|
[
"Unlicense"
] | null | null | null |
murder.py
|
lgrn/murder
|
1e4582cc5fa8c31c35e70997daebd111f1badf4d
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python3
# murder 0.2.3
import sys
if sys.version_info[0] != 3:
sys.stdout.write("Sorry this software requires Python 3. This is Python {}.\n".format(sys.version_info[0]))
sys.exit(1)
import time
import requests
import json
import re
# Your "filename" file should contain one word per row. Don't worry about
# newlines and whitespace, it will be stripped. Any names containing anything
# but A-Z/a-z, underscores and numbers will be skipped and not queried.
filename = "input.txt"
try:
with open(filename) as f:
lines = [line.strip().lower() for line in f]
lines = list(set(lines))
except FileNotFoundError:
print("For this script to work, {} needs to exist in the working directory. Exiting.".format(filename))
raise SystemExit
except UnicodeDecodeError:
print("Oops! {} isn't UTF-8. Convert it, for example by running iconv. Exiting.".format(filename))
raise SystemExit
unavailable_filename = "unavailable.txt"
try:
with open(unavailable_filename) as f:
unavailable_lines = [line.strip() for line in f]
except FileNotFoundError:
print("\n{} was not found. That's fine, probably there wasn't a previous run.".format(unavailable_filename))
available_filename = "output.txt"
try:
with open(available_filename) as f:
available_lines = [line.strip() for line in f]
except FileNotFoundError:
print("\n{} was not found. That's fine, probably there wasn't a previous run.".format(available_filename))
pretty_amount = "{:,}".format(len(lines))
print("\n[>>>>>>>>>] Imported {} words from {}.".format(pretty_amount,filename))
# This regex pattern validates usernames.
pattern = re.compile("^[a-zA-Z0-9]+([._]?[a-zA-Z0-9]+)*$")
sys.stdout.flush()
# This function will check if a name is available:
def is_available(username):
url = ("https://twitter.com/users/username_available"
"?scribeContext%5Bcomponent%5D=form&scribeContext%5B"
"element%5D=screen_name&username=" + str(username.lower()) +
"&value=" + str(username.lower()))
response = requests.get(url)
try:
data = json.loads(response.text)
reason = data.get("reason")
except UnboundLocalError:
print('[ JSON! ] Twitter refused to give us a decent response for this request: ')
print(url)
print('[ JSON! ] Assuming its unavailable and attempting to move on.')
reason = "unavailable"
pass
except ValueError:
print('[ JSON! ] UH-OH! You\'re probably being rate limited :(')
print('[ JSON! ] You should stop for now and/or adjust your sleep_timer.')
print('[ JSON! ] ValueError for this request: ')
print(url)
raise SystemExit
if reason == "available":
return True
else:
return False
def write_available(i):
f = open("output.txt", "a")
f.write(i)
f.close()
def write_unavailable(i):
f = open("unavailable.txt", "a")
f.write(i)
f.close()
failed_tries = 0
ok_tries = 0
# Let's clean up our "lines" array first so it only contains stuff we
# actually want to throw at the API.
clean_lines = []
for i in lines:
if pattern.match(i) and len(str(i)) == 5:
clean_lines.append(i)
# NOTE: "Compliant" below is decided by the for loop above.
pretty_amount = "{:,}".format(len(clean_lines))
print("[>>>>>>>>>] Cleaned up import to only include compliant words. We now have {} words.".format(pretty_amount) + "\n")
# Clean the array further by removing already checked names (failed and succeeded).
try:
for i in unavailable_lines:
if i in clean_lines:
clean_lines.remove(i)
print("[ CLEANUP ] '{}' will not be checked, we already know it's taken.".format(i.lower()))
except NameError:
# If there wasn't a previous run, this won't exist. That's fine.
pass
try:
for i in available_lines:
if i in clean_lines:
clean_lines.remove(i)
print("[ CLEANUP ] '{}' will not be checked, we already know it's available.".format(i.lower()))
except NameError:
# If there wasn't a previous run, this won't exist. That's fine.
pass
try:
if unavailable_lines or available_lines:
pretty_amount = "{:,}".format(len(clean_lines))
print("[>>>>>>>>>] Done cross-checking txt files from previous runs, we now have {} words.".format(pretty_amount) + "\n")
except NameError:
pass
# NOTE: time.sleep waits because twitter has a rate limit of 150/15min (?) <- bad guess
print("[>>>>>>>>>] Making API calls now." + "\n")
sleep_seconds = 10
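# Pause between calls; the exact rate limit is undocumented, so stay conservative.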
for i in clean_lines:
sys.stdout.flush()
if is_available(i):
print("[AVAILABLE] '{}'! Saving to output.txt, stalling for next API call.".format(i.lower()))
ok_tries += 1
write_available(i.lower() + '\n')
sys.stdout.flush()
time.sleep(sleep_seconds)
else:
print("[ TAKEN ] '{}'. Too bad. Stalling for next API call.".format(i.lower()))
failed_tries += 1
#delete_row(i)
write_unavailable(i.lower() + '\n')
time.sleep(sleep_seconds)
total_tries = failed_tries + ok_tries
print("Script finished. Twitter was hit with "
"{} queries. We found {} available names, saved to output.txt".format(total_tries,ok_tries))
| 32.769697
| 129
| 0.653227
| 757
| 5,407
| 4.594452
| 0.336856
| 0.025877
| 0.010351
| 0.012651
| 0.273145
| 0.234043
| 0.234043
| 0.203565
| 0.165037
| 0.146061
| 0
| 0.007039
| 0.211763
| 5,407
| 165
| 130
| 32.769697
| 0.80901
| 0.149621
| 0
| 0.342105
| 0
| 0.04386
| 0.326134
| 0.025524
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.035088
| 0.061404
| 0
| 0.105263
| 0.175439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dfff777451f2b530e80b5323a7284116b77ea627
| 703
|
py
|
Python
|
cfn_review_bot/merge.py
|
biochimia/cfn-review-bot
|
1c8a84b51f7c398c21725cb888a9ab694ddfbb56
|
[
"Apache-2.0"
] | 1
|
2019-04-04T12:09:16.000Z
|
2019-04-04T12:09:16.000Z
|
cfn_review_bot/merge.py
|
biochimia/cfn-review-bot
|
1c8a84b51f7c398c21725cb888a9ab694ddfbb56
|
[
"Apache-2.0"
] | null | null | null |
cfn_review_bot/merge.py
|
biochimia/cfn-review-bot
|
1c8a84b51f7c398c21725cb888a9ab694ddfbb56
|
[
"Apache-2.0"
] | null | null | null |
def _deep_merge_mapping(old, new):
merged = {}
merged.update(old)
for k, nv in new.items():
try:
ov = merged[k]
except KeyError:
merged[k] = nv
continue
merged[k] = deep_merge(ov, nv)
return merged
def _deep_merge_sequence(old, new):
return old + new
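# deep_merge recursively merges mappings, concatenates sequences, accepts
# equal scalars, and treats anything else as an unmergeable conflict.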
def deep_merge(old, new):
if (isinstance(old, dict)
and isinstance(new, dict)):
return _deep_merge_mapping(old, new)
if (isinstance(old, list)
and isinstance(new, list)):
return _deep_merge_sequence(old, new)
if old == new:
return old
raise Exception('Unable to merge {} with {}'.format(old, new))
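A quick usage sketch (my own example, not part of the original module): nested dicts merge recursively, lists concatenate, equal scalars pass through, and conflicting scalars raise.

base = {'Tags': [{'Key': 'env', 'Value': 'dev'}], 'Region': 'eu-west-1'}
override = {'Tags': [{'Key': 'team', 'Value': 'infra'}], 'Region': 'eu-west-1'}

merged = deep_merge(base, override)
# merged['Tags'] contains both tag entries in order;
# merged['Region'] == 'eu-west-1' (identical scalars are allowed)

deep_merge({'a': 1}, {'a': 2})  # raises: Unable to merge 1 with 2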
| 20.676471 | 66 | 0.571835 | 90 | 703 | 4.311111 | 0.344444 | 0.123711 | 0.092784 | 0.097938 | 0.324742 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.318634 | 703 | 33 | 67 | 21.30303 | 0.810021 | 0 | 0 | 0 | 0 | 0 | 0.036984 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0 | 0.043478 | 0.347826 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5f0133420725ce23664fd5aac6eace5b4be90d9b | 324 | py | Python | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | ["BSD-2-Clause"] | null | null | null | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | ["BSD-2-Clause"] | null | null | null | 02_module/package_test/module1/my_sum.py | zzz0072/Python_Exercises | 9918aa8197a77ef237e5e60306c7785eca5cb1d3 | ["BSD-2-Clause"] | null | null | null |
#!/usr/bin/env python
from ..module2 import my_print


def my_sum(x, y):
    result = x + y
    my_print.my_print(result)


# To run the module on its own
if __name__ == "__main__":
    import sys
    if len(sys.argv) != 3:
        print("%s str1 str2" % sys.argv[0])
        raise SystemExit(1)
    my_sum(sys.argv[1], sys.argv[2])
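Because of the relative import, this module only runs as part of its package. A plausible invocation (hypothetical; it assumes package_test and its subpackages are importable from the current directory):

# python -m package_test.module1.my_sum hello world
# sys.argv entries are strings, so x + y concatenates: my_print receives "helloworld"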
| 18 | 43 | 0.608025 | 54 | 324 | 3.407407 | 0.611111 | 0.152174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032787 | 0.246914 | 324 | 17 | 44 | 19.058824 | 0.721311 | 0.12037 | 0 | 0 | 0 | 0 | 0.071174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5f033434eab634732c27a8827763d152ae9391a1 | 1,054 | py | Python | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | ["Apache-2.0"] | null | null | null | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | ["Apache-2.0"] | 9 | 2020-01-07T12:48:59.000Z | 2020-01-16T10:44:34.000Z | repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py | AloisMahdal/leapp-repository | 9ac2b8005750e8e56e5fde61e8762044d0f16257 | ["Apache-2.0"] | null | null | null |
from os import symlink, path, access, X_OK

import pytest

from leapp.libraries.actor import workaround
from leapp.libraries.common.utils import makedirs


def fake_symlink(basedir):
    def impl(source, target):
        source_path = str(basedir.join(*source.lstrip('/').split('/')))
        makedirs(source_path)
        symlink(source_path, target)
    return impl


def test_apply_python3_workaround(monkeypatch, tmpdir):
    leapp_home = tmpdir.mkdir('tmp_leapp_py3')
    monkeypatch.setattr(workaround.os, 'symlink', fake_symlink(tmpdir.mkdir('lib')))
    monkeypatch.setattr(workaround, 'LEAPP_HOME', str(leapp_home))

    # Ensure double invocation doesn't cause a problem
    workaround.apply_python3_workaround()
    workaround.apply_python3_workaround()

    # Ensure creation of all required elements
    assert path.islink(str(leapp_home.join('leapp')))
    assert path.isfile(str(leapp_home.join('leapp3')))
    assert access(str(leapp_home.join('leapp3')), X_OK)
    assert str(leapp_home) in leapp_home.join('leapp3').read_text('utf-8')
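To run just this test (a hypothetical invocation from a leapp-repository checkout, assuming leapp and its libraries are importable):

# python -m pytest -v repos/system_upgrade/el7toel8/actors/preparepythonworkround/tests/test_preparepythonworkaround.py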
| 32.9375 | 84 | 0.734345 | 140 | 1,054 | 5.35 | 0.428571 | 0.096128 | 0.080107 | 0.064085 | 0.058745 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008899 | 0.147059 | 1,054 | 31 | 85 | 34 | 0.824249 | 0.08444 | 0 | 0.1 | 0 | 0 | 0.065489 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.15 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5f03df5d79ef568c79e0a3f2f05ab7cc845b03d5 | 707 | py | Python | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | ["MIT"] | null | null | null | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | ["MIT"] | null | null | null | codility/equi_leader.py | py-in-the-sky/challenges | 4a36095de8cb56b4f9f83c241eafb13dfbeb4065 | ["MIT"] | null | null | null |
"""
https://codility.com/programmers/task/equi_leader/
"""
from collections import Counter, defaultdict
def solution(A):
def _is_equi_leader(i):
prefix_count_top = running_counts[top]
suffix_count_top = total_counts[top] - prefix_count_top
return (prefix_count_top * 2 > i + 1) and (suffix_count_top * 2 > len(A) - i - 1)
total_counts = Counter(A)
running_counts = defaultdict(int)
top = A[0]
result = 0
for i in xrange(len(A) - 1):
n = A[i]
running_counts[n] += 1
top = top if running_counts[top] >= running_counts[n] else n
if _is_equi_leader(i):
result += 1
return result
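A quick sanity check using the example from the Codility problem statement: for A = [4, 3, 4, 4, 4, 2] the leader is 4, and splitting after index 0 or index 2 leaves 4 as the leader of both slices, so there are two equi leaders.

print(solution([4, 3, 4, 4, 4, 2]))  # 2 (equi leaders at indices 0 and 2)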
| 24.37931 | 89 | 0.595474 | 98 | 707 | 4.05102 | 0.387755 | 0.100756 | 0.105793 | 0.065491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018109 | 0.29703 | 707 | 28 | 90 | 25.25 | 0.780684 | 0.070721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5f05166068ffa658a5a11fcc559025940e70a85b | 1,419 | py | Python | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | ["MIT"] | 10 | 2017-08-08T22:57:32.000Z | 2020-04-07T21:50:20.000Z | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | ["MIT"] | null | null | null | downloader.py | Luonic/tf-cnn-lstm-ocr-captcha | 9ac6202d546093d95083a32c71cdccb800dfdea2 | ["MIT"] | 5 | 2018-07-17T16:47:21.000Z | 2021-11-06T15:03:56.000Z |
import urllib
import requests
import multiprocessing.pool
from multiprocessing import Pool
import uuid
import os

images_dir = os.path.join("data", "train")

# Hex alphabet: '0'-'9' plus 'a'-'f'. (Python 2: map() returns a list,
# so the two halves can be concatenated with +.)
small_letters = map(chr, range(ord('a'), ord('f') + 1))
digits = map(chr, range(ord('0'), ord('9') + 1))
base_16 = digits + small_letters

MAX_THREADS = 100


def captcha(code):
    try:
        r = requests.get("https://local.thedrhax.pw/rucaptcha/?" + code)
        filename = code + "_" + str(uuid.uuid1().time) + ".png"
        path = os.path.join(images_dir, filename)
        with open(path, "wb") as png:
            png.write(bytes(r.content))
        print("Downloaded " + str(code))
    except Exception as e:
        print(str(e))


if __name__ == "__main__":
    labels = []
    for i in range(0, len(base_16)):
        for j in range(0, len(base_16)):
            for m in range(0, len(base_16)):
                for n in range(0, len(base_16)):
                    try:
                        label = base_16[i] + base_16[j] + base_16[m] + base_16[n]
                        labels.append(label)
                        # urllib.urlretrieve("https://local.thedrhax.pw/rucaptcha/?" + str(label), str(label) + ".png")
                    except Exception as e:
                        print(str(e))
    print(labels)
    p = Pool(MAX_THREADS)
    while 1:
        # NOTE: loops forever, re-downloading every label; the final print
        # below is never reached.
        p.map(captcha, labels)
    print("Finished all downloads")
| 30.191489 | 143 | 0.547569 | 186 | 1,419 | 4.048387 | 0.419355 | 0.071713 | 0.042497 | 0.058433 | 0.250996 | 0.173971 | 0.151394 | 0 | 0 | 0 | 0 | 0.031697 | 0.310782 | 1,419 | 47 | 144 | 30.191489 | 0.738241 | 0.065539 | 0 | 0.162162 | 0 | 0 | 0.075327 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.162162 | 0 | 0.189189 | 0.135135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5f05920c4f06c4b47bf5845e7dd08b41ac585c06 | 7,679 | py | Python | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | ["MIT"] | 29 | 2021-10-01T12:05:54.000Z | 2022-03-16T02:40:19.000Z | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | ["MIT"] | 5 | 2021-12-20T12:25:58.000Z | 2022-03-10T19:08:32.000Z | code/Models.py | IGLICT/CMIC-Retrieval | d2f452517360f127d0a8175d55ba9f9491c152c2 | ["MIT"] | 1 | 2022-01-04T05:52:49.000Z | 2022-01-04T05:52:49.000Z |
import jittor as jt
from jittor import nn, models

if jt.has_cuda:
    jt.flags.use_cuda = 1  # jt.flags.use_cuda


class QueryEncoder(nn.Module):
    def __init__(self, out_dim=128):
        super(QueryEncoder, self).__init__()
        self.dim = out_dim
        self.resnet = models.resnet50(pretrained=False)
        self.resnet.conv1 = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
        fc_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Sequential(
            nn.BatchNorm1d(fc_features * 1),
            nn.Linear(fc_features * 1, self.dim),
        )

    def execute(self, input):
        embeddings = self.resnet(input)
        embeddings = jt.normalize(embeddings, p=2, dim=1)
        return embeddings


class RenderingEncoder(nn.Module):
    def __init__(self, out_dim=128):
        super(RenderingEncoder, self).__init__()
        self.dim = out_dim
        self.resnet = models.resnet18(pretrained=False)
        self.resnet.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False)
        fc_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Sequential(
            nn.BatchNorm1d(fc_features * 1),
            nn.Linear(fc_features * 1, self.dim),
        )

    def execute(self, inputs):
        embeddings = self.resnet(inputs)
        embeddings = jt.normalize(embeddings, p=2, dim=1)
        return embeddings


class Attention(nn.Module):
    '''
    Revised from the PyTorch version: <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>
    '''
    """ Applies attention mechanism on the `context` using the `query`.

    **Thank you** to IBM for their initial implementation of :class:`Attention`. Here is
    their `License
    <https://github.com/IBM/pytorch-seq2seq/blob/master/LICENSE>`__.

    Args:
        dimensions (int): Dimensionality of the query and context.
        attention_type (str, optional): How to compute the attention score:

            * dot: :math:`score(H_j, q) = H_j^T q`
            * general: :math:`score(H_j, q) = H_j^T W_a q`

    Example:
        >>> attention = Attention(256)
        >>> query = torch.randn(5, 1, 256)
        >>> context = torch.randn(5, 5, 256)
        >>> output, weights = attention(query, context)
        >>> output.size()
        torch.Size([5, 1, 256])
        >>> weights.size()
        torch.Size([5, 1, 5])
    """

    def __init__(self, dimensions, attention_type='general'):
        super(Attention, self).__init__()

        if attention_type not in ['dot', 'general']:
            raise ValueError('Invalid attention type selected.')

        self.attention_type = attention_type
        if self.attention_type == 'general':
            self.linear_in = nn.Linear(dimensions, dimensions, bias=False)

        self.linear_out = nn.Linear(dimensions * 2, dimensions, bias=False)
        self.softmax = nn.Softmax(dim=-1)
        self.tanh = nn.Tanh()

    def execute(self, query, context):
        """
        Args:
            query (:class:`torch.FloatTensor` [batch size, output length, dimensions]): Sequence of
                queries to query the context.
            context (:class:`torch.FloatTensor` [batch size, query length, dimensions]): Data
                over which to apply the attention mechanism.

        Returns:
            :class:`tuple` with `output` and `weights`:
            * **output** (:class:`torch.LongTensor` [batch size, output length, dimensions]):
              Tensor containing the attended features.
            * **weights** (:class:`torch.FloatTensor` [batch size, output length, query length]):
              Tensor containing attention weights.
        """
        batch_size, output_len, dimensions = query.size()
        query_len = context.size(1)

        if self.attention_type == "general":
            query = query.view(batch_size * output_len, dimensions)
            query = self.linear_in(query)
            query = query.view(batch_size, output_len, dimensions)

        # TODO: Include mask on PADDING_INDEX?

        # (batch_size, output_len, dimensions) * (batch_size, query_len, dimensions) ->
        # (batch_size, output_len, query_len)
        # attention_scores = nn.bmm(query, context.transpose(1, 2).contiguous())
        attention_scores = nn.bmm(query, context.transpose(0, 2, 1))

        # Compute weights across every context sequence
        attention_scores = attention_scores.view(batch_size * output_len, query_len)
        attention_weights = self.softmax(attention_scores)
        attention_weights = attention_weights.view(batch_size, output_len, query_len)

        # (batch_size, output_len, query_len) * (batch_size, query_len, dimensions) ->
        # (batch_size, output_len, dimensions)
        mix = nn.bmm(attention_weights, context)

        # concat -> (batch_size * output_len, 2 * dimensions)
        combined = jt.concat((mix, query), dim=2)
        combined = combined.view(batch_size * output_len, 2 * dimensions)

        # Apply linear_out on every 2nd dimension of concat
        # output -> (batch_size, output_len, dimensions)
        output = self.linear_out(combined).view(batch_size, output_len, dimensions)
        output = self.tanh(output)

        return output, attention_weights


class RetrievalNet(nn.Module):
    '''
    QueryEncoder
    RenderingEncoder
    Attention
    '''
    def __init__(self, cfg):
        super(RetrievalNet, self).__init__()
        self.dim = cfg.models.z_dim
        self.size = cfg.data.pix_size
        self.view_num = cfg.data.view_num

        self.query_encoder = QueryEncoder(self.dim)
        self.rendering_encoder = RenderingEncoder(self.dim)
        self.attention = Attention(self.dim)

    def execute(self, query, rendering):
        query_ebd = self.get_query_ebd(query)
        bs = query_ebd.shape[0]
        rendering = rendering.view(-1, 1, self.size, self.size)
        rendering_ebds = self.get_rendering_ebd(rendering).view(-1, self.view_num, self.dim)

        # (shape, image, ebd) -> (bs, bs, 128)
        query_ebd = query_ebd.unsqueeze(0).repeat(bs, 1, 1)

        # query_ebd: bs x bs x dim
        # rendering_ebds: bs x 12 x dim
        _, weights = self.attention_query(query_ebd, rendering_ebds)
        # weights: bs x bs x 12
        # rendering_ebds: bs x 12 x 128
        # queried_rendering_ebd: bs x bs x 128 (shape, model, 128)
        # reference: https://pytorchnlp.readthedocs.io/en/latest/_modules/torchnlp/nn/attention.html#Attention
        queried_rendering_ebd = nn.bmm(weights, rendering_ebds)
        return query_ebd, queried_rendering_ebd

    def get_query_ebd(self, inputs):
        return self.query_encoder(inputs)

    def get_rendering_ebd(self, inputs):
        return self.rendering_encoder(inputs)

    def attention_query(self, ebd, pool_ebd):
        return self.attention(ebd, pool_ebd)


if __name__ == '__main__':
    import yaml
    import argparse

    with open('./configs/pix3d.yaml', 'r') as f:
        config = yaml.safe_load(f)  # original used yaml.load(f); PyYAML >= 5.1 requires an explicit Loader

    def dict2namespace(config):
        namespace = argparse.Namespace()
        for key, value in config.items():
            if isinstance(value, dict):
                new_value = dict2namespace(value)
            else:
                new_value = value
            setattr(namespace, key, new_value)
        return namespace

    config = dict2namespace(config)
    models = RetrievalNet(config)

    img = jt.random([2, 4, 224, 224]).stop_grad()
    mask = jt.random([2, 12, 224, 224]).stop_grad()
    # mm = models.resnet50(pretrained=False)
    # # print(mm)
    # a = mm(img)
    outputs = models(img, mask)
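For orientation, the expected tensor shapes in the smoke test above (derived by tracing the code; this assumes the config supplies z_dim=128, pix_size=224, view_num=12 to match the random tensors):

# img  (2, 4, 224, 224)  -> query_ebd      (2, 128) -> repeated to (2, 2, 128)
# mask (2, 12, 224, 224) -> rendering_ebds (2, 12, 128)
# attention(query_ebd, rendering_ebds) -> weights (2, 2, 12)
# nn.bmm(weights, rendering_ebds) -> queried_rendering_ebd (2, 2, 128)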
| 36.393365 | 113 | 0.625602 | 929 | 7,679 | 4.990312 | 0.227126 | 0.036885 | 0.051769 | 0.050475 | 0.360009 | 0.304357 | 0.278473 | 0.198878 | 0.163072 | 0.092752 | 0 | 0.021108 | 0.259669 | 7,679 | 211 | 114 | 36.393365 | 0.794371 | 0.216434 | 0 | 0.152381 | 0 | 0 | 0.018345 | 0 | 0 | 0 | 0 | 0.004739 | 0 | 1 | 0.114286 | false | 0 | 0.038095 | 0.028571 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |