hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
da4c1af35b78bb185c69f2e2ce2c1d8ceee1a22d | 667 | py | Python | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | 1 | 2020-07-15T17:21:13.000Z | 2020-07-15T17:21:13.000Z | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | 1 | 2021-05-04T01:04:57.000Z | 2021-05-04T01:05:32.000Z | chapter03/knock25.py | m-star18/NLP100 | e199814f81943f7fb693fd5fe87d6df21da07f5b | [
"MIT"
] | null | null | null | import re
import pandas as pd
df = pd.read_json('jawiki-country.json', lines=True)
text = df.query('title=="イギリス"')['text'].values[0].split('\n')
memo, flag = [], False
template = '基礎情報'
check = re.compile(r'\|(.+?)\s=\s(.+)')          # "|field = value" lines
check1 = re.compile(r'\{\{' + template)          # template start: {{基礎情報
check2 = re.compile(r'\}\}')                     # template end: }}
check3 = re.compile(r'\|')                       # field lines inside the template
check4 = re.compile(r'<ref(\s|>).+?(</ref>|$)')  # <ref> markup to strip
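# Illustrative example (field chosen for illustration, not read from the data):
# an infobox line such as '|略名 = イギリス' is matched by `check`, so the loop
# below ends up with ans['略名'] == 'イギリス' once <ref> markup is stripped.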
for t in text:
if flag:
if check2.match(t):
break
if check3.match(t):
memo.append(check4.sub('', t.strip()))
if check1.match(t):
flag = True
ans = {}
for tmp in [check.match(m) for m in memo]:
ans[tmp.group(1)] = tmp.group(2)
print(ans)
| 23.821429 | 62 | 0.55922 | 98 | 667 | 3.795918 | 0.5 | 0.120968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020677 | 0.202399 | 667 | 27 | 63 | 24.703704 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0.136432 | 0.034483 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.086957 | 0 | 0.086957 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4c202f2a4d50150a0f027ce75d19d6e0f3d28d | 316 | py | Python | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 19 | 2018-06-08T05:33:47.000Z | 2021-04-26T16:19:32.000Z | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | null | null | null | constants.py | jaingaurav3/ML_sample | 4e53de198f7965fa96f0db44717df27032df4b48 | [
"MIT"
] | 13 | 2018-09-24T21:52:06.000Z | 2021-02-26T10:40:25.000Z | # Datasets
TRAIN = 'trn'
VAL = 'val'
TEST = 'tst'
FULL = 'full'
# File extensions
JPG = '.jpg'
TIF = '.tif'
PNG = '.png'
GIF = '.gif'
BCOLZ = '.bc'
CSV = '.csv'
# PyTorch
MODEL_EXT = '.mdl'
WEIGHTS_EXT = '.th'
OPTIM_EXT = '.th'
# Data Aug
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225] | 14.363636 | 37 | 0.591772 | 51 | 316 | 3.568627 | 0.686275 | 0.054945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.094118 | 0.193038 | 316 | 22 | 38 | 14.363636 | 0.619608 | 0.129747 | 0 | 0 | 0 | 0 | 0.169742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4ce36d9c8b0451cd19a4276193992895c3a0ed | 23,304 | py | Python | create_dataset.py | akudan/nephi | 83d3ccee0b83b3ca2349e92c454ed178afd1d1fb | [
"MIT"
] | 50 | 2018-04-22T23:12:18.000Z | 2022-02-14T15:10:24.000Z | create_dataset.py | akudan/nephi | 83d3ccee0b83b3ca2349e92c454ed178afd1d1fb | [
"MIT"
] | 1 | 2019-03-01T02:54:19.000Z | 2019-03-01T15:30:12.000Z | create_dataset.py | akudan/nephi | 83d3ccee0b83b3ca2349e92c454ed178afd1d1fb | [
"MIT"
] | 19 | 2018-02-07T21:17:13.000Z | 2022-02-14T15:11:46.000Z | import os
import lmdb # install lmdb by "pip install lmdb"
import cv2
import numpy as np
from tool.xml_parser import page_images
from glob import glob
import re
import sys
import io
import argparse
from scipy.spatial import distance
encoding = 'utf-8'
stdout = sys.stdout
reload(sys)
sys.setdefaultencoding('utf-8')
sys.stdout = stdout
def checkImageIsValid(imageBin):
if imageBin is None:
return False
imageBuf = np.fromstring(imageBin, dtype=np.uint8)
img = cv2.imdecode(imageBuf, cv2.IMREAD_GRAYSCALE)
imgH, imgW = img.shape[0], img.shape[1]
if imgH * imgW == 0:
return False
return True
# basically "flush the cache to the actual DB"
def writeCache(env, cache):
with env.begin(write=True) as txn:
for k, v in cache.iteritems():
txn.put(k, v)
def createDataset(outputPath, imagePathList, labelList, lexiconList=None, checkValid=True):
"""
Create LMDB dataset for CRNN training.
ARGS:
outputPath : LMDB output path
imagePathList : list of image path
labelList : list of corresponding groundtruth texts
lexiconList : (optional) list of lexicon lists
checkValid : if true, check the validity of every image
"""
assert(len(imagePathList) == len(labelList))
nSamples = len(imagePathList)
env = lmdb.open(outputPath, map_size=1099511627776)
cache = {}
cnt = 1
for i in xrange(nSamples):
imagePath = imagePathList[i]
print imagePath
label = labelList[i]
if not os.path.exists(imagePath):
print('%s does not exist' % imagePath)
continue
        with open(imagePath, 'rb') as f:
imageBin = f.read()
if checkValid:
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % imagePath)
continue
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
cache[imageKey] = imageBin
cache[labelKey] = label
if lexiconList:
lexiconKey = 'lexicon-%09d' % cnt
cache[lexiconKey] = ' '.join(lexiconList[i])
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d / %d' % (cnt, nSamples))
cnt += 1
nSamples = cnt-1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
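# Minimal usage sketch for createDataset (all paths below are hypothetical):
#   createDataset('./train_lmdb', ['lines/0001.png'], ['ground truth text'])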
# From: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates
#def PolyArea(x,y):
# return 0.5*np.abs(np.dot(x,np.roll(y,1))-np.dot(y,np.roll(x,1)))
def PolyArea(x,y):
correction = x[-1] * y[0] - y[-1]* x[0]
main_area = np.dot(x[:-1], y[1:]) - np.dot(y[:-1], x[1:])
return 0.5*np.abs(main_area + correction)
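# Shoelace sanity check: the unit square (0,0)-(1,0)-(1,1)-(0,1) has area 1.
#   PolyArea(np.array([0, 1, 1, 0]), np.array([0, 0, 1, 1]))  # -> 1.0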
# Takes an image read by cv2 and masks out the region of interest (pts)
def apply_mask(img, pts, add_pixel = False):
pts = np.array(pts, np.int32)
xmin = min(pts, key=lambda x: x[0])[0]
xmax = max(pts, key=lambda x: x[0])[0]
ymin = min(pts, key=lambda x: x[1])[1]
ymax = max(pts, key=lambda x: x[1])[1]
#if False:
if add_pixel:
ymin = ymin - add_pixel
if ymin < 0:
ymin = 0
print("Ymin:")
print(ymin)
ymax = ymax + add_pixel
if ymax >= img.shape[0]:
ymax = img.shape[0] - 1
print("Ymax")
print(ymax)
print("IMage shape:")
print(img.shape)
# RA: I will probably have to make allowance for the inevitable error that for a first or last line on the page, adding pixels takes us off the page.
# RA: I am now just going to use the whole array, given that they are ordered correctly
updated_pts = np.array([(p[0] - xmin, p[1] - ymin) for p in pts], np.int32)
#if False:
#if isinstance(add_pixel, (int, long)):
if add_pixel:
#x_pts = np.expand_dims(np.array([x[0] for x in updated_pts]), axis=1)
#print("Shape and dimensions of x_pts")
#print(x_pts.shape)
#print(x_pts.ndim)
#d_array = distance.cdist(x_pts, x_pts, 'euclidean') # only care about x-distance
for i, pt in enumerate(updated_pts):
area_poly = PolyArea(updated_pts[:,0], updated_pts[:,1])
up_pts = updated_pts.copy()
down_pts = updated_pts.copy()
up_pts[i,1] = up_pts[i,1] + add_pixel
down_pts[i,1] = down_pts[i,1] - add_pixel
if PolyArea(up_pts[:,0], up_pts[:,1]) > area_poly:
updated_pts[i,1] = updated_pts[i,1] + add_pixel
elif PolyArea(down_pts[:,0], down_pts[:,1]) > area_poly:
updated_pts[i,1] = updated_pts[i,1] - add_pixel
if updated_pts[i,1] < 0:
updated_pts[i,1] = 0
elif updated_pts[i,1] > ymax:
updated_pts[i,1] = ymax
# First closest point code below:
# Find the 7 closest points along the x-axis
#closest_x_pts = np.argpartition(d_array[:,i], 8)[:8] # includes index of the first point
#print("Indecies of closest_x_pts")
#print(closest_x_pts)
# k smallest elements
#np.argpartition(arr, k)[:k]
#closest_pts = pts[np.array(closest_x_pts)]
#print("Current point considering")
#print(pt)
#print("Actual closes_x_pts")
#print(closest_pts)
# Find whether increasing pixel height or decreasing pixel height adds to the area of the region of interest
#area_poly = PolyArea(closest_pts[:,0], closest_pts[:,1])
#print("Area of polygon")
#print(area_poly)
#up_closest_pts = closest_pts.copy()
#down_closest_pts = closest_pts.copy()
#pt_idx = np.where(np.all(np.isin(closest_pts, pt), axis=1))[0][0]
#print("Point index")
#print(pt_idx)
#up_closest_pts[pt_idx,1] = up_closest_pts[pt_idx,1] + add_pixel
#down_closest_pts[pt_idx,1] = down_closest_pts[pt_idx,1] - add_pixel
#if PolyArea(up_closest_pts[:,0], up_closest_pts[:,1]) > area_poly:
# updated_pts[i,1] = updated_pts[i,1] + add_pixel
#elif PolyArea(down_closest_pts[:,0], down_closest_pts[:,1]) > area_poly:
# updated_pts[i,1] = updated_pts[i,1] - add_pixel
line_img = img[ymin:ymax, xmin:xmax].copy()
mask = np.zeros(line_img.shape, dtype=np.uint8)
channel_count = 1
if len(line_img.shape) > 2:
channel_count = line_img.shape[2]
ignore_mask_color = (255,) * channel_count
# Idiosyncrasy of cv2.fillPoly
updated_pts = [(p[0], p[1]) for p in updated_pts]
roi_corners = np.array([updated_pts], dtype=np.int32)
cv2.fillPoly(mask, roi_corners, ignore_mask_color)
line_img[mask == 0] = 255
return line_img
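# Usage sketch (polygon coordinates are made up): crop a page image down to a
# text-line polygon, painting everything outside the polygon white.
#   line = apply_mask(page_img, [(10, 10), (200, 10), (200, 40), (10, 40)])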
def simple_dataset_from_dir(image_dir, output_path):
# a simple example of generating data (does not generate an alphabet.txt file, generate your own out of band)
# pass an image_dir like data/dataset/images/train that contains files like
# 25_this is the contents.png
imagePathList = []
labelList = []
files = os.listdir(image_dir)
for file in files:
image_path = file
imagePathList.append(os.path.join(image_dir,image_path)) # full path
label = os.path.splitext(file.split('_')[1])[0] # "victor" from 25_victor.png
print(file, label)
labelList.append(label)
createDataset(output_path, imagePathList, labelList)
def russell_page_journal(data_dir, output_path):
env = lmdb.open(output_path, map_size=1099511627776)
cache = {}
cnt = 1
img_files = glob(os.path.join(data_dir, "*.jpg"))
alpha_text = u'' #'0123456789abcdefghijklmnopqrstuvwxyz'
alphabet = []
for img_file in img_files:
img_c = cv2.imread(img_file)
text_file = img_file.partition(".")[0] + ".txt"
t_f = io.open(text_file, "r", encoding=encoding)
gt = t_f.read()
t_f.close()
line_img = img_c
imageBin = cv2.imencode('.png', line_img)[1].tostring()
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % img_file)
continue
annotation = gt
label = annotation.encode('utf-8')
print("Printing encoded unicode!")
print(label)
for c in annotation:
if not c in alphabet:
alphabet.append(c)
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
fileKey = 'file-%09d' % cnt
print imageKey
cache[imageKey] = imageBin
cache[labelKey] = label
cache[fileKey] = os.path.basename(img_file)
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d' % (cnt))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
alpha_text = u''.join(alphabet)
with io.open("alphabet.txt", "w", encoding=encoding) as text_file:
text_file.write(alpha_text)
# read into LMDB dataset from ICFHR 2018
def icfhr_dataset_read(data_dir, output_path, include_files=None, binarize = False, howe_dir = False, simplebin_dir = False, test = False):
env = lmdb.open(output_path, map_size=1099511627776)
cache = {}
cnt = 1
img_files = glob(os.path.join(data_dir, "*/*.jpg")) if test else glob(os.path.join(data_dir, "*/*/*.jpg"))
for img_file in img_files:
img_c = cv2.imread(img_file)
info_file = img_file + ".info"
if include_files is not None:
if ".jpg" not in include_files[0]:
include_files = [f + ".jpg" for f in include_files]
if os.path.basename(img_file) not in include_files:
continue
if not test:
text_file = img_file + ".txt"
if binarize:
howe_img = cv2.imread(os.path.join(howe_dir, os.path.basename(img_file).lower().partition(".jpg")[0] + "_howe.jpg"))
simplebin_img = cv2.imread(os.path.join(simplebin_dir, os.path.basename(img_file).lower().partition(".jpg")[0] + "_simplebin.jpg"))
with open(info_file, "r") as i_f:
if not test:
t_f = io.open(text_file, "r", encoding=encoding)
gt = t_f.read()
t_f.close()
info = i_f.read()
mask = info.partition("MASK\n")[2]
myre = re.compile(r"[0-9]+,[0-9]+")
mask_p = myre.findall(mask)
mask_pts = [tuple(int(x) for x in v.split(',')) for v in mask_p]
line_img = apply_mask(img_c, mask_pts)
if binarize:
howe_line_img = apply_mask(howe_img, mask_pts) # Hopefully this works even though Howe binarization takes out a few pixels
simplebin_line_img = apply_mask(simplebin_img, mask_pts)
imageBin = cv2.imencode('.png', line_img)[1].tostring()
if binarize:
howe_imageBin = cv2.imencode('.png', howe_line_img)[1].tostring()
simplebin_imageBin = cv2.imencode('.png', simplebin_line_img)[1].tostring()
if not checkImageIsValid(imageBin):
print('%s is not a valid image' % img_file)
continue
if binarize:
if not (checkImageIsValid(howe_imageBin) and checkImageIsValid(simplebin_imageBin)):
                print('%s is not a valid image in howe or sauvola binarization' % img_file)
continue
if not test:
annotation = gt
label = annotation.encode('utf-8')
imageKey = 'image-%09d' % cnt
labelKey = 'label-%09d' % cnt
fileKey = 'file-%09d' % cnt
if binarize:
howe_imageKey = 'howe-image-%09d' % cnt
simplebin_imageKey = 'simplebin-image-%09d' % cnt
print imageKey
if binarize:
print howe_imageKey
print simplebin_imageKey
cache[imageKey] = imageBin
if binarize:
cache[howe_imageKey] = howe_imageBin
cache[simplebin_imageKey] = simplebin_imageBin
if not test:
cache[labelKey] = label
cache[fileKey] = os.path.basename(img_file)
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d' % (cnt))
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
# read into LMDB dataset from XML
def lmdb_dataset_read(data_dir, output_path, binarize = False, howe_dir = False, simplebin_dir = False, image_dir = False, add_pixel = False):
env = lmdb.open(output_path, map_size=1099511627776)
images = page_images(data_dir)
# print images
cache = {}
cnt = 1
alpha_text = u'' #'0123456789abcdefghijklmnopqrstuvwxyz'
alphabet = []
for c in alpha_text:
alphabet.append(c)
for image in images:
print image
file_image = os.path.join(data_dir,'Images',image.Page.get('imageFilename'))
print(file_image)
image['data'] = cv2.imread(file_image)
page_img = cv2.imread(file_image)
if binarize:
howe_img = cv2.imread(os.path.join(howe_dir, os.path.basename(file_image).lower().partition(".jpg")[0] + "_howe.jpg"))
simplebin_img = cv2.imread(os.path.join(simplebin_dir, os.path.basename(file_image).lower().partition(".jpg")[0] + "_simplebin.jpg"))
for region in image.Page.TextRegion:
print 'region'
print str(region.tag)
line_tags = [c.tag.split('}')[1] for c in region.getchildren()]
if any('TextLine' in l for l in line_tags):
for line in region.TextLine:
print 'line '+line.get('id')
print str(line.Coords.get('points'))
data = line.Coords.get('points')
pts = [tuple(int(x) for x in v.split(',')) for v in data.split()]
print("Image shape")
print(page_img.shape)
line_img = apply_mask(page_img, pts, add_pixel)
if binarize:
howe_line_img = apply_mask(howe_img, pts, add_pixel) # Hopefully this works even though Howe binarization takes out a few pixels
simplebin_line_img = apply_mask(simplebin_img, pts, add_pixel)
line_file_name = '_'.join([os.path.basename(file_image).partition('.')[0], line.get('id')])
print 'line_file_name: ' + line_file_name
if image_dir:
cv2.imwrite(os.path.join(image_dir, line_file_name + ".jpg"), line_img)
imageBin = cv2.imencode('.png', line_img)[1].tostring()
if binarize:
howe_imageBin = cv2.imencode('.png', howe_line_img)[1].tostring()
simplebin_imageBin = cv2.imencode('.png', simplebin_line_img)[1].tostring()
if not checkImageIsValid(imageBin):
                        print('%s is not a valid image' % file_image)
continue
if binarize:
if not (checkImageIsValid(howe_imageBin) and checkImageIsValid(simplebin_imageBin)):
                            print('%s is not a valid image in howe or sauvola binarization' % file_image)
continue
mini_line_tags = [c.tag.split('}')[1] for c in line.getchildren()]
annotation = line.TextEquiv.Unicode.text if any('TextEquiv' in l for l in mini_line_tags) else u''
if annotation is None:
annotation = u''
print("Printing apparent unicode!")
print(annotation)
label = annotation.encode('utf-8')
print("Printing encoded unicode!")
print(label)
for c in annotation:
if not c in alphabet:
alphabet.append(c)
imageKey = 'image-%09d' % cnt
fileKey = 'file-%09d' % cnt
if binarize:
howe_imageKey = 'howe-image-%09d' % cnt
simplebin_imageKey = 'simplebin-image-%09d' % cnt
labelKey = 'label-%09d' % cnt
print imageKey
if binarize:
print howe_imageKey
print simplebin_imageKey
cache[imageKey] = imageBin
cache[fileKey] = line_file_name
if binarize:
cache[howe_imageKey] = howe_imageBin
cache[simplebin_imageKey] = simplebin_imageBin
cache[labelKey] = label
if cnt % 1000 == 0:
writeCache(env, cache)
cache = {}
print('Written %d' % (cnt))
line['database_id'] = cnt
cnt += 1
nSamples = cnt - 1
cache['num-samples'] = str(nSamples)
writeCache(env, cache)
print('Created dataset with %d samples' % nSamples)
alpha_text = u''.join(alphabet)
with io.open("alphabet.txt", "w", encoding=encoding) as text_file:
text_file.write(alpha_text)
def extract_strips(data_dir, output_path): # example of cutting pieces of images out (unused)
# env = lmdb.open(output_path, map_size=1099511627776)
images = page_images(data_dir)
print images
cache = {}
cnt = 1
for image in images:
print image
file_image = os.path.join(data_dir,'Images',image.Page.get('imageFilename'))
image['data'] = cv2.imread(file_image)
page_img = cv2.imread(file_image)
# page_img = image['data']
for region in image.Page.TextRegion:
print 'region'
print str(region.tag)
line_tags = [c.tag.split('}')[1] for c in region.getchildren()]
if any('TextLine' in l for l in line_tags):
for line in region.TextLine:
print 'line '+line.get('id')
print str(line.Coords.get('points'))
data = line.Coords.get('points')
pts = [tuple(int(x) for x in v.split(',')) for v in data.split()]
pts = np.array(pts, np.int32)
xmin = min(pts, key=lambda x: x[0])[0]
xmax = max(pts, key=lambda x: x[0])[0]
ymin = min(pts, key=lambda x: x[1])[1]
ymax = max(pts, key=lambda x: x[1])[1]
updated_pts = [(p[0] - xmin, p[1] - ymin) for p in pts]
line_img = page_img[ymin:ymax, xmin:xmax].copy()
# http://stackoverflow.com/a/15343106/3479446
mask = np.zeros(line_img.shape, dtype=np.uint8)
roi_corners = np.array([updated_pts], dtype=np.int32)
channel_count = 1
if len(line_img.shape) > 2:
channel_count = line_img.shape[2]
ignore_mask_color = (255,) * channel_count
cv2.fillPoly(mask, roi_corners, ignore_mask_color)
line_img[mask == 0] = 255
line['data'] = line_img
imageKey = 'image-%09d' % cnt
cv2.imwrite(os.path.join(output_path, imageKey + '.png'), line_img)
cnt += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', required=True, help='path to dataset')
parser.add_argument('--output_dir', required=True, help='path to lmdb database output')
parser.add_argument('--output_image_dir', type=str, default="None", help='path to cropped image output if desired')
parser.add_argument('--xml', action='store_true', help='whether the data are organized in /Images and /Pages subdirectories with PAGE segmentation file format')
parser.add_argument('--icfhr', action='store_true', help='whether the data are organized according to 2018 ICFHR Handwriting Recognition Competition format')
    parser.add_argument('--russell', action='store_true', help='whether the data are organized as whole-page Russell journal images')
parser.add_argument('--files_include', help='File of filenames to selectively include in the lmdb database from data_dir')
parser.add_argument('--binarize', action='store_true', help='whether to include binarized data in lmdb database')
parser.add_argument('--howe_dir', help='path to howe binarized dataset')
parser.add_argument('--simplebin_dir', help='path to sauvola binarized dataset')
    parser.add_argument('--test', action='store_true', help='whether the data is a test dataset (includes no ground truth text)')
parser.add_argument('--add_pixel', action='store_true', help='whether to include extra pixels along y-axis in line segmentation')
parser.add_argument('--n_pixels', type=int, default=0, help='How many extra pixels to include')
opt = parser.parse_args()
print("Running with options:", opt)
    if not os.path.isdir(opt.output_dir):
        os.makedirs(opt.output_dir)  # create the directory tree without shelling out
    if not (opt.output_image_dir == "None") and not os.path.isdir(opt.output_image_dir):
        os.makedirs(opt.output_image_dir)
if opt.xml:
lmdb_dataset_read(opt.data_dir, opt.output_dir, binarize = opt.binarize, howe_dir = opt.howe_dir, simplebin_dir = opt.simplebin_dir, image_dir = opt.output_image_dir if not opt.output_image_dir == "None" else False, add_pixel = opt.n_pixels if opt.add_pixel else False)
elif opt.icfhr:
if opt.files_include:
with open(opt.files_include, "r") as include_file:
icfhr_dataset_read(opt.data_dir, opt.output_dir, include_file.read().split(), binarize = opt.binarize, howe_dir = opt.howe_dir, simplebin_dir = opt.simplebin_dir, test=opt.test)
else:
icfhr_dataset_read(opt.data_dir, opt.output_dir, binarize = opt.binarize, howe_dir = opt.howe_dir, simplebin_dir = opt.simplebin_dir, test=opt.test)
elif opt.russell:
russell_page_journal(opt.data_dir, opt.output_dir)
else:
simple_dataset_from_dir(opt.data_dir, opt.output_dir) | 40.458333 | 277 | 0.573249 | 2,961 | 23,304 | 4.355961 | 0.133063 | 0.015196 | 0.006203 | 0.011165 | 0.57947 | 0.543728 | 0.498449 | 0.482633 | 0.468677 | 0.435184 | 0 | 0.022841 | 0.314281 | 23,304 | 576 | 278 | 40.458333 | 0.784293 | 0.117662 | 0 | 0.568238 | 0 | 0 | 0.104642 | 0 | 0 | 0 | 0 | 0 | 0.002481 | 0 | null | null | 0 | 0.027295 | null | null | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da4d25bd823544d3dde8ed32e826fbbb55bcbd80 | 1,226 | py | Python | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 16 | 2015-05-20T07:26:30.000Z | 2021-01-23T11:56:57.000Z | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 6 | 2015-03-24T22:07:11.000Z | 2017-03-28T21:31:18.000Z | a10sdk/core/maximum/maximum_paths.py | deepfield/a10sdk-python | bfaa58099f51f085d5e91652d1d1a3fd5c529d5d | [
"Apache-2.0"
] | 23 | 2015-03-29T15:43:01.000Z | 2021-06-02T17:12:01.000Z | from a10sdk.common.A10BaseClass import A10BaseClass
class MaximumPaths(A10BaseClass):
"""Class Description::
Set maximum number of route multipaths installed into FIB.
Class maximum-paths supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param path: {"description": "supported multipath numbers", "format": "number", "default": 4, "optional": true, "maximum": 64, "minimum": 1, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/maximum-paths`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "maximum-paths"
self.a10_url="/axapi/v3/maximum-paths"
self.DeviceProxy = ""
self.path = ""
self.uuid = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
| 32.263158 | 168 | 0.638662 | 145 | 1,226 | 5.344828 | 0.565517 | 0.061935 | 0.036129 | 0.049032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023037 | 0.221044 | 1,226 | 37 | 169 | 33.135135 | 0.788482 | 0.601958 | 0 | 0 | 0 | 0 | 0.084309 | 0.053864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4d50c0cd6f0dd5e191b086879be35c23707ff8 | 331 | py | Python | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | 1 | 2022-02-23T12:33:01.000Z | 2022-02-23T12:33:01.000Z | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | null | null | null | ocun.py | jpcyrino/chunker_dm | 1afde2400b81d0fbc351dcb4658546ef018d2640 | [
"MIT"
] | null | null | null | import sys
filename = sys.argv[1]
fileout = sys.argv[2]
with open(filename, encoding="utf-8", mode="r") as file:
lines = file.read().split("\n")
data_lines = [lines[i] for i in range(0,len(lines),3)]
print(data_lines)
with open(fileout, encoding="utf-8", mode="w") as file:
for line in data_lines:
file.write(line + '\n')
| 20.6875 | 56 | 0.667674 | 59 | 331 | 3.694915 | 0.542373 | 0.123853 | 0.110092 | 0.146789 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021127 | 0.141994 | 331 | 15 | 57 | 22.066667 | 0.746479 | 0 | 0 | 0 | 0 | 0 | 0.048485 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da4f18d031bec1129d069479e75d9c035f860d1d | 2,412 | py | Python | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | galaxy/main/urls.py | changelox/galaxy | fc8e11b36de0b78e55c13c05ffc3a3fcaf8b39dc | [
"Apache-2.0"
] | null | null | null | # (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from django.conf.urls import url
from django.conf import settings
from django.views.decorators.cache import never_cache
from django.contrib.staticfiles.views import serve as serve_staticfiles
from django.views.static import serve as serve_static
from galaxy.main import views
urlpatterns = [
# Non-secure URLs
url(r'^$', views.home, name='home'),
url(r'^explore$', views.explore, name='explore'),
url(r'^intro$', views.intro, name='intro'),
url(r'^accounts/landing[/]?$', views.accounts_landing,
name='accounts-landing'),
url(r'^list$', views.list_category, name='list-category'),
url(r'^detail$', views.detail_category, name='detail-category'),
url(r'^roleadd$', views.role_add_view, name='role-add-category'),
url(r'^imports$', views.import_status_view, name='import-status'),
url(r'^stars$', views.stars_list_view, name='stars-list'),
# Logged in/secured URLs
url(r'^accounts/connect/$', views.accounts_connect),
url(r'^accounts/connect/success/$', views.accounts_connect_success,
name='accounts-connect-success'),
url(r'^accounts/profile/$', views.accounts_profile,
name='accounts-profile'),
url(r'^authors/$', views.NamespaceListView.as_view(),
name='namespace-list'),
url(r'^([\w\-._+]+)/$', views.RoleListView.as_view(), name='role-list'),
url(r'^([\w\-._+]+)/([\w\-._+]+)/$',
views.RoleDetailView.as_view(), name='role-detail'),
]
# FIX
if settings.DEBUG:
urlpatterns += [
url(r'^static/(?P<path>.*)$',
never_cache(serve_staticfiles))
]
else:
urlpatterns += [
url(r'^static/(?P<path>.*)$', serve_static,
kwargs={'document_root': settings.STATIC_ROOT})
]
| 37.107692 | 76 | 0.681177 | 331 | 2,412 | 4.882175 | 0.380665 | 0.042079 | 0.029703 | 0.022277 | 0.032178 | 0.032178 | 0 | 0 | 0 | 0 | 0 | 0.004464 | 0.164179 | 2,412 | 64 | 77 | 37.6875 | 0.797123 | 0.285655 | 0 | 0.052632 | 0 | 0 | 0.250147 | 0.083969 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.184211 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da52f7ade412e44099bbd5c88acd3fc976745c19 | 1,002 | py | Python | PROJ/LEVY/RN_CHF/cf_RN_KoBoL.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/RN_CHF/cf_RN_KoBoL.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | null | null | null | PROJ/LEVY/RN_CHF/cf_RN_KoBoL.py | mattslezak-shell/PROJ_Option_Pricing_Matlab | 6105bd00ba3471802180c122fdf81e90833a91c4 | [
"MIT"
] | 1 | 2022-01-07T15:31:45.000Z | 2022-01-07T15:31:45.000Z | # Generated with SMOP 0.41-beta
try:
from smop.libsmop import *
except ImportError:
raise ImportError('File compiled with `smop3`, please install `smop3` to run it.') from None
# cf_RN_KoBoL.m
@function
def cf_RN_KoBoL(u=None,T=None,r=None,c=None,lam_p=None,lam_m=None,nu=None,*args,**kwargs):
varargin = cf_RN_KoBoL.varargin
nargin = cf_RN_KoBoL.nargin
# KoBoL RN CHF - NOTE: params have been
# written in correspondence with CGMY, which is a subclass of KoBoL
C=copy(c)
# cf_RN_KoBoL.m:4
M=copy(lam_p)
# cf_RN_KoBoL.m:4
G=- lam_m
# cf_RN_KoBoL.m:4
Y=copy(nu)
# cf_RN_KoBoL.m:4
m=dot(dot(C,gamma(- Y)),((M - 1) ** Y - M ** Y + (G + 1) ** Y - G ** Y))
# cf_RN_KoBoL.m:5
y=dot(dot(dot(C,T),gamma(- Y)),((M - dot(1j,u)) ** Y - M ** Y + (G + dot(1j,u)) ** Y - G ** Y))
# cf_RN_KoBoL.m:6
y=exp(dot(dot(dot(1j,u),T),(r - m)) + y)
# cf_RN_KoBoL.m:7
return y
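# For reference, the code above evaluates the standard risk-neutral CGMY/KoBoL CHF:
#   phi(u) = exp( 1j*u*T*(r - m)
#                 + C*T*gamma(-Y)*((M - 1j*u)**Y - M**Y + (G + 1j*u)**Y - G**Y) )
# where m = C*gamma(-Y)*((M - 1)**Y - M**Y + (G + 1)**Y - G**Y) is the
# martingale (risk-neutral drift) correction.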
if __name__ == '__main__':
pass
| 27.833333 | 100 | 0.58483 | 185 | 1,002 | 2.983784 | 0.378378 | 0.07971 | 0.179348 | 0.144928 | 0.150362 | 0.09058 | 0.047101 | 0 | 0 | 0 | 0 | 0.022487 | 0.245509 | 1,002 | 36 | 101 | 27.833333 | 0.707672 | 0.259481 | 0 | 0 | 1 | 0 | 0.099138 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0.055556 | 0.166667 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
da5590e5f9e56a1abcc8575518a715e6c27967cd | 161 | py | Python | hello-world.py | rmoralesdelgado/example-repo | a0d6f935264ec60a0278ea9fc5b8a694b5e33f0b | [
"MIT"
] | null | null | null | hello-world.py | rmoralesdelgado/example-repo | a0d6f935264ec60a0278ea9fc5b8a694b5e33f0b | [
"MIT"
] | null | null | null | hello-world.py | rmoralesdelgado/example-repo | a0d6f935264ec60a0278ea9fc5b8a694b5e33f0b | [
"MIT"
] | null | null | null | # This is a modified line
if __name__ == "__main__":
print("Hello World")
print("I am a new line")
    print("hi")
# blah blah
def myfunc_1():
pass
| 10.733333 | 28 | 0.608696 | 25 | 161 | 3.56 | 0.84 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008475 | 0.267081 | 161 | 14 | 29 | 11.5 | 0.745763 | 0.21118 | 0 | 0 | 0 | 0 | 0.274194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0.166667 | 0 | 0 | 0.166667 | 0.5 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 4 |
da55b03a123eb70e524ac2c9ba783fe7003f5224 | 250 | py | Python | mundo 1/ex020.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 0342e482780b5a1c6f78cddd51d9bfad785c79fa | [
"MIT"
] | 1 | 2021-08-04T13:21:22.000Z | 2021-08-04T13:21:22.000Z | mundo 1/ex020.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 0342e482780b5a1c6f78cddd51d9bfad785c79fa | [
"MIT"
] | null | null | null | mundo 1/ex020.py | thiagofreitascarneiro/Curso-de-Python---Curso-em-Video | 0342e482780b5a1c6f78cddd51d9bfad785c79fa | [
"MIT"
] | null | null | null | import random
n1 = str(input('First student: '))
n2 = str(input('Second student: '))
n3 = str(input('Third student: '))
n4 = str(input('Fourth student: '))
lista = [n1, n2, n3, n4]
Ordem = random.sample(lista, k=4)  # draw all four names in random order
print(f'The presentation order will be: {Ordem}') | 27.777778 | 47 | 0.692 | 41 | 250 | 4.219512 | 0.585366 | 0.184971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040909 | 0.12 | 250 | 9 | 47 | 27.777778 | 0.745455 | 0 | 0 | 0 | 0 | 0 | 0.358566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da58277b5c2af60a518ecbd9a3ef1bdee746623d | 1,306 | py | Python | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | python3/ais_sdk/utils.py | MeekoI/ais-sdk | 76240abc49795e914988f3cafb6d08f60dbdcb4c | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import base64
import urllib.request
import ais_sdk.ais as ais
_ENDPOINT = {
'image': {
'cn-north-1':'image.cn-north-1.myhuaweicloud.com',
'ap-southeast-1':'image.ap-southeast-1.myhuaweicloud.com'
},
'moderation': {
'cn-north-1':'moderation.cn-north-1.myhuaweicloud.com',
'ap-southeast-1':'moderation.ap-southeast-1.myhuaweicloud.com'
}
}
def encode_to_base64(filename):
"""
    Encode the contents of a file as a base64 string.
    :param filename: path of the file to encode
    :return: base64-encoded bytes of the file contents
"""
imgstr = ""
with open(filename, 'rb') as file:
imgstr = base64.b64encode(file.read())
return imgstr
def download_url_base64(url):
return base64.b64encode(urllib.request.urlopen(url).read())
def decode_to_wave_file(base64_encoded_str, filename):
'''
decode base64 stream to wave file
:param base64_encoded_str:
:return:
'''
wave_data = base64.b64decode(base64_encoded_str)
wf = open(filename, 'wb')
wf.write(wave_data)
wf.close()
def get_endpoint(type):
region_name = get_region()
return _ENDPOINT[type].get(region_name)
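# Usage of the region helpers below (region value is only an example):
#   init_global_env('cn-north-1')
#   get_endpoint('image')   # -> 'image.cn-north-1.myhuaweicloud.com'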
def get_region():
return os.environ.get(ais.AisService.REGION_MSG)
def init_global_env(region):
os.environ[ais.AisService.REGION_MSG] = region
| 24.641509 | 70 | 0.666156 | 171 | 1,306 | 4.929825 | 0.350877 | 0.033215 | 0.03796 | 0.030842 | 0.151839 | 0.085409 | 0.085409 | 0.085409 | 0 | 0 | 0 | 0.035373 | 0.199081 | 1,306 | 52 | 71 | 25.115385 | 0.770554 | 0.123277 | 0 | 0 | 0 | 0 | 0.201275 | 0.140255 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.121212 | 0.060606 | 0.424242 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5863a5ec445793ea40d771aa319962f8ec9010 | 609 | py | Python | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | 7 | 2021-03-17T01:23:28.000Z | 2021-05-06T20:41:21.000Z | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | GUI/dialogs/propulsion_dialogs/propulsion_dialog.py | StepLogic/Parametric-Drone-Design-Software | be9c537427f85b08c071c2666712fd32643cd439 | [
"Unlicense"
] | null | null | null | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from GUI.tabs.propulsion_tab.propulsion_tab import propulsion_tab
class propulsion_dialog(QDialog):
def __init__(self):
super().__init__()
self.tab = propulsion_tab()
        self.layout = self.tab.create_widget()
self.buttons = QDialogButtonBox(
QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
Qt.Horizontal, self)
self.layout.addWidget(self.buttons)
self.buttons.accepted.connect(self.accept)
self.buttons.rejected.connect(self.reject)
self.setLayout(self.layout)
| 30.45 | 65 | 0.689655 | 68 | 609 | 5.970588 | 0.470588 | 0.128079 | 0.078818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004175 | 0.213465 | 609 | 19 | 66 | 32.052632 | 0.843424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.066667 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da58d75367a4513d4ada4db3e0cf52dc127dc010 | 726 | py | Python | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | 1 | 2020-09-20T03:55:00.000Z | 2020-09-20T03:55:00.000Z | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | null | null | null | blind_75/06_removeNthFromEnd.py | NursultanBeken/leetcode_practice | 8aa8a033f95110aafa6acd9ebf842d716fd7552b | [
"MIT"
] | null | null | null | """
Dummy node, two pointers, swap nodes
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
dummy = ListNode(0, head)
left = dummy
right = head
while n>0 and right:
right = right.next
n -=1
while right:
right = right.next
left = left.next
left.next = left.next.next
return dummy.next | 22 | 43 | 0.479339 | 77 | 726 | 4.467532 | 0.441558 | 0.116279 | 0.087209 | 0.110465 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009547 | 0.422865 | 726 | 33 | 44 | 22 | 0.811456 | 0.326446 | 0 | 0.153846 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da58d89c89872e8d53a290617cb5b532f0d040f3 | 1,157 | py | Python | hard-gists/1023456/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/1023456/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/1023456/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | import scriptcontext
import time
import System
import Rhino
rc, view = Rhino.Input.RhinoGet.GetView("select view")
print "position mouse where you want"
for i in [5,4,3,2,1]:
time.sleep(0.5)
print i
screen_point = System.Windows.Forms.Cursor.Position
print "screen_point =", screen_point
# convert screen coordinates to the client coordinates of
# the active view
view = scriptcontext.doc.Views.ActiveView
view_screen_rect = view.ScreenRectangle
x, y = screen_point.X - view_screen_rect.Left, screen_point.Y - view_screen_rect.Top
view_client_point = System.Drawing.Point(x, y)
print "view_client_point =", view_client_point
# convert the client coordinates of the view to the client
# coordinates of the active viewport (there are only multiple
# active viewports when working in layouts)
viewport = view.ActiveViewport
rc, viewport_point = viewport.ClientToScreenPort(view_client_point)
print "viewport_point =", viewport_point
rc, line = viewport.GetFrustumLine(viewport_point.X, viewport_point.Y)
if rc:
scriptcontext.doc.Objects.AddPoint(line.From)
scriptcontext.doc.Objects.AddPoint(line.To)
scriptcontext.doc.Views.Redraw(); | 33.057143 | 84 | 0.78911 | 169 | 1,157 | 5.260355 | 0.39645 | 0.061867 | 0.067492 | 0.074241 | 0.181102 | 0.074241 | 0.074241 | 0 | 0 | 0 | 0 | 0.006903 | 0.123596 | 1,157 | 35 | 85 | 33.057143 | 0.869822 | 0.19879 | 0 | 0 | 0 | 0 | 0.096529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.166667 | null | null | 0.208333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da590cce76c1f0b75eaa800793b065c606ed64fc | 62 | py | Python | core/models/abstract/__init__.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/models/abstract/__init__.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | core/models/abstract/__init__.py | jcquinlan/colophon | 96f3eec0a524cb1fe3d655f3cc850b125f4aaff4 | [
"MIT"
] | null | null | null | from .user_document_interaction import UserDocumentInteraction | 62 | 62 | 0.935484 | 6 | 62 | 9.333333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048387 | 62 | 1 | 62 | 62 | 0.949153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 5 |
da5ad4fa6a5854b1d737a7b9cbf69ded01ce0a95 | 769 | py | Python | EasyNN/optimizer/nesterov.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 5 | 2021-01-28T21:19:02.000Z | 2022-02-03T05:47:47.000Z | EasyNN/optimizer/nesterov.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 1 | 2021-02-04T20:57:45.000Z | 2021-03-03T14:49:44.000Z | EasyNN/optimizer/nesterov.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 2 | 2021-02-12T04:27:40.000Z | 2021-12-19T20:11:20.000Z | """
TODO: Not complete.
"""
from __future__ import annotations
from EasyNN.optimizer.momentum_descent import MomentumDescent
import EasyNN.model.abc
class Nesterov(MomentumDescent):
"""Nesterov uses parameters -= lr * momentum, where the momentum is computed by looking ahead."""
def get_derivatives(self: Nesterov, model: EasyNN.model.abc.Model) -> Array1D[float]:
"""Computes the derivatives for the optimizer."""
if model.training.iteration == 0:
return super().get_derivatives(model)
parameters = model.parameters.copy()
self.on_training_start(model, model._derivative_momentum.value)
super().get_derivatives(model)
model.parameters = parameters
return model._derivative_momentum.value
| 36.619048 | 101 | 0.717815 | 87 | 769 | 6.183908 | 0.517241 | 0.078067 | 0.052045 | 0.089219 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003185 | 0.183355 | 769 | 20 | 102 | 38.45 | 0.853503 | 0.20156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.583333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
da5c80138d99d10b2de1007d8d74ecb978a6b876 | 3,483 | py | Python | dl/cifar_python_data_layer.py | zsffq999/DISH | 2285747d9a410363ce09778aed5314a2e1b1aed9 | [
"MIT"
] | 3 | 2018-09-22T13:13:46.000Z | 2020-05-09T07:24:44.000Z | dl/cifar_python_data_layer.py | zsffq999/DISH | 2285747d9a410363ce09778aed5314a2e1b1aed9 | [
"MIT"
] | null | null | null | dl/cifar_python_data_layer.py | zsffq999/DISH | 2285747d9a410363ce09778aed5314a2e1b1aed9 | [
"MIT"
] | null | null | null | # imports
import caffe
import numpy as np
from random import shuffle
import cPickle as cp
import scipy.io as sio
class PythonDataLayer(caffe.Layer):
"""
    This is a simple synchronous datalayer for training a single-label
    classification model on CIFAR.
"""
def setup(self, bottom, top):
self.top_names = ['data', 'label']
# === Read input parameters ===
# params is a python dictionary with layer parameters.
params = eval(self.param_str)
        # Check the parameters for validity.
# store input as class variables
self.phase = params['phase']
self.batch_size = params['batch_size']
# Create a batch loader to load the images.
self.batch_loader = BatchLoader(params, None)
# === reshape tops ===
# since we use a fixed input image size, we can shape the data layer
# once. Else, we'd have to do it in the reshape call.
top[0].reshape(
self.batch_size, 3, params['height'], params['width'])
        # One integer class label per image, so the label top is 1-D.
top[1].reshape(self.batch_size)
print "PythonDataLayer init success", params
# print_info("PythonDataLayer", params)
def forward(self, bottom, top):
"""
Load data.
"""
imgs, labels = self.batch_loader.load_next_batch()
top[0].data[...] = imgs
top[1].data[...] = labels
def reshape(self, bottom, top):
"""
There is no need to reshape the data, since the input is of fixed size
(rows and columns)
"""
pass
def backward(self, top, propagate_down, bottom):
"""
These layers does not back propagate
"""
pass
class BatchLoader(object):
"""
This class abstracts away the loading of images.
Images can either be loaded singly, or in a batch. The latter is used for
the asyncronous data layer to preload batches while other processing is
performed.
"""
def __init__(self, params, result):
self.result = result
self.batch_size = params['batch_size']
self.height = params['height']
self.width = params['width']
self.is_train = (params['phase']=='TRAIN')
# get data
self.data = (np.load('cifar10_data/cifar10_data.npy'), np.load('cifar10_data/cifar10_label.npy'))
# get list of image indexes.
self._cur = 0 # current image
self.n_data = 5000 if self.is_train else 10000
self.indexlist = np.arange(self.n_data, dtype=np.int32) if self.is_train else 50000+np.arange(self.n_data, dtype=np.int32)
# preprocess: compute img mean
self.img_mean = np.load('cifar10_data/cifar10_mean.npy').reshape((1, 3, self.height, self.width))
def load_next_batch(self):
"""
Load the next image in a batch.
"""
if self._cur + self.batch_size <= len(self.indexlist):
index = self.indexlist[self._cur:self._cur+self.batch_size]
self._cur += self.batch_size
else:
index = np.zeros(self.batch_size, dtype=np.int32)
index[:len(self.indexlist)-self._cur] = self.indexlist[self._cur:]
if self.is_train:
shuffle(self.indexlist)
index[len(self.indexlist)-self._cur:] = self.indexlist[:self.batch_size-len(self.indexlist)+self._cur]
self._cur = self.batch_size-len(self.indexlist)+self._cur
imgs = self.data[0][index].astype(np.float32)
imgs[:,:,:,:] = imgs[:,::-1,:,:]
imgs -= self.img_mean
if self.is_train:
flip_ind = np.argwhere(np.random.rand(self.batch_size)>0.5)[:,0]
imgs[flip_ind,:,:,:] = imgs[flip_ind,:,:,::-1]
labels = self.data[1][index].astype(np.float32)
return imgs, labels
| 29.025 | 125 | 0.669825 | 509 | 3,483 | 4.471513 | 0.328094 | 0.051406 | 0.06283 | 0.052724 | 0.211336 | 0.155975 | 0.131371 | 0.131371 | 0.074692 | 0 | 0 | 0.019685 | 0.197818 | 3,483 | 119 | 126 | 29.268908 | 0.794918 | 0.146713 | 0 | 0.109091 | 0 | 0 | 0.078686 | 0.038046 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.036364 | 0.090909 | null | null | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da5d5dda91394d5fcd0bc5d32616b3e16dc5d436 | 875 | py | Python | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | 7 | 2021-10-11T12:07:08.000Z | 2022-01-10T01:19:36.000Z | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | null | null | null | ethsential/__main__.py | 1140251/Ethsential | 1de423358f5a0ba8b84d80fa63bce09552bca9fd | [
"Apache-2.0"
] | null | null | null | import sys
from .src.applications.server import ETHSENTIAL
from .src.applications.cli import CLI
from .src.parser import create_parser
def main():
parser = create_parser()
args = parser.parse_args()
if args.action == 'cli':
try:
CLI.exec_cmd(args)
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
sys.exit(0)
elif args.action == 'install':
try:
CLI.install()
except Exception as e:
if hasattr(e, 'message'):
print(getattr(e, 'message', repr(e)))
else:
print(e)
elif args.action == 'tcp':
ETHSENTIAL.start_tcp(args.host, args.port)
else:
ETHSENTIAL.start_io()
if __name__ == '__main__':
main()
| 24.305556 | 53 | 0.537143 | 101 | 875 | 4.514851 | 0.386139 | 0.070175 | 0.083333 | 0.078947 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0.307018 | 0 | 0.001742 | 0.344 | 875 | 35 | 54 | 25 | 0.792683 | 0 | 0 | 0.433333 | 0 | 0 | 0.056 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.166667 | 0.133333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5ea178b4528bc2e8ee17e0a8132d23a6388e83 | 2,322 | py | Python | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 117 | 2018-06-22T08:49:36.000Z | 2022-01-30T17:08:29.000Z | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 17 | 2018-07-05T04:06:47.000Z | 2020-09-07T06:19:25.000Z | scripts/msig_prods_update_tag.py | xenbo/eosforce | f77a73c2b49f40f8af5c11a13b0a7eb069e02b5f | [
"MIT"
] | 42 | 2018-06-22T08:57:42.000Z | 2022-03-28T13:08:02.000Z | #!/usr/bin/env python3
import argparse
import json
import os
import re
import subprocess
import sys
import time
enable_push = True # True to push on chain
cleos = '../build/programs/cleos/cleos --wallet-url http://127.0.0.1:6666 --url http://127.0.0.1:8001 '
wallet_password = ''
wallet_name = 'testc'
active_account = 'testc'
funcs_to_open = [
( 'f.cprod', 10000000 ),
( 'f.votagen', 10000010 )
]
tx_expire_hours = 120 # 5days
def jsonArg(a):
return " '" + json.dumps(a) + "' "
def run(args):
print('', args)
if subprocess.call(args, shell=True):
print(' exiting because of error')
sys.exit(1)
def runone(args):
print('', args)
subprocess.call(args, shell=True)
def getOutput(args):
print('', args)
proc = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE)
return proc.communicate()[0].decode('utf-8')
def getJsonOutput(args):
return json.loads(getOutput(args))
def getbps():
bpsa = []
bpsj = getJsonOutput(cleos + " get schedule -j ")
for bp in bpsj["active"]["producers"]:
bpsa.append(bp["producer_name"])
return bpsa
def msigProposeUpdateTag(proposer, bps, func_name, open_block_num, expirehours):
requestedPermissions = []
for i in range(0, len(bps)):
requestedPermissions.append({'actor': bps[i], 'permission': 'active'})
trxPermissions = [{'actor': 'eosio', 'permission': 'active'}]
action_name = 'setconfig'
data = {
'typ': func_name,
'num': open_block_num,
'key': '',
'fee': '0.0000 EOS'
}
run(cleos + 'multisig propose '
+ func_name + jsonArg(requestedPermissions) + jsonArg(trxPermissions)
+ 'eosio ' + action_name + jsonArg(data) + ' '
+ proposer + ' ' + str(expirehours) + ' -p ' + proposer)
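# The call above composes a cleos command of roughly this shape (account names
# and JSON values below are illustrative, not taken from a real run):
#   cleos multisig propose f.cprod \
#       '[{"actor": "bpname1", "permission": "active"}, ...]' \
#       '[{"actor": "eosio", "permission": "active"}]' \
#       eosio setconfig '{"typ": "f.cprod", "num": 10000000, "key": "", "fee": "0.0000 EOS"}' \
#       testc 120 -p testc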
# ---------------------------------------------------------------------------------------------------
# msig to update system contract
# unlock wallet
unlockwallet_str = 'cleos wallet unlock -n ' + wallet_name + ' --password ' + wallet_password
runone(unlockwallet_str)
# get schedule active bps
active_bps = getbps()
for ( func_name, func_block_num ) in funcs_to_open:
msigProposeUpdateTag(active_account, active_bps, func_name, func_block_num, tx_expire_hours)
time.sleep(3)
| 26.089888 | 103 | 0.615418 | 275 | 2,322 | 5.069091 | 0.443636 | 0.028694 | 0.027977 | 0.015782 | 0.086083 | 0.018651 | 0 | 0 | 0 | 0 | 0 | 0.027657 | 0.205857 | 2,322 | 88 | 104 | 26.386364 | 0.728308 | 0.093885 | 0 | 0.04918 | 0 | 0.016393 | 0.162136 | 0.013829 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114754 | false | 0.032787 | 0.114754 | 0.032787 | 0.295082 | 0.065574 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5fc436cce22928bf1e7b8ba50df3169ca33055 | 7,027 | py | Python | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | 8 | 2020-08-26T21:05:32.000Z | 2021-08-18T06:55:24.000Z | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | null | null | null | maskrcnn/preprocess/download_googlestaticmap.py | JBoshoff/Replicate-night-light | 5bdfbb99fe38f98f61f733f4e847be2bb6f559ef | [
"MIT"
] | 2 | 2021-10-20T12:43:00.000Z | 2022-01-04T19:40:16.000Z | """This downloader downloads satellite images from the Google Static Maps API.
Usage:
$ python download_googlestaticmap.py \
> --log LOG_FILE.csv \
> --initialize INIT_FILE.csv
$ nohup python download_googlestaticmap.py \
> --log LOG_FILE.csv \
> --num 3 \
> --download-dir DIR \
> > logs/download_googlestaticmap.log &
"""
import os
import pandas as pd
import requests
from argparse import ArgumentParser
from tqdm import tqdm
class Downloader(object):
"""This class keeps a log of the downloading process,
checks for duplicates and manages bad HTTP requests.
Args:
queue (pandas.DataFrame): Log of the downloaded objects.
"""
def __init__(self, queue=None):
# if downloading for the first time
if queue is None:
# create an empty queue
self.queue = pd.DataFrame(columns=['index', 'url', 'status'])
self.queue.set_index('index', inplace=True)
self.queue.index.name = 'index'
# if not, load previous log
else:
self.queue = queue
def request(self, indices, mapping):
"""This method requests objects to be downloaded and adds them to the queue.
Args:
indices (numpy.array): unique id for each object in the queue.
mapping (callable): takes in the indices and generates the urls.
"""
urls = [mapping(index) for index in indices]
subqueue = pd.DataFrame(
{'url': urls,
'status': False},
index=indices)
subqueue.index.name = 'index'
try:
self.queue = pd.concat([self.queue, subqueue],
verify_integrity=True)
print('{} new requests initiated.'.format(subqueue.shape[0]))
except ValueError:
raise Exception('Overlapping new requests with existing requests.')
def download(self, num, download_dir,
test_page='https://www.google.com',
suffix='.png', min_size=20000):
"""This method downloads objects.
Args:
num (int): number of downloads to perform.
download_dir (str): downloading directory.
test_page (str): url to try in order to check internet connection.
suffix (str): suffix for saved files.
min_size (int): minimum file size. Helps drop NA images.
"""
# check local directory
if not os.path.isdir(download_dir):
raise Exception('Download directory does not exist.')
# check internet connection
_ = requests.get(test_page, timeout=1)
# extract items already downloaded
mask = self.queue['status']
if not mask.all():
# number of files to be downloaded
update_num = min((~mask).sum(), num)
print('Preparing to download {} files.'.format(update_num))
idxs = self.queue[~mask].index.copy()
idxs = idxs[0:update_num]
# downloading starts
for idx in tqdm(idxs):
# fetch url
url = self.queue.loc[idx, 'url']
# construct file names
file_name = os.path.join(download_dir, ''.join([idx, suffix]))
# check if file exists already
if os.path.isfile(file_name):
# update status
self.queue.loc[idx, 'status'] = True
print('{} already exists.'.format(file_name))
else:
r = requests.get(url)
if int(r.headers['Content-Length']) > min_size:
with open(file_name, 'wb') as f:
_ = f.write(r.content)
# update status
self.queue.loc[idx, 'status'] = True
print('{} successfully downloaded.'.format(file_name))
else:
print('{} skipped - file too small: {} bytes.'.format(
file_name, int(r.headers['Content-Length'])))
print(url)
self.queue.drop(idx, inplace=True)
        # Re-read the status column: `mask` was captured before the loop ran
        # and may not reflect the downloads just performed.
        if self.queue['status'].all():
            print('Downloading completed.')
def make_url(idx, df, GOOGLE_API_KEY):
"""Helper function to generate the urls for the Google Static Maps API.
Args:
        idx (str): Identifies an image.
df (pandas.DataFrame): Stores image info.
GOOGLE_API_KEY (str)
Returns:
url (str): The URL to the image.
"""
params = {
'center': ('{:.6f},{:.6f}'
.format(df.loc[idx, 'lat'], df.loc[idx, 'lon'])),
'zoom': '19',
'size': '640x640',
'scale': '2',
'maptype': 'satellite',
'key': GOOGLE_API_KEY}
params_str = '&'.join(['{}={}'.format(k, v) for k, v in params.items()])
return '?'.join(['https://maps.googleapis.com/maps/api/staticmap',
params_str])
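# An illustrative URL produced by make_url (coordinates and key are made up):
#   https://maps.googleapis.com/maps/api/staticmap?center=-1.286389,36.817223&zoom=19&size=640x640&scale=2&maptype=satellite&key=YOUR_API_KEY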
def run(args):
"""Runs the script.
Args:
args (argparse.Namespace): Command line arguments.
"""
assert args.log is not None, 'Input log file path!'
# parse and make url list
if args.initialize is not None:
downloader = Downloader()
# fetch authentication key
with open(args.api_key, 'r') as f:
GOOGLE_API_KEY = f.read()
# read coordinates and index
df = pd.read_csv(args.initialize, index_col='index')
df = df.filter(items=['lon', 'lat'])
downloader.request(indices=df.index.values,
mapping=lambda x: make_url(x, df, GOOGLE_API_KEY))
else:
queue = pd.read_csv(args.log, index_col='index')
downloader = Downloader(queue=queue)
# download
if args.num is not None:
assert args.download_dir is not None, 'Input download directory!'
downloader.download(num=args.num, download_dir=args.download_dir)
# save the log
downloader.queue.to_csv(args.log)
if __name__ == '__main__':
# parse arguments passed from the command line
parser = ArgumentParser(
description='Downloads satellite images from Google Statics Maps API.')
parser.add_argument('--log', default=None, type=str,
help='name of log file (.csv)')
# request
parser.add_argument('--initialize', default=None, type=str,
help='a new list of files to be downloaded')
parser.add_argument(
'--api-key', default='GOOGLE_API_KEY.txt',
help='file that stores the API key, defaults to GOOGLE_API_KEY.txt')
# download
parser.add_argument(
'--num', default=None, type=int,
help='number of downloads to perform, this flag turns on downloading')
parser.add_argument('--download-dir', default=None, type=str,
help='downloading directory')
# parse
args = parser.parse_args()
run(args)
| 36.035897 | 84 | 0.565675 | 819 | 7,027 | 4.763126 | 0.295482 | 0.029992 | 0.021533 | 0.011536 | 0.10869 | 0.044091 | 0.044091 | 0.044091 | 0.021533 | 0 | 0 | 0.004203 | 0.322897 | 7,027 | 194 | 85 | 36.221649 | 0.815679 | 0.259713 | 0 | 0.076923 | 0 | 0 | 0.174419 | 0 | 0 | 0 | 0 | 0 | 0.019231 | 1 | 0.048077 | false | 0 | 0.048077 | 0 | 0.115385 | 0.067308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da5fcd544f0ebf019068c4964041f2d02daca8dc | 2,606 | py | Python | pyrh/endpoints.py | JamMarHer/pyrh | b5501482974d9a7ba9f34745642d0a2e585154f2 | [
"MIT"
] | null | null | null | pyrh/endpoints.py | JamMarHer/pyrh | b5501482974d9a7ba9f34745642d0a2e585154f2 | [
"MIT"
] | null | null | null | pyrh/endpoints.py | JamMarHer/pyrh | b5501482974d9a7ba9f34745642d0a2e585154f2 | [
"MIT"
] | null | null | null | BASE_API = "https://api.robinhood.com"
def login():
return BASE_API + "/oauth2/token/"
def logout():
return BASE_API + "/oauth2/revoke_token/"
def investment_profile():
return BASE_API + "/user/investment_profile/"
def accounts():
return BASE_API + "/accounts/"
def ach(option):
"""
Combination of 3 ACH endpoints. Options include:
* iav
* relationships
* transfers
"""
return (
BASE_API + "/ach/iav/auth/" if option == "iav" else BASE_API + f"/ach/{option}/"
)
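# Illustrative outputs (the "iav" option is special-cased above):
#   ach("iav")       -> "https://api.robinhood.com/ach/iav/auth/"
#   ach("transfers") -> "https://api.robinhood.com/ach/transfers/"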
def applications():
return BASE_API + "/applications/"
def dividends():
return BASE_API + "/dividends/"
def edocuments():
return BASE_API + "/documents/"
def instruments(instrument_id=None, option=None):
"""
Return information about a specific instrument by providing its instrument id.
Add extra options for additional information such as "popularity"
"""
    url = BASE_API + "/instruments/"
if instrument_id is not None:
url += f"{instrument_id}"
if option is not None:
url += f"{option}"
return url
def margin_upgrades():
return BASE_API + "/margin/upgrades/"
def markets():
return BASE_API + "/markets/"
def notifications():
return BASE_API + "/notifications/"
def orders(order_id=""):
return BASE_API + f"/orders/{order_id}"
def password_reset():
return BASE_API + "/password_reset/request/"
def portfolios():
return BASE_API + "/portfolios/"
def positions():
return BASE_API + "/positions/"
def quotes():
return BASE_API + "/quotes/"
def historicals():
return BASE_API + "/quotes/historicals/"
def document_requests():
return BASE_API + "/upload/document_requests/"
def user():
return BASE_API + "/user/"
def watchlists():
return BASE_API + "/watchlists/"
def news(stock):
return BASE_API + f"/midlands/news/{stock}/"
def fundamentals(stock):
return BASE_API + f"/fundamentals/{stock}/"
def tags(tag):
"""
Returns endpoint with tag concatenated.
"""
return BASE_API + f"/midlands/tags/tag/{tag}/"
def chain(instrument_id):
return BASE_API + f"/options/chains/?equity_instrument_ids={instrument_id}"
def options(chain_id, dates, option_type):
return (
BASE_API
+ f"/options/instruments/?chain_id={chain_id}&expiration_dates={dates}"
+ f"&state=active&tradability=tradable&type={option_type}"
)
def market_data(option_id):
return BASE_API + f"/marketdata/options/{option_id}/"
def convert_token():
return BASE_API + "/oauth2/migrate_token/"
| 18.748201 | 88 | 0.653492 | 320 | 2,606 | 5.140625 | 0.309375 | 0.12766 | 0.213374 | 0.059574 | 0.103343 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001947 | 0.211819 | 2,606 | 138 | 89 | 18.884058 | 0.798929 | 0.107061 | 0 | 0.029412 | 0 | 0 | 0.29713 | 0.17351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.411765 | false | 0.029412 | 0 | 0.367647 | 0.823529 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 4 |
da5fdaa047dc2cba3b58d7d4eab745c4fc398500 | 9,138 | py | Python | task.py | zhester/aptask | 4fc5c2bfe8dbe373e2ddc5bc15562885bb20b28e | [
"BSD-2-Clause"
] | null | null | null | task.py | zhester/aptask | 4fc5c2bfe8dbe373e2ddc5bc15562885bb20b28e | [
"BSD-2-Clause"
] | null | null | null | task.py | zhester/aptask | 4fc5c2bfe8dbe373e2ddc5bc15562885bb20b28e | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
User Task Interface
This module should be used to implement new task execution drivers.
Semantically, a task is the object a worker creates and manipulates to execute
long-running code or programs.
"""
import inspect
import data
#=============================================================================
class NotSupported( NotImplementedError ):
"""
Exception raised by methods that are not supported by the current task
instance.
"""
#=========================================================================
def __str__( self ):
"""
Convert to string representation.
@return A string describing the exception
"""
return 'Method not supported by this object.'
#=============================================================================
class Report( data.Data ):
"""
The object sent to the worker when reporting the status and progress of
a task.
"""
#=========================================================================
ERROR = -1 # task encountered an error
INIT = 0 # task is initialized
RUNNING = 1 # task is executing as normal
DONE = 2 # task is done executing
#=========================================================================
status_strings = ( 'initialized', 'running', 'done' )
#=========================================================================
def __init__( self, status = INIT, progress = 0.0, message = None ):
"""
Constructor.
@param status Current task status (ERROR, INIT, RUNNING, DONE)
@param progress Current task progress (0.0 to 1.0)
@param message User-friendly message about progress (string)
"""
# load arguments into object state
self.super_init( vars() )
#=========================================================================
def is_done( self ):
"""
Informs interested parties if the task has completed.
@return True when task has finished executing
"""
return self.status == self.DONE
#=============================================================================
class Task( object ):
"""
Object created and used by a worker process to start and monitor a task.
Adding a new task interface requires implementing a child of this class.
Child classes may implement the following methods:
abort Called to stop the task before completion
getargs Used to describe acceptable arguments
initialize Called to initialize or start the task
process Called iteratively until the task is complete
abort, initialize, and process must all return a Report object.
"""
#=========================================================================
def __init__( self, arguments = None ):
"""
Constructor.
@param arguments
Argument values requested for task execution
"""
self.arguments = None
self.report = Report()
self.valid_arguments = self._load_args( arguments )
#=========================================================================
@classmethod
def getargs( cls ):
"""
Retrieves the argument list for this task.
An argument list is a list of dicts. Each dict describes an argument
with the following keys:
name Binding name
default Default value if not supplied (implies type)
required True if this must always be specified
help Brief description of the purpose of this argument
type Argument type (if no default is given)
(int|float|str) later: (list|dict)
"""
return []
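        # A sketch of what a subclass might return (names and values here are
        # hypothetical, chosen only to show the shape described above):
        #     [ { 'name' : 'count', 'default' : 1,
        #         'help' : 'number of iterations' },
        #       { 'name' : 'label', 'type' : 'str', 'required' : True,
        #         'help' : 'tag attached to the results' } ]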
#=========================================================================
@classmethod
def gethelp( cls ):
"""
Retrieves any helpful information that should be sent to the user.
"""
if cls is Task:
return '(Task description unavailable.)'
return inspect.getdoc( cls )
#=========================================================================
def abort( self ):
"""
Stops the execution of this task before completion.
@throws NotSupported
Descendant class does not support this method
"""
raise NotSupported()
#=========================================================================
def initialize( self ):
"""
Initializes or starts the execution of this task.
@throws NotSupported
Descendant class does not support this method
"""
raise NotSupported()
#=========================================================================
def process( self ):
"""
Called iteratively until the task reports completion.
@return Task progress from 0.0 (none) to 1.0 (done)
@throws NotSupported
Descendant class does not support this method
"""
raise NotSupported()
#=========================================================================
def _load_args( self, args ):
"""
Load given arguments into object state.
@param args List or dict of requested argument values
"""
# flag to indicate valid argument input
result = True
# build a list of arguments expected by this task driver
self._arg_list = self.getargs()
# build a lookup table of known arguments
self._arg_table = dict( ( a[ 'name' ], a ) for a in self._arg_list )
# create a dictionary to keep the argument values and set defaults
self.arguments = dict(
( a[ 'name' ], a[ 'default' ] )
for a in self._arg_list
if 'default' in a
)
# handle dictionary input
if type( args ) is dict:
# use our known arguments to extract values into the object
for key, arg in self._arg_table.items():
# see if the input specified this argument
if key in args:
if self._load_arg( key, args[ key ] ) == False:
result = False
# handle list input
elif type( args ) is list:
# argument value list index
index = 0
# use our known arguments to extract values into the object
for key, arg in self._arg_table.items():
# see if the input specified this argument
if index < len( args ):
if self._load_arg( key, args[ index ] ) == False:
result = False
index += 1
# make sure all required arguments were specified
reqs = [ a[ 'name' ] for a in self._arg_list if 'required' in a ]
keys = self.arguments.keys()
# the difference between two sets should be an empty set if the entire
# first set is a subset of the second set
num_diff = len( set( reqs ) - set( keys ) )
if num_diff != 0:
result = False
# return status of argument loading
return result
#=========================================================================
def _load_arg( self, key, value ):
"""
Load a given argument into object state.
@param key Name of argument to load
@param value Value to load
@return True if successfully loaded
"""
# check key against table of arguments
if key not in self._arg_table:
return False
# reference the argument specifier
spec = self._arg_table[ key ]
# determine argument type
if 'default' in spec:
type_name = type( spec[ 'default' ] ).__name__
elif 'type' in spec:
type_name = spec[ 'type' ]
else:
type_name = 'str'
# pull name of type of value
value_type_name = type( value ).__name__
# validate argument type
if value_type_name != type_name:
return False
# store value in object state
self.arguments[ key ] = value
return True
#=============================================================================
def main( argv ):
"""
Script execution entry point
@param argv Arguments passed to the script
@return Exit code (0 = success)
"""
import tasks.devtask
t = tasks.devtask.DevTask()
t._load_args( {} )
    print(t.arguments)
    #print(t.gethelp())
    #print(Task.gethelp())
# return success
return 0
#=============================================================================
if __name__ == "__main__":
import sys
sys.exit( main( sys.argv ) )
| 31.402062 | 78 | 0.478989 | 896 | 9,138 | 4.804688 | 0.270089 | 0.014634 | 0.012544 | 0.022997 | 0.15331 | 0.129617 | 0.125668 | 0.105691 | 0.105691 | 0.105691 | 0 | 0.003051 | 0.31856 | 9,138 | 290 | 79 | 31.510345 | 0.688293 | 0.258809 | 0 | 0.141176 | 0 | 0 | 0.045244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.047059 | null | null | 0.011765 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da60447f22ba4eba74abcb47b3cadec2e06136d2 | 9,826 | py | Python | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 12 | 2021-05-07T15:27:11.000Z | 2022-01-29T04:26:36.000Z | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 15 | 2021-05-07T14:58:04.000Z | 2021-11-10T21:30:58.000Z | NeuroMechFly/experiments/kinematic_replay/kinematic_replay_no_support.py | NeLy-EPFL/NeuroMechFly | 69f9e2d86caac561a50e3e060d007dd50a20d481 | [
"Apache-2.0"
] | 1 | 2022-01-13T16:08:49.000Z | 2022-01-13T16:08:49.000Z | """ Drosophila simulation class for kinematic replay without body support. """
import numpy as np
import pandas as pd
import pybullet as p
from NeuroMechFly.sdf.units import SimulationUnitScaling
from NeuroMechFly.simulation.bullet_simulation import BulletSimulation
# Random number seed
np.random.seed(seed=321)
def add_perturbation(
size, initial_position, target_position, time, units
):
""" Shoot a ball to perturb the target system at a specified
velocity
Parameters
----------
size: <float>
Radius of the ball
initial_position: <array>
3D position of the ball
target_position: <array>
3D position of the target
time: <float>
Time before reaching the target position
Returns
-------
ball : <int>
Pybullet ID for the ball
"""
# Init
initial_position = np.asarray(initial_position) * units.meters
target_position = np.asarray(target_position) * units.meters
# Load ball
ball = p.loadURDF(
"../data/design/sdf/sphere_1cm.urdf", initial_position,
globalScaling=size * units.meters,
useMaximalCoordinates=True
)
# Change dynamics to remove damping and friction
p.changeDynamics(
ball, -1, linearDamping=0, angularDamping=0,
rollingFriction=0, spinningFriction=0
)
p.changeVisualShape(ball, -1, rgbaColor=[0.8, 0.8, 0.8, 1])
# Compute initial velocity
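    # From constant-acceleration kinematics, target = initial + v*t + 0.5*a*t**2
    # with a = (0, 0, -9.81) scaled by the simulation units, so solving for the
    # launch velocity gives v = (target - initial - 0.5*a*t**2) / t.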
velocity = (
target_position - initial_position -
0.5 * np.asarray([0, 0, -9.81 * units.gravity]) * time**2
) / time
# Reset base velocity
p.resetBaseVelocity(ball, velocity)
return ball
class DrosophilaSimulation(BulletSimulation):
""" Drosophila Simulation Class for kinematic replay.
Parameters
----------
container: <Container>
Instance of the Container class.
sim_options: <dict>
Dictionary containing the simulation options.
kp: <float>
Proportional gain of the position controller.
kv: <float>
Derivative gain of the position controller.
position_path: <str>
Path of the joint position .pkl file.
velocity_path: <str>
Path of the joint velocity .pkl file.
add_perturbation: <bool>
Activate/deactivate the ball perturbation.
units: <obj>
Instance of SimulationUnitScaling object to scale up the units during calculations.
"""
def __init__(
self, container, sim_options, kp, kv,
angles_path, velocity_path,
add_perturbation,
starting_time=0.0,
fixed_positions=None,
units=SimulationUnitScaling(meters=1000, kilograms=1000)
):
super().__init__(container, units, **sim_options)
self.last_draw = []
self.kp = kp
self.kv = kv
self.pose = [0] * self.num_joints
self.vel = [0] * self.num_joints
self.angles = self.load_data(angles_path, starting_time)
self.velocities = self.load_data(velocity_path, starting_time)
self.impulse_sign = 1
self.add_perturbation = add_perturbation
self.fixed_positions = fixed_positions
self.pball = None
def load_data(self, data_path, starting_time):
""" Function that loads the pickle format joint angle or velocity gile.
Parameters
----------
data_path : <str>
Path of the .pkl file.
starting_time : <float>
Experiment's time from which the simulation will start.
Returns
-------
dict
Returns the joint angles in a dictionary.
"""
names_equivalence = {
'ThC_pitch': 'Coxa',
'ThC_yaw': 'Coxa_yaw',
'ThC_roll': 'Coxa_roll',
'CTr_pitch': 'Femur',
'CTr_roll': 'Femur_roll',
'FTi_pitch': 'Tibia',
'TiTa_pitch': 'Tarsus1'
}
converted_dict = {}
try:
data = pd.read_pickle(data_path)
start = int(np.round(starting_time / self.time_step))
for leg, joints in data.items():
for joint_name, val in joints.items():
new_name = 'joint_' + leg[:2] + \
names_equivalence[joint_name]
converted_dict[new_name] = val[start:]
return converted_dict
        except BaseException:
            raise FileNotFoundError(f"File {data_path} not found!")
def controller_to_actuator(self, t):
"""
Code that glues the controller the actuator in the system.
If there are muscles then contoller actuates the muscles.
If not then the controller directly actuates the joints.
Parameters
----------
t : int
Time running in the physics engine.
"""
# Throw mini balls at the fly during kinematic replay
if self.add_perturbation:
if ((t + 1) % (0.5 / self.time_step)) == 0:
print("Adding perturbation")
self.pball = add_perturbation(
size=5e-2,
initial_position=np.asarray(
[0, self.impulse_sign * 2e-3, 0.0]) + self.base_position,
target_position=self.base_position,
time=20e-3, units=self.units
)
self.impulse_sign *= -1
if ((t + 1) % (3.0 / self.time_step)
) == 0 and t < (3.012 / self.time_step):
radius = 20e-2
self.pball = add_perturbation(
size=radius,
initial_position=np.asarray(
[radius * 0.05, radius * 0.05, 1e-3]) + self.base_position,
target_position=[self.base_position[0], self.base_position[1], 0.0],
time=20e-3, units=self.units
)
p.changeDynamics(self.pball, -1, 0.3)
# Setting the joint angular positions joints
# Setting the joint angular positions of the fixed joints
if not self.fixed_positions:
self.fixed_positions = {
'joint_LAntenna': 35,
'joint_RAntenna': -35,
}
for joint_name, joint_pos in self.fixed_positions.items():
self.pose[self.joint_id[joint_name]] = np.deg2rad(joint_pos)
# Setting the joint angular positions of leg DOFs based on pose estimation
for joint_name, joint_pos in self.angles.items():
self.pose[self.joint_id[joint_name]] = joint_pos[t]
# Setting the joint angular velocities of leg DOFs based on pose estimation
for joint_name, joint_vel in self.velocities.items():
self.vel[self.joint_id[joint_name]] = joint_vel[t]
# Control the joints through position controller
# Velocity can be discarded if not available and gains can be changed
for joint in range(self.num_joints):
p.setJointMotorControl2(
self.animal, joint,
controlMode=p.POSITION_CONTROL,
targetPosition=self.pose[joint],
targetVelocity=self.vel[joint],
positionGain=self.kp,
velocityGain=self.kv,
maxVelocity=1e8
)
p.changeDynamics(self.animal, joint, maxJointVelocity=1e8)
# Change the color of the colliding body segments
if self.draw_collisions:
draw = []
if self.behavior == 'walking':
links_contact = self.get_current_contacts()
link_names = list(self.link_id.keys())
link_ids = list(self.link_id.values())
for i in links_contact:
link1 = link_names[link_ids.index(i)]
if link1 not in draw:
draw.append(link1)
self.change_color(link1, self.color_collision)
for link in self.last_draw:
if link not in draw:
self.change_color(link, self.color_legs)
elif self.behavior == 'grooming':
# Don't consider the ground sensors
collision_forces = self.contact_normal_force[len(
self.ground_contacts):, :]
links_contact = np.where(
np.linalg.norm(collision_forces, axis=1) > 0
)[0]
for i in links_contact:
link1 = self.self_collisions[i][0]
link2 = self.self_collisions[i][1]
if link1 not in draw:
draw.append(link1)
self.change_color(link1, self.color_collision)
if link2 not in draw:
draw.append(link2)
self.change_color(link2, self.color_collision)
for link in self.last_draw:
if link not in draw:
if 'Antenna' in link:
self.change_color(link, self.color_body)
else:
self.change_color(link, self.color_legs)
self.last_draw = draw
def change_color(self, identity, color):
""" Change color of a given body segment. """
p.changeVisualShape(
self.animal,
self.link_id[identity],
rgbaColor=color)
def feedback_to_controller(self):
"""
Code that glues the sensors/feedback to controller in the system.
"""
def update_parameters(self, params):
""" Update parameters. """
def optimization_check(self):
""" Optimization check. """
| 36.258303 | 91 | 0.569408 | 1,096 | 9,826 | 4.958029 | 0.265511 | 0.010121 | 0.016562 | 0.016194 | 0.230217 | 0.179426 | 0.106551 | 0.088333 | 0.059257 | 0.059257 | 0 | 0.017509 | 0.343171 | 9,826 | 270 | 92 | 36.392593 | 0.82445 | 0.239161 | 0 | 0.15 | 0 | 0 | 0.0344 | 0.004793 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.03125 | 0 | 0.1 | 0.00625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da6071c120cc4c6108f42d5833b8ae67a673f55d | 3,845 | py | Python | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | hw/ip/otbn/dv/otbnsim/sim/isa.py | wxjstz/opentitan | 6ff4397bac9c07373d735bd859c7ef8de39c2af8 | [
"Apache-2.0"
] | null | null | null | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from enum import IntEnum
import sys
from typing import Dict
from riscvmodel.types import Immediate # type: ignore
from shared.insn_yaml import Insn, load_insns_yaml
from .model import OTBNModel
# Load the insns.yml file at module load time: we'll use its data while
# declaring the classes. The point is that an OTBNInsn below is an instance of
# a particular Insn object from shared.insn_yaml, so we want a class variable
# on the OTBNInsn that points at the corresponding Insn.
try:
_INSNS_FILE = load_insns_yaml()
except RuntimeError as err:
sys.stderr.write('{}\n'.format(err))
sys.exit(1)
class DummyInsn(Insn):
'''A dummy instruction that will never be decoded. Used for the insn class
variable in the OTBNInsn base class.
'''
def __init__(self) -> None:
fake_yml = {
'mnemonic': 'dummy-insn',
'operands': []
}
super().__init__(fake_yml, None)
def insn_for_mnemonic(mnemonic: str, num_operands: int) -> Insn:
'''Look up the named instruction in the loaded YAML data.
As a sanity check, make sure it has the expected number of operands. If we
fail to find the right instruction, print a message to stderr and exit
(rather than raising a RuntimeError: this happens on module load time, so
it's a lot clearer to the user what's going on this way).
'''
insn = _INSNS_FILE.mnemonic_to_insn.get(mnemonic)
if insn is None:
sys.stderr.write('Failed to find an instruction for mnemonic {!r} in '
'insns.yml.\n'
.format(mnemonic))
sys.exit(1)
if len(insn.operands) != num_operands:
sys.stderr.write('The instruction for mnemonic {!r} in insns.yml has '
'{} operands, but we expected {}.\n'
.format(mnemonic, len(insn.operands), num_operands))
sys.exit(1)
return insn
class OTBNInsn:
'''A decoded OTBN instruction.
'''
# A class variable that holds the Insn subclass corresponding to this
# instruction.
insn = DummyInsn() # type: Insn
def __init__(self, op_vals: Dict[str, int]):
self.op_vals = op_vals
def execute(self, model: OTBNModel) -> None:
raise NotImplementedError('OTBNInsn.execute')
def disassemble(self, pc: int) -> str:
'''Generate an assembly listing for this instruction'''
return self.insn.disassemble(self.op_vals, 12)
class RV32RegReg(OTBNInsn):
'''A general class for register-register insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.grs2 = op_vals['grs2']
class RV32RegImm(OTBNInsn):
'''A general class for register-immediate insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.imm = op_vals['imm']
class RV32ImmShift(OTBNInsn):
'''A general class for immediate shift insns from the RV32I ISA'''
def __init__(self, op_vals: Dict[str, int]):
super().__init__(op_vals)
self.grd = op_vals['grd']
self.grs1 = op_vals['grs1']
self.shamt = op_vals['shamt']
class ShiftType(IntEnum):
LSL = 0 # logical shift left
LSR = 1 # logical shift right
def ShiftReg(reg: int, shift_type: int, shift_bytes: Immediate) -> int:
assert 0 <= int(shift_bytes)
shift_bits = int(shift_bytes << 3)
return (reg << shift_bits
if shift_type == ShiftType.LSL
else reg >> shift_bits)
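# Illustrative use (hypothetical operand values): a 2-byte logical left shift
# moves the register up by 16 bits, e.g.
#   ShiftReg(0x1, ShiftType.LSL, 2) == 0x1_0000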
| 31.008065 | 78 | 0.649935 | 537 | 3,845 | 4.500931 | 0.325885 | 0.047166 | 0.024824 | 0.021514 | 0.223004 | 0.213074 | 0.162598 | 0.135292 | 0.124121 | 0.124121 | 0 | 0.011431 | 0.249155 | 3,845 | 123 | 79 | 31.260163 | 0.825771 | 0.33238 | 0 | 0.242424 | 0 | 0 | 0.091275 | 0 | 0 | 0 | 0 | 0 | 0.015152 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da624c2d151313fbe9db0021e53684c69e6d4b5f | 247 | py | Python | config.py | TaskeHAMANO/sample_application | 628699c62197dd5079e0b600f431c791ac3a301a | [
"BSD-3-Clause"
] | null | null | null | config.py | TaskeHAMANO/sample_application | 628699c62197dd5079e0b600f431c791ac3a301a | [
"BSD-3-Clause"
] | null | null | null | config.py | TaskeHAMANO/sample_application | 628699c62197dd5079e0b600f431c791ac3a301a | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# vim:fileencoding=utf-8
# Author: Shinya Suzuki
# Created: 2017-11-16
database_path = "/techathon.db"
# Use the four-slash form so SQLAlchemy treats database_path as absolute.
SQLALCHEMY_DATABASE_URI = "sqlite:///{0}".format(database_path)
SECRET_KEY = 'test'
SQLALCHEMY_TRACK_MODIFICATIONS = True
| 24.7 | 62 | 0.761134 | 34 | 247 | 5.323529 | 0.882353 | 0.132597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.044843 | 0.097166 | 247 | 9 | 63 | 27.444444 | 0.766816 | 0.34413 | 0 | 0 | 0 | 0 | 0.183544 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da63d798dfe9c2ea59c6459800d52786ae4db56c | 2,668 | py | Python | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | null | null | null | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | 9 | 2022-01-15T19:43:46.000Z | 2022-03-24T06:04:25.000Z | tests/features/steps/environment_steps.py | candango/pyclicksign | d709122867cfa5c6fce4322b55a033bc82126e1c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021-2022 Flávio Gonçalves Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from behave import given, when, then, step
from cartola import fs
from tornado.escape import json_encode, json_decode
import os
def get_absolute_path(directory):
return os.path.realpath(
os.path.join(os.path.dirname(__file__), "..", "..", directory)
)
def create_file(path, content, binary=False):
real_path = get_absolute_path(path)
fs.write(real_path, content, binary)
os.chmod(real_path, 0o600)
return real_path
@then("Podemos converter {index} de dict para texto")
def step_convert_dict_to_text(context, index):
data = getattr(context, index)
setattr(context, index, json_encode(data))
@then("Podemos converter {index} de texto para dict")
def step_convert_text_to_dict(context, index):
data = getattr(context, index)
setattr(context, index, json_decode(data))
@then("Arquivo de {index} é criado com sucesso em {path}")
def step_file_created_successfully(context, index, path):
data = getattr(context, index)
if isinstance(data, dict):
data = json_encode(data)
if isinstance(data, str):
data = data.encode()
real_path = create_file(path, data, True)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
@given("Arquivo de {index} existe em {path}")
def step_file_exists(context, index, path):
real_path = get_absolute_path(path)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
setattr(context, index, real_path)
print(getattr(context, index))
@given("Ler dados de {index} sucedeu")
def step_read_data_succeeded(context, index):
real_path = getattr(context, index)
setattr(context, index, fs.read(real_path))
@then("File at {path} removed")
def step_file_at_removed(context, path):
real_path = get_absolute_path(path)
context.tester.assertTrue(os.path.exists(real_path))
context.tester.assertTrue(os.path.isfile(real_path))
os.remove(real_path)
context.tester.assertFalse(os.path.exists(real_path))
| 33.35 | 74 | 0.737631 | 385 | 2,668 | 4.966234 | 0.34026 | 0.075314 | 0.072176 | 0.078452 | 0.393828 | 0.348849 | 0.281381 | 0.281381 | 0.259414 | 0.259414 | 0 | 0.007092 | 0.154423 | 2,668 | 79 | 75 | 33.772152 | 0.840426 | 0.211394 | 0 | 0.285714 | 0 | 0 | 0.108134 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.163265 | false | 0 | 0.081633 | 0.020408 | 0.285714 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da648cdce4097a5f15c459cc9d3dc08716cd7f4a | 2,967 | py | Python | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | 1 | 2020-04-01T22:05:28.000Z | 2020-04-01T22:05:28.000Z | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | null | null | null | photo_album_src/models_bk.py | chrisjen83/k3s-labs | b283c2500b272be0de1025ef541a46d7c4591cc1 | [
"MIT"
] | 5 | 2020-02-21T22:47:35.000Z | 2022-02-03T15:21:39.000Z | #!/usr/bin/env python3
# Import modules required for app
import os
import boto3
import json
from pymongo import MongoClient
from werkzeug.utils import secure_filename
from PIL import Image
from config import ecs_test_drive
#Get from K8s ConfigMap values for MongoDB Database
MONGO_SERVER = os.environ.get( "MONGO_SERVER", None)
DB_NAME = os.environ.get( "DB_NAME", None)
client = MongoClient( MONGO_SERVER, 27017)
# Get database connection with database name
db = client[DB_NAME]
# Remove any existing documents in photos collection
# db.photos.delete_many({}) # Comment this line if you don't want to remove documents each time you start the app
# Retrieve all photos records from database
def get_photos():
return db.photos.find({})
# Insert form fields into database
def insert_photo(request):
title = request.form['title']
comments = request.form['comments']
filename = secure_filename(request.files['photo'].filename)
thumbfile = filename.rsplit(".", 1)[0] + "-thumb.jpg"
photo_url = "http://" + ecs_test_drive['ecs_access_key_id'].split(
'@')[0] + ".public.ecstestdrive.com/" + ecs_test_drive['ecs_bucket_name'] + "/" + filename
thumbnail_url = "http://" + ecs_test_drive['ecs_access_key_id'].split(
'@')[0] + ".public.ecstestdrive.com/" + ecs_test_drive['ecs_bucket_name'] + "/" + thumbfile
db.photos.insert_one({'title': title, 'comments': comments,
'photo': photo_url, 'thumb': thumbnail_url})
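# Illustrative shape of one stored photo document (values are made up):
#   {'title': 'Sunset', 'comments': 'From the roof',
#    'photo': 'http://<namespace>.public.ecstestdrive.com/<bucket>/sunset.jpg',
#    'thumb': 'http://<namespace>.public.ecstestdrive.com/<bucket>/sunset-thumb.jpg'}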
def upload_photo(file):
# Get ECS credentials from external config file
ecs_endpoint_url = ecs_test_drive['ecs_endpoint_url']
ecs_access_key_id = ecs_test_drive['ecs_access_key_id']
ecs_secret_key = ecs_test_drive['ecs_secret_key']
ecs_bucket_name = ecs_test_drive['ecs_bucket_name']
# Open a session with ECS using the S3 API
session = boto3.resource(service_name='s3', aws_access_key_id=ecs_access_key_id,
aws_secret_access_key=ecs_secret_key, endpoint_url=ecs_endpoint_url)
# Remove unsupported characters from filename
filename = secure_filename(file.filename)
# First save the file locally
file.save(os.path.join("uploads", filename))
# Create a thumbnail
size = 225, 225
with open("uploads/" + filename, 'rb') as f:
img = Image.open(f)
img.thumbnail(size)
thumbfile = filename.rsplit(".", 1)[0] + "-thumb.jpg"
img.save("uploads/" + thumbfile, "JPEG")
img.close()
# Empty the variables to prevent memory leaks
img = None
# Upload the original image to ECS
session.Object(ecs_bucket_name, filename).put(
Body=open("uploads/" + filename, 'rb'), ACL='public-read')
# Upload the thumbnail to ECS
session.Object(ecs_bucket_name, thumbfile).put(
Body=open("uploads/" + thumbfile, 'rb'), ACL='public-read')
# Delete the local files
os.remove("uploads/" + filename)
os.remove("uploads/" + thumbfile)
| 34.905882 | 115 | 0.691271 | 405 | 2,967 | 4.859259 | 0.34321 | 0.032012 | 0.054878 | 0.060976 | 0.185976 | 0.177337 | 0.164634 | 0.086382 | 0.086382 | 0.086382 | 0 | 0.009583 | 0.191102 | 2,967 | 84 | 116 | 35.321429 | 0.810417 | 0.232895 | 0 | 0.042553 | 0 | 0 | 0.161647 | 0.022143 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.148936 | 0.021277 | 0.234043 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da67084ae53c45931e9876b1394dc1aa92e625de | 12,051 | py | Python | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | 2 | 2017-03-23T19:44:03.000Z | 2020-11-28T17:01:49.000Z | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | null | null | null | pymothoa/llvm_backend/backend.py | sklam/pymothoa | 330bd70666ccf761f39c75f3acb70aa7e0a92ac6 | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2012, Siu Kwan Lam
# All rights reserved.
import logging
import ast
from contextlib import contextmanager
from pymothoa.util.descriptor import Descriptor, instanceof
from pymothoa import dialect
from pymothoa.compiler_errors import *
from pymothoa.backend import CodeGenerationBase
from types import *
from values import *
import llvm # binding
logger = logging.getLogger(__name__)
class LLVMCodeGenerator(CodeGenerationBase):
retty = Descriptor(constant=True)
argtys = Descriptor(constant=True)
function = Descriptor(constant=True)
entry_block = Descriptor(constant=True)
def __init__(self, fnobj, retty, argtys, symbols):
super(LLVMCodeGenerator, self).__init__(symbols)
self.function = fnobj
self.retty = retty
self.argtys = argtys
@contextmanager
def generate_function(self, name):
if not self.function.valid():
raise FunctionDeclarationError(
self.current_node,
self.jit_engine.last_error()
)
self.symbols[name] = self.function
# make basic block
self.entry_block = self.function.append_basic_block("entry")
self.__blockcounter = 0
# make instruction builder
self.builder = llvm.Builder()
bb_body = self.function.append_basic_block("body")
self.builder.insert_at(bb_body)
yield # wait until args & body are generated
# link entry to body
bb_last = self.builder.get_basic_block() # remember last block
self.builder.insert_at(self.entry_block) # goto entry block
self.builder.branch(bb_body) # branch to body
self.builder.insert_at(bb_last) # return to last block
# close function
if not self.builder.is_block_closed():
if isinstance(self.retty, types.Void):
# no return
self.builder.ret_void()
else:
raise MissingReturnError(self.current_node)
def generate_function_arguments(self, arguments):
with self.relocate_to_entry():
fn_args = self.function.arguments()
for i, name in enumerate(arguments):
try:
var = LLVMVariable(name, self.argtys[i], self.builder)
except IndexError:
raise FunctionDeclarationError(
self.current_node,
'Actual number of argument mismatch declaration.')
self.builder.store(fn_args[i], var.pointer)
self.symbols[name] = var
def generate_call(self, fn, args):
from function import LLVMFunction
if isinstance(fn, LLVMFunction): # another function
retty = fn.retty
argtys = fn.argtys
fn = fn.code_llvm
elif fn is self.function: # recursion
retty = self.retty
argtys = self.argtys
else:
raise InvalidCall(self.current_node)
return self._call_function(fn, args, retty, argtys)
def generate_assign(self, from_value, to_target):
casted = to_target.type.cast(from_value, self.builder)
self.builder.store(casted, to_target.pointer)
return casted
def generate_compare(self, op_class, lhs, rhs):
ty = lhs.type.coerce(rhs.type)
lval = ty.cast(lhs, self.builder)
rval = ty.cast(rhs, self.builder)
fn = getattr(ty, 'op_%s'%op_class.__name__.lower())
pred = fn(lval, rval, self.builder)
return LLVMTempValue(pred, LLVMType(types.Bool))
def generate_return(self, value=None):
if value is None: # no return value
self.builder.ret_void()
return
if isinstance(self.retty, LLVMVoid):
raise InvalidReturnError(
self.current_node,
'This function does not return any value.'
)
casted = self.retty.cast(value, self.builder)
self.builder.ret(casted)
def generate_binop(self, op_class, lhs, rhs):
ty = lhs.type.coerce(rhs.type)
lval = ty.cast(lhs, self.builder)
rval = ty.cast(rhs, self.builder)
try:
fn = getattr(ty, 'op_%s'%op_class.__name__.lower())
except AttributeError as e:
raise OperatorError(self.current_node, 'Debug detail: %s'%str(e))
else:
return LLVMTempValue(fn(lval, rval, self.builder), ty)
def generate_constant_int(self, value):
return LLVMConstant(LLVMType(types.Int), value)
def generate_constant_real(self, value):
return LLVMConstant(LLVMType(types.Double), value)
def generate_declare(self, name, ty):
with self.relocate_to_entry():
if issubclass(ty, types.GenericBoundedArray): # array
return LLVMArrayVariable(name, LLVMType(ty), ty.elemcount.value(self.builder), self.builder)
else: # other types
realty = LLVMType(ty)
return LLVMVariable(name, realty, self.builder)
def _call_function(self, fn, args, retty, argtys):
arg_values = map(lambda X: LLVMTempValue(X.value(self.builder), X.type), args)
# cast types
try:
for i, argty in enumerate(argtys):
arg_values[i] = argty.cast(arg_values[i], self.builder)
except IndexError:
raise InvalidCall(self.current_node, 'Number of argument mismatch')
out = self.builder.call(fn, arg_values)
return LLVMTempValue(out, retty)
def new_basic_block(self, name='uname'):
self.__blockcounter += 1
return self.function.append_basic_block('%s_%d'%(name, self.__blockcounter))
def generate_vector_load_elem(self, ptr, idx):
elemval = self.builder.extract_element(
ptr.value(self.builder),
idx.value(self.builder),
)
return LLVMTempValue(elemval, ptr.type.elemtype)
def generate_vector_store_elem(self, ptr, idx):
zero = self.generate_constant_int(0)
indices = map(lambda X: X.value(self.builder), [zero, idx])
addr = self.builder.gep2(ptr.pointer, indices)
return LLVMTempPointer(addr, ptr.type.elemtype)
def generate_array_load_elem(self, ptr, idx):
ptr_val = ptr.value(self.builder)
idx_val = idx.value(self.builder)
ptr_offset = self.builder.gep(ptr_val, idx_val)
return LLVMTempValue(self.builder.load(ptr_offset), ptr.type.elemtype)
def generate_array_store_elem(self, ptr, idx):
ptr_val = ptr.value(self.builder)
idx_val = idx.value(self.builder)
ptr_offset = self.builder.gep(ptr_val, idx_val)
return LLVMTempPointer(ptr_offset, ptr.type.elemtype)
def generate_if(self, test, iftrue, orelse):
bb_if = self.new_basic_block('if')
bb_else = self.new_basic_block('else')
bb_endif = self.new_basic_block('endif')
is_endif_reachable = False
boolean = self.ensure_boolean(test)
self.builder.cond_branch(boolean, bb_if, bb_else)
# true branch
self.builder.insert_at(bb_if)
for stmt in iftrue:
self.visit(stmt)
else:
if not self.builder.is_block_closed():
self.builder.branch(bb_endif)
                is_endif_reachable = True
# false branch
self.builder.insert_at(bb_else)
for stmt in orelse:
self.visit(stmt)
else:
if not self.builder.is_block_closed():
self.builder.branch(bb_endif)
                is_endif_reachable = True
# endif
self.builder.insert_at(bb_endif)
if not is_endif_reachable:
self.builder.unreachable()
def generate_while(self, test, body):
bb_cond = self.new_basic_block('loopcond')
bb_body = self.new_basic_block('loopbody')
bb_exit = self.new_basic_block('loopexit')
self.builder.branch(bb_cond)
# condition
self.builder.insert_at(bb_cond)
cond = self.visit(test)
self.builder.cond_branch(self.ensure_boolean(cond), bb_body, bb_exit)
# body
self.builder.insert_at(bb_body)
for stmt in body:
self.visit(stmt)
else:
self.builder.branch(bb_cond)
# Not sure if it is necessary
# if not self.builder.is_block_closed():
# self.builder.branch(bb_cond)
# end loop
self.builder.insert_at(bb_exit)
def generate_for_range(self, counter_ptr, initcount, endcount, step, loopbody):
self.builder.store(initcount.value(self.builder), counter_ptr.pointer)
bb_cond = self.new_basic_block('loopcond')
bb_body = self.new_basic_block('loopbody')
bb_incr = self.new_basic_block('loopincr')
bb_exit = self.new_basic_block('loopexit')
self.builder.branch(bb_cond)
# condition
self.builder.insert_at(bb_cond)
test = self.builder.icmp(llvm.ICMP_SLT, counter_ptr.value(self.builder), endcount.value(self.builder))
self.builder.cond_branch(test, bb_body, bb_exit)
# body
self.builder.insert_at(bb_body)
for stmt in loopbody:
self.visit(stmt)
else:
self.builder.branch(bb_incr)
# Not sure if it is necessary
# if not self.builder.is_block_closed():
# self.builder.branch(bb_incr)
# incr
self.builder.insert_at(bb_incr)
# counter_next = self.builder.add(counter_ptr.value(self.builder),
# step.value(self.builder))
counter_next = counter_ptr.type.op_add(counter_ptr.value(self.builder),
step.value(self.builder),
self.builder)
self.builder.store(counter_next, counter_ptr.pointer)
self.builder.branch(bb_cond)
# exit
self.builder.insert_at(bb_exit)
def generate_boolop(self, op_class, lhs, rhs):
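        # Short-circuit lowering: for `a and b` the right-hand block only runs
        # when the left value is true (for `or`, when it is false); a phi node
        # in the result block then merges the value coming from whichever
        # predecessor actually branched there.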
bb_left = self.builder.get_basic_block()
boolty = LLVMType(types.Bool)
left = boolty.cast(self.visit(lhs), self.builder)
bb_right = self.new_basic_block('bool_right')
bb_result = self.new_basic_block('bool_result')
if isinstance(op_class, ast.And):
self.builder.cond_branch(left, bb_right, bb_result)
elif isinstance(op_class, ast.Or):
self.builder.cond_branch(left, bb_result, bb_right)
else:
raise AssertionError('Unknown Boolean operator')
self.builder.insert_at(bb_right)
right = boolty.cast(self.visit(rhs), self.builder)
self.builder.branch(bb_result)
self.builder.insert_at(bb_result)
        pred = self.builder.phi(boolty.type(), [bb_left, bb_right], [left, right])
return LLVMTempValue(pred, boolty)
def generate_not(self, operand):
boolty = LLVMType(types.Bool)
boolval = boolty.cast(operand, self.builder)
negated = boolty.op_not(boolval, self.builder)
return LLVMTempValue(negated, boolty)
def generate_array_slice(self, ptr, lower, upper=None, step=None):
assert upper is None
assert step is None
ptr_val = ptr.value(self.builder)
lower_val = lower.value(self.builder)
offsetted = self.builder.gep(ptr_val, lower_val)
return LLVMTempValue(offsetted, ptr.type)
@contextmanager
def relocate_to_entry(self):
# goto entry block
bb_last = self.builder.get_basic_block()
self.builder.insert_at(self.entry_block)
yield # relocated
# pickup at last block
self.builder.insert_at(bb_last)
def ensure_boolean(self, value):
return LLVMType(types.Bool).cast(value, self.builder)
| 35.759644 | 110 | 0.617127 | 1,450 | 12,051 | 4.938621 | 0.170345 | 0.147465 | 0.049155 | 0.045105 | 0.404552 | 0.286133 | 0.232789 | 0.207234 | 0.176512 | 0.168133 | 0 | 0.000933 | 0.288275 | 12,051 | 336 | 111 | 35.866071 | 0.833975 | 0.069538 | 0 | 0.322314 | 0 | 0 | 0.024257 | 0 | 0 | 0 | 0 | 0 | 0.012397 | 1 | 0.103306 | false | 0 | 0.045455 | 0.012397 | 0.247934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da68786bead17edb9e00b001f796346815fc35ed | 2,412 | py | Python | digsby/src/jabber/objects/gmail/mail_thread_info.py | ifwe/digsby | f5fe00244744aa131e07f09348d10563f3d8fa99 | [
"Python-2.0"
] | 35 | 2015-08-15T14:32:38.000Z | 2021-12-09T16:21:26.000Z | digsby/src/jabber/objects/gmail/mail_thread_info.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 4 | 2015-09-12T10:42:57.000Z | 2017-02-27T04:05:51.000Z | digsby/src/jabber/objects/gmail/mail_thread_info.py | niterain/digsby | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | [
"Python-2.0"
] | 15 | 2015-07-10T23:58:07.000Z | 2022-01-23T22:16:33.000Z | #tid The thread id of this thread.
#participation A number indicating the user's participation level in this thread: 0 indicates that the user has not participated; 1 indicates that the user is one of many recipients listed in the thread; 2 indicates that the user is the sole recipient for messages in this thread.
#messages The number of messages in the thread.
#date A timestamp of the most recent message, in milliseconds since the UNIX epoch.
#url The URL linking to this thread
#
#<senders>
#<labels>
#<subject>
#<snippet>
from jabber.objects.gmail.senders import Senders
from pyxmpp.utils import from_utf8
from jabber.jabber_util.functions import xpath_eval
from pyxmpp.xmlextra import get_node_ns_uri
from jabber.objects.gmail import GOOGLE_MAIL_NOTIFY_NS
from pyxmpp.objects import StanzaPayloadObject
class MailThreadInfo(StanzaPayloadObject):
xml_element_name = 'mail-thread-info'
xml_element_namespace = GOOGLE_MAIL_NOTIFY_NS
def __init__(self, xmlnode):
self.__from_xml(xmlnode)
def __from_xml(self, node):
        if node.type != "element":
            raise ValueError("XML node is not a %s element (not an element)" % self.xml_element_name)
        ns = get_node_ns_uri(node)
        if ns and ns != self.xml_element_namespace or node.name != self.xml_element_name:
            raise ValueError("XML node is not a %s element" % self.xml_element_name)
labelss = xpath_eval(node, 'g:labels',{'g':GOOGLE_MAIL_NOTIFY_NS})
labels = labelss[0].getContent() if labelss else None
self.labels = from_utf8(labels).split('|') if labels else []
senderss = xpath_eval(node, 'g:senders',{'g':GOOGLE_MAIL_NOTIFY_NS})
self.senders = Senders(senderss[0]) if senderss else []
subjects = xpath_eval(node, 'g:subject',{'g':GOOGLE_MAIL_NOTIFY_NS})
self.subject = from_utf8(subjects[0].getContent()) if subjects else None
snippets = xpath_eval(node, 'g:snippet',{'g':GOOGLE_MAIL_NOTIFY_NS})
self.snippet = from_utf8(snippets[0].getContent()) if snippets else None
self.tid = int(from_utf8(node.prop("tid")))
self.participation = int(from_utf8(node.prop("participation")))
self.messages = int(from_utf8(node.prop("messages")))
self.date = int(from_utf8(node.prop("date")))
        self.url = from_utf8(node.prop("url"))
| 47.294118 | 285 | 0.69942 | 344 | 2,412 | 4.726744 | 0.284884 | 0.04428 | 0.059041 | 0.066421 | 0.206642 | 0.075646 | 0 | 0 | 0 | 0 | 0 | 0.008316 | 0.202322 | 2,412 | 50 | 286 | 48.24 | 0.836798 | 0.21932 | 0 | 0 | 0 | 0 | 0.092755 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.193548 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da687c22550da7202f3e33817124a03999dca63a | 542 | py | Python | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | 4 | 2019-07-26T03:00:39.000Z | 2021-01-29T16:12:21.000Z | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | null | null | null | cloud/single_stage_detector/pytorch/onnx_demo.py | mgoin/inference | ede5477a2aee72ceb435e9ecd599ffa052417c2a | [
"Apache-2.0"
] | 2 | 2019-11-12T15:57:29.000Z | 2022-03-02T21:26:58.000Z | import onnxruntime
import onnx
import os
from onnx import numpy_helper
onnx_model_dir = 'test_ssd_model'
onnx_data_dir = 'test_data_set_0'
sess = onnxruntime.InferenceSession(os.path.join(onnx_model_dir, 'model.onnx'))
img_tensor = onnx.TensorProto()
with open(os.path.join(onnx_model_dir, onnx_data_dir, 'input_0.pb'), 'rb') as f:
img_tensor.ParseFromString(f.read())
test_img_data = numpy_helper.to_array(img_tensor)
out_onnx = sess.run(None, { sess.get_inputs()[0].name: test_img_data })
loc, label, prob = out_onnx
print(out_onnx) | 30.111111 | 80 | 0.778598 | 91 | 542 | 4.307692 | 0.461538 | 0.068878 | 0.091837 | 0.071429 | 0.112245 | 0.112245 | 0 | 0 | 0 | 0 | 0 | 0.006148 | 0.099631 | 542 | 18 | 81 | 30.111111 | 0.797131 | 0 | 0 | 0 | 0 | 0 | 0.093923 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.285714 | 0 | 0.285714 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da69d942b34c7cb188f48d8f571305a3929e1a1b | 95 | py | Python | abfahrt/unittest/__init__.py | Team-Zugig-zum-Erfolg/InformatiCup | 788076ac38bf6d8f462465b7fb96db14d13bed30 | [
"MIT"
] | 1 | 2022-01-30T14:30:02.000Z | 2022-01-30T14:30:02.000Z | abfahrt/unittest/__init__.py | Team-Zugig-zum-Erfolg/InformatiCup | 788076ac38bf6d8f462465b7fb96db14d13bed30 | [
"MIT"
] | null | null | null | abfahrt/unittest/__init__.py | Team-Zugig-zum-Erfolg/InformatiCup | 788076ac38bf6d8f462465b7fb96db14d13bed30 | [
"MIT"
] | null | null | null | """
Unit-test package for testing the most important classes/modules of the abfahrt package
"""
| 23.75 | 86 | 0.778947 | 13 | 95 | 5.692308 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.126316 | 95 | 3 | 87 | 31.666667 | 0.891566 | 0.905263 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
da6a8272e09ed6bcdd72fe1fe0ed6ca276090222 | 3,519 | py | Python | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | uat/test_uat_CLIParser.py | sorint-lab-us/aws-greengrass-gdk-cli | 7508c7f62dcee1638cfc895ea38f3842e0072f0e | [
"Apache-2.0"
] | null | null | null | import json
import os
import subprocess as sp
import tempfile
from pathlib import Path
def test_list_template():
check_list_template = sp.run(["gdk", "component", "list", "--template"], check=True, stdout=sp.PIPE)
assert "HelloWorld-python" in check_list_template.stdout.decode()
assert "HelloWorld-java" in check_list_template.stdout.decode()
def test_list_repository():
check_list_template = sp.run(["gdk", "component", "list", "--repository"], check=True, stdout=sp.PIPE)
assert "aws-greengrass-labs-database-influxdb" in check_list_template.stdout.decode()
def test_init_template_non_empty_dir():
check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], stdout=sp.PIPE)
assert check_init_template.returncode == 1
assert "Try `gdk component init --help`" in check_init_template.stdout.decode()
def test_init_template():
dirpath = tempfile.mkdtemp()
os.chdir(dirpath)
check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], check=True, stdout=sp.PIPE)
assert check_init_template.returncode == 0
assert Path(dirpath).joinpath("recipe.yaml").resolve().exists()
assert Path(dirpath).joinpath("gdk-config.json").resolve().exists()
def test_init_repository():
dirpath = tempfile.mkdtemp()
os.chdir(dirpath)
check_init_repo = sp.run(
["gdk", "component", "init", "-r", "aws-greengrass-labs-database-influxdb"], check=True, stdout=sp.PIPE
)
assert check_init_repo.returncode == 0
assert Path(dirpath).joinpath("recipe.yaml").exists()
assert Path(dirpath).joinpath("gdk-config.json").exists()
def test_build_template_zip():
dirpath = tempfile.mkdtemp()
# Recipe contains HelloWorld.zip artifact. So, create HelloWorld directory inside temporary directory.
path_HelloWorld = Path(dirpath).joinpath("HelloWorld")
os.mkdir(path_HelloWorld)
os.chdir(path_HelloWorld)
# Check if init downloads templates with necessary files.
check_init_template = sp.run(["gdk", "component", "init", "-t", "HelloWorld", "-l", "python"], check=True, stdout=sp.PIPE)
assert check_init_template.returncode == 0
assert Path(path_HelloWorld).joinpath("recipe.yaml").resolve().exists()
config_file = Path(path_HelloWorld).joinpath("gdk-config.json").resolve()
assert config_file.exists()
# Update gdk-config file mandatory field like region.
with open(str(config_file), "r") as f:
config = json.loads(f.read())
config["component"]["com.example.PythonHelloWorld"]["publish"]["region"] = "us-east-1"
with open(str(config_file), "w") as f:
f.write(json.dumps(config))
# Check if build works as expected.
check_build_template = sp.run(["gdk", "component", "build"], check=True, stdout=sp.PIPE)
assert check_build_template.returncode == 0
assert Path(path_HelloWorld).joinpath("zip-build").resolve().exists()
assert Path(path_HelloWorld).joinpath("greengrass-build").resolve().exists()
artifact_path = (
Path(path_HelloWorld)
.joinpath("greengrass-build")
.joinpath("artifacts")
.joinpath("com.example.PythonHelloWorld")
.joinpath("NEXT_PATCH")
.joinpath("HelloWorld.zip")
.resolve()
)
recipes_path = Path(path_HelloWorld).joinpath("greengrass-build").joinpath("recipes").joinpath("recipe.yaml").resolve()
    assert artifact_path.exists()
    assert recipes_path.exists()
| 40.918605 | 126 | 0.700199 | 443 | 3,519 | 5.410835 | 0.227991 | 0.045056 | 0.023363 | 0.049645 | 0.577388 | 0.483521 | 0.430955 | 0.396329 | 0.124322 | 0.124322 | 0 | 0.002002 | 0.148338 | 3,519 | 85 | 127 | 41.4 | 0.797798 | 0.06877 | 0 | 0.142857 | 0 | 0 | 0.189181 | 0.039731 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.095238 | false | 0 | 0.095238 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da6b4c0eec1b1ed14670ffd508f05ac5d26c2b77 | 1,326 | py | Python | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | 1 | 2020-03-17T08:28:48.000Z | 2020-03-17T08:28:48.000Z | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | null | null | null | mysite/image/forms.py | HelloTecXin/ZXBlog | 60d1f95f541138aa56acbaf4dcfbfe208491d65b | [
"MIT"
] | null | null | null | from django import forms
from django.core.files.base import ContentFile
from slugify import slugify
from urllib import request
from .models import Image
class ImageForm(forms.ModelForm):
class Meta:
model = Image
fields = ('title','url','description')
def clean_url(self):
url = self.cleaned_data['url']
        valid_extensions = ['jpg','jpeg','png']  # allowed image extensions
        extension = url.rsplit('.',1)[1].lower()  # extract the extension from the submitted image URL
        if extension not in valid_extensions:  # only URLs with an allowed extension are accepted as images
raise forms.ValidationError('The given url does not match valid image extension.')
return url
def save(self,force_insert=False,force_update=False,commit=True):
        # ModelForm's save() persists the submitted form data to the database
        image = super(ImageForm, self).save(commit=False)  # call the parent ModelForm save(); commit=False builds the instance without saving it yet
image_url = self.cleaned_data['url']
image_name = '{0}.{1}'.format(slugify(image.title),image_url.rsplit('.',1)[1].lower())
        response = request.urlopen(image_url)  # GET the image URL and read back the image data from the response
        image.image.save(image_name, ContentFile(response.read()), save=False)  # store the downloaded bytes locally under the name built above
if commit:
image.save()
return image
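# Usage sketch (hypothetical view code, not part of this module):
#   form = ImageForm(data=request.GET)
#   if form.is_valid():
#       new_image = form.save(commit=False)  # runs the download logic above
#       new_image.save()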
| 41.4375 | 113 | 0.667421 | 150 | 1,326 | 5.82 | 0.5 | 0.024055 | 0.032073 | 0.041237 | 0.084765 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005803 | 0.220211 | 1,326 | 31 | 114 | 42.774194 | 0.838491 | 0.159879 | 0 | 0 | 0 | 0 | 0.088372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.2 | 0 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da71b1822bbf0d5369bea52d007ca8f6061551f0 | 128 | py | Python | MT-top-perf-cron.py | AdeTheux/ducksboard | 339c965dcef448713ed521ba066759f6fb43c2b1 | [
"MIT"
] | null | null | null | MT-top-perf-cron.py | AdeTheux/ducksboard | 339c965dcef448713ed521ba066759f6fb43c2b1 | [
"MIT"
] | null | null | null | MT-top-perf-cron.py | AdeTheux/ducksboard | 339c965dcef448713ed521ba066759f6fb43c2b1 | [
"MIT"
] | null | null | null | python /homez.144/arnoz/www/dev/MT_ducksboard_top_perf.py -u EMAIL/token -p TOKEN -d mtservicedesk.zendesk.com -a TOKEN -l 72676 | 128 | 128 | 0.796875 | 24 | 128 | 4.125 | 0.916667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.068376 | 0.085938 | 128 | 1 | 128 | 128 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
da741a60b0b7e242baf0c8917409303691b189e3 | 1,110 | py | Python | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | APP/__init__.py | jcyongqin/MerryChristmas2016 | f1bfc0f9df33dad474f28bbefa21f320e4ee48e9 | [
"MIT"
] | null | null | null | print('Merry Christmas!!!')
import sys
#
# int main(int argc, char* argv[]) {
# int n = argc > 1 ? atoi(argv[1]) : 4;
# for (int j = 1; j <= n; j++) {
# int s = 1 << j, k = (1 << n) - s, x;
# for (int y = s - j; y >= 0; y--, putchar('\n')) {
# for (x = 0; x < y + k; x++) printf(" ");
# for (x = 0; x + y < s; x++) printf("%c ", '!' ^ y & x);
# for (x = 1; x + y < s; x++) printf("%c ", '!' ^ y & (s - y - x - 1));
# }
# }
# }
def main(*args):
# """上面的是我尝试尽量用最少代码来画一个抽象一点的圣诞树,因此树干都没有."""
    if len(args) > 1:
        n = int(args[1])
else:
n = 4
for j in range(n):
s = 1 << j
k = (1 << n) - s
x = 0
for y in range(s - j)[::-1]:
for x in range(y + k):
print(" ", end="")
for x in range(s - y):
print("%s " % chr(ord('!') ^ y & x), end="")
for x in range(1, s - y + 1):
print("%s " % chr(ord('!') ^ y & (s - y - x - 1)), end="")
print("")
if __name__ == "__main__":
    main(*sys.argv)
| 28.461538 | 83 | 0.351351 | 160 | 1,110 | 2.3625 | 0.23125 | 0.063492 | 0.047619 | 0.087302 | 0.301587 | 0.10582 | 0.10582 | 0.042328 | 0 | 0 | 0 | 0.032308 | 0.414414 | 1,110 | 38 | 84 | 29.210526 | 0.549231 | 0.430631 | 0 | 0 | 0 | 0 | 0.058252 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.047619 | 0 | 0.095238 | 0.238095 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da760162956fd30fc878df9712168c347b1cba4a | 1,013 | py | Python | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | null | null | null | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | 34 | 2021-09-10T01:08:14.000Z | 2022-03-25T18:10:08.000Z | pyday_night_funkin/enums.py | Square789/PydayNightFunkin | 8d43daec947202566419a2d5ce63cc191b7b8e3c | [
"Apache-2.0"
] | null | null | null | """
Enums that aren't really too coupled to anything else.
"""
from enum import IntEnum
class DIFFICULTY(IntEnum):
EASY = 0
NORMAL = 1
HARD = 2
def to_song_json_suffix(self) -> str:
if self is self.EASY:
return "-easy"
elif self is self.NORMAL:
return ""
elif self is self.HARD:
return "-hard"
return ""
def to_atlas_prefix(self) -> str:
if self is self.EASY:
return "EASY"
elif self is self.NORMAL:
return "NORMAL"
elif self is self.HARD:
return "HARD"
return ""
# NOTE: That sucks, but is needed for menu selections etc.
DIFFICULTY_REVERSE_MAP = [DIFFICULTY.EASY, DIFFICULTY.NORMAL, DIFFICULTY.HARD]
class CONTROL(IntEnum):
LEFT = 0
DOWN = 1
UP = 2
RIGHT = 3
ENTER = 4
BACK = 5
DEBUG_DESYNC = 100
DEBUG_WIN = 101
DEBUG_LOSE = 102
class GAME_STATE(IntEnum):
LOADING = 0
COUNTDOWN = 1
PLAYING = 2
ENDED = 3
class ANIMATION_TAG(IntEnum):
IDLE = 0
SING = 1
MISS = 2
SPECIAL = 3
STORY_MENU = 4
STATIC = 5
PRESSED = 6
CONFIRM = 7
GAME_OVER = 8
| 15.828125 | 78 | 0.672261 | 158 | 1,013 | 4.221519 | 0.512658 | 0.053973 | 0.089955 | 0.083958 | 0.278861 | 0.278861 | 0.278861 | 0.278861 | 0.176912 | 0.176912 | 0 | 0.039846 | 0.231984 | 1,013 | 63 | 79 | 16.079365 | 0.817481 | 0.110563 | 0 | 0.191489 | 0 | 0 | 0.026876 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.021277 | 0 | 0.851064 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7627eec05c00f505b4e3736efa9cd06fb060ee | 946 | py | Python | app/restapi/apiMovieDetail.py | DucVinh2609/mtb_admin | 0f67faabcda7b6a5bd5f30126b46a5367d00f77b | [
"MIT"
] | null | null | null | app/restapi/apiMovieDetail.py | DucVinh2609/mtb_admin | 0f67faabcda7b6a5bd5f30126b46a5367d00f77b | [
"MIT"
] | 4 | 2021-06-08T20:42:38.000Z | 2022-03-12T00:07:41.000Z | app/restapi/apiMovieDetail.py | DucVinh2609/mtb_admin | 0f67faabcda7b6a5bd5f30126b46a5367d00f77b | [
"MIT"
] | null | null | null | # import pymysql
# from app import app
# from flask import jsonify
# from flask import flash, request
# from flask_restful import Resource, Api
# from flaskext.mysql import MySQL
# mysql = MySQL()
# # MySQL configurations
# app.config['MYSQL_DATABASE_USER'] = 'root'
# app.config['MYSQL_DATABASE_PASSWORD'] = ''
# app.config['MYSQL_DATABASE_DB'] = 'mtb_db'
# app.config['MYSQL_DATABASE_HOST'] = 'localhost'
# mysql.init_app(app)
# class apiMovieDetail(Resource):
# def get(self, id):
# conn = mysql.connect()
# cursor = conn.cursor(pymysql.cursors.DictCursor)
#         cursor.execute("SELECT id id, name name, movieformat_id movieformat_id, movietype_id movietype_id, duration duration, country_code country_code, start_date start_date, end_date end_date, image image, note note, description description from movies WHERE id=%s", (id,))
# rows = cursor.fetchall()
# resp = jsonify(rows)
# resp.status_code = 200
# return resp
| 36.384615 | 273 | 0.732558 | 127 | 946 | 5.283465 | 0.456693 | 0.053651 | 0.083458 | 0.131148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003727 | 0.149049 | 946 | 25 | 274 | 37.84 | 0.829814 | 0.948203 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
da774b4615481a7592e47b528fe32fca5d803722 | 56 | py | Python | aioface/storages/fsm/types.py | kirillkuzin/aioface | c19f89f3f0f6ccb95832030444f2ece8fda7de62 | [
"MIT"
] | 1 | 2020-09-12T21:10:54.000Z | 2020-09-12T21:10:54.000Z | aioface/storages/fsm/types.py | kirillkuzin/aioface | c19f89f3f0f6ccb95832030444f2ece8fda7de62 | [
"MIT"
] | null | null | null | aioface/storages/fsm/types.py | kirillkuzin/aioface | c19f89f3f0f6ccb95832030444f2ece8fda7de62 | [
"MIT"
] | null | null | null | import typing
State = typing.NewType('State', object)
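# NewType makes State a distinct type for static checkers while remaining a
# no-op at runtime, e.g. current = State("main_menu") just returns the string.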
| 11.2 | 39 | 0.732143 | 7 | 56 | 5.857143 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 56 | 4 | 40 | 14 | 0.854167 | 0 | 0 | 0 | 0 | 0 | 0.089286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 3 |
da776d50619a771e6d0a4ec84979dc6f33204a34 | 291 | py | Python | wqxweblib/__main__.py | FlippingBinary/wqxweblib | 129ac6d010f5fb726fe29dc9494f90f19a7ec4c0 | [
"MIT"
] | null | null | null | wqxweblib/__main__.py | FlippingBinary/wqxweblib | 129ac6d010f5fb726fe29dc9494f90f19a7ec4c0 | [
"MIT"
] | null | null | null | wqxweblib/__main__.py | FlippingBinary/wqxweblib | 129ac6d010f5fb726fe29dc9494f90f19a7ec4c0 | [
"MIT"
] | null | null | null | import sys
def main(argv:list):
print('This module does not yet support direct execution. It should be used as a library.')
print('More information is available at https://github.com/FlippingBinary/wqxweblib-python')
return 0
if __name__ == "__main__":
main(sys.argv[1:])
| 29.1 | 95 | 0.714777 | 43 | 291 | 4.651163 | 0.883721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008333 | 0.175258 | 291 | 9 | 96 | 32.333333 | 0.825 | 0 | 0 | 0 | 0 | 0 | 0.613475 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0.285714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
da78a73d88c09570047fcaf5e2a501ef100b4dc0 | 28,780 | py | Python | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 1 | 2021-06-02T18:35:05.000Z | 2021-06-02T18:35:05.000Z | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 64 | 2020-05-11T12:35:26.000Z | 2022-03-28T16:03:37.000Z | tools/check_cluster.py | jmatuskey/jupyterhub-deploy | 6669bb0fa8e6da52f74d4ca015cea9dc96105a34 | [
"Unlicense"
] | 11 | 2020-04-07T13:32:07.000Z | 2022-02-07T19:16:24.000Z | #! /usr/bin/env python
"""Check properties of Terraformed resources and/or JupyterHub to verify good deployment.
Ignore the hub since it may not be deployed on the cluster yet.
Check creation date.
Check for global hammer.
"""
import sys
import os
import subprocess
import argparse
import re
import json
from collections import defaultdict
import builtins
import functools
import traceback
import yaml
CLUSTER_CHECKS = """
Globals:
environment:
- DEPLOYMENT_NAME
- ENVIRONMENT
- JH_HOSTNAME
- ADMIN_ARN
- ACCOUNT_ID
constants:
V_K8S: "1.21"
MAX_NODE_AGE: 10d
MAX_EFS_FILE_SYSTEM_SIZE: 50000000000000
CORE_NODES: 3
NOTEBOOK_EC2_TYPE: r5.xlarge
MAX_RESTARTS: 0
LOG_REACH: 30m
Groups:
- group: Kubernetes Pods
command: kubectl get pods -A
parser: named_columns
assertions:
- name: All pods
all: STATUS=='Running' and int(RESTARTS)<=MAX_RESTARTS
- name: EFS provisioner
ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME
- name: Kube Proxy
ok_rows>=4: NAMESPACE=='kube-system' and 'kube-proxy' in NAME
- name: Autoscaler
ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME
- name: AWS Pods
ok_rows>=4: NAMESPACE=='kube-system' and 'aws-node' in NAME
- name: Core DNS
ok_rows==2: NAMESPACE=='kube-system' and 'coredns' in NAME
- group: JupyterHub Pods
command: kubectl get pods -A
parser: named_columns
assertions:
- name: Image puller
ok_rows>=1: NAMESPACE=='default' and 'continuous-image-puller' in NAME
- name: Hub
ok_rows==1: NAMESPACE=='default' and 'hub' in NAME
- name: Proxy
ok_rows>=1: NAMESPACE=='default' and 'proxy' in NAME
- name: User-scheduler
ok_rows==2: NAMESPACE=='default' and 'user-scheduler' in NAME
- name: User-placeholder
ok_rows>=1: NAMESPACE=='default' and 'user-placeholder' in NAME
- group: JupyterHub Nodes
command: kubectl get nodes -A --show-labels=true
parser: named_columns
assertions:
- name: At least 4 STATUS Ready new Hub AMI ID
ok_rows>=4: STATUS=="Ready" # and HUB_AMI_ID in LABELS
- name: All Nodes Ready Status
all: STATUS=="Ready" or STATUS=="Ready,SchedulingDisabled"
- name: Kubernetes Version
all: V_K8S in VERSION
- name: Node Age
all: convert_age(AGE) < convert_age(MAX_NODE_AGE)
- name: Core us-east-1a
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1a' in LABELS"
- name: Core us-east-1b
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1b' in LABELS"
- name: Core us-east-1c
ok_rows==1: "DEPLOYMENT_NAME+'-core' in LABELS and 't3.small' in LABELS and 'zone=us-east-1c' in LABELS"
- name: Notebook nodes
ok_rows>=1: "DEPLOYMENT_NAME+'-notebook' in LABELS and NOTEBOOK_EC2_TYPE in LABELS and 'region=us-east-1' in LABELS"
- group: EKS Services
command: kubectl get services -A
parser: named_columns
assertions:
- name: Datadog Cluster Agent Service
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-cluster-agent' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='5005/TCP'
- name: Datadog Kube State Metrics Service
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog-kube-state-metrics' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8080/TCP'
- name: Hub Service
ok_rows==1: NAMESPACE=='default' and NAME=='hub' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8081/TCP'
- name: Kubernetes Service
ok_rows==1: NAMESPACE=='default' and NAME=='kubernetes' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='443/TCP'
- name: Proxy API Service
ok_rows==1: NAMESPACE=='default' and NAME=='proxy-api' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8001/TCP'
- name: Proxy Public Service
ok_rows==1: NAMESPACE=='default' and NAME=='proxy-public' and TYPE=='LoadBalancer' and '.elb.amazonaws.com' in _['EXTERNAL-IP'] and '443:' in _['PORT(S)'] and '80:' in _['PORT(S)'] and 'TCP' in _['PORT(S)'] and 'UDP' not in _['PORT(S)']
- name: Cluster Autoscaler Service
ok_rows==1: NAMESPACE=='kube-system' and NAME=='cluster-autoscaler-aws-cluster-autoscaler' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='8085/TCP'
- name: Kube DNS Service
ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-dns' and TYPE=='ClusterIP' and _['EXTERNAL-IP']=='<none>' and _['PORT(S)']=='53/UDP,53/TCP'
- group: EKS Deployments
command: kubectl get deployments -A
parser: named_columns
assertions:
- name: Hub Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='hub' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Proxy Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='proxy' and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: User Scheduler Deployment
ok_rows==1: NAMESPACE=='default' and NAME=='user-scheduler' and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
- name: Cluster Autoscaler Deployment
ok_rows==1: NAMESPACE=='kube-system' and 'cluster-autoscaler' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Core DNS Deployment
ok_rows==1: NAMESPACE=='kube-system' and 'coredns' in NAME and READY=='2/2' and _['UP-TO-DATE']=='2' and AVAILABLE=='2'
- name: EFS Provisioner Deployment
ok_rows==1: NAMESPACE=='support' and 'efs-provisioner' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Datadog Cluster Agent Deployment
ok_rows==1: NAMESPACE=='datadog' and 'datadog-cluster-agent' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- name: Datadog Kube Metrics Deployment
ok_rows==1: NAMESPACE=='datadog' and 'datadog-kube-state-metrics' in NAME and READY=='1/1' and _['UP-TO-DATE']=='1' and AVAILABLE=='1'
- group: Route-53 Host
command: "host {JH_HOSTNAME}"
parser: raw
assertions:
- name: DNS Mapping
simple: "f'{JH_HOSTNAME} is an alias for' in _"
- group: JupyterHub Index Page
command: "wget --no-check-certificate -O- {JH_HOSTNAME}"
parser: raw
assertions:
- name: Server Index Page
simple: "'HTTP request sent, awaiting response... 200 OK' in _"
- group: EFS File Systems
command: awsudo {ADMIN_ARN} aws efs describe-file-systems --output yaml --query FileSystems
parser: yaml
assertions:
- name: EFS Home Dirs
ok_rows==1: Name==DEPLOYMENT_NAME+'-home-dirs' and LifeCycleState=='available' and Encrypted==True and NumberOfMountTargets==3 and OwnerId==ACCOUNT_ID and aws_kv_dict(Tags)['stsci-backup']=='dmd-2w-sat'
- name: EFS Max Size
all: int(SizeInBytes['Value']) < MAX_EFS_FILE_SYSTEM_SIZE
- group: Daemonsets named rows
command: kubectl get daemonsets -A
parser: named_rows
assertions:
- name: datadog - proxy - aws-nodes READY
simple: _['datadog']['READY'] == _['kube-proxy']['READY'] == _['aws-node']['READY']
- name: datadog - proxy - aws-nodes DESIRED
simple: _['datadog']['DESIRED'] == _['kube-proxy']['DESIRED'] == _['aws-node']['DESIRED']
- name: datadog - proxy - aws-nodes CURRENT
simple: _['datadog']['CURRENT'] == _['kube-proxy']['CURRENT'] == _['aws-node']['CURRENT']
- name: datadog - proxy - aws-nodes UP-TO-DATE
simple: _['datadog']['UP-TO-DATE'] == _['kube-proxy']['UP-TO-DATE'] == _['aws-node']['UP-TO-DATE']
- name: datadog - proxy - aws-nodes AVAILABLE
simple: _['datadog']['AVAILABLE'] == _['kube-proxy']['AVAILABLE'] == _['aws-node']['AVAILABLE']
- name: continuous image puller notebook nodes only
simple: int(_['continuous-image-puller']['READY']) == int(_['aws-node']['READY']) - CORE_NODES
- group: Daemonsets named columns
command: kubectl get daemonsets -A
parser: named_columns
assertions:
- name: continuous-image-puller
ok_rows==1: NAMESPACE=='default' and NAME=='continuous-image-puller'
- name: datadog
ok_rows==1: NAMESPACE=='datadog' and NAME=='datadog'
- name: kube-proxy
ok_rows==1: NAMESPACE=='kube-system' and NAME=='kube-proxy'
      - name: aws-node
ok_rows==1: NAMESPACE=='kube-system' and NAME=='aws-node'
- name: matching daemonset states
all: READY==DESIRED==CURRENT==AVAILABLE==_['UP-TO-DATE']
- group: EKS AMI Rotation
command: awsudo {ADMIN_ARN} aws eks list-nodegroups --cluster-name {DEPLOYMENT_NAME} --query nodegroups --output text
parser: raw
assertions:
- name: Only rotated nodegroup names
simple: "functools.reduce(lambda a, b: a and b, [x.count('-')!=1 for x in _.split()])"
- group: Log Error Check
function: pod_logs(LOG_REACH)
parser: yaml
assertions:
- name: No errors in logs
simple: ERRORS==0
- group: Pod to Node Map
command: kubectl get pods -A -o wide
replace_output:
input: NOMINATED NODE
output: NOMINATED_NODE
parser: node_map
print_parsing: true
""" # noqa: E501
def convert_age(age_str):
"""Convert k8s abbreviated-style datetime str e.g. 14d2h to an integer."""
# age_str_org = age_str
def age_subst(age_str, letter, factor):
parts = age_str.split(letter)
if len(parts) == 2:
age_str = parts[0] + "*" + factor + "+" + parts[1]
return age_str
age_str = age_subst(age_str, "d", "60*60*24")
age_str = age_subst(age_str, "h", "60*60")
age_str = age_subst(age_str, "m", "60")
age_str = age_subst(age_str, "s", "1")
age_str = age_str[:-1]
# print(
# f"convert_age({repr(age_str_org)}) --> {repr(age_str)} --> {eval(age_str)}" # nosec
# ) # nosec
return eval(age_str) # nosec
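# Illustrative check: convert_age("1d2h30m") builds "1*60*60*24+2*60*60+30*60"
# and evaluates to 95400 seconds.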
def aws_kv_dict(key_value_dict_list):
"""Convert AWS dict representation [{ 'Key':k, 'Value':v}, ...] to a Python dict."""
return {item["Key"]: item["Value"] for item in key_value_dict_list}
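# Example: aws_kv_dict([{"Key": "stsci-backup", "Value": "dmd-2w-sat"}]) returns
# {"stsci-backup": "dmd-2w-sat"}, matching the Tags lookup in the spec above.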
def run(cmd, cwd=".", timeout=10):
"""Run subprocess `cmd` in dir `cwd` failing if not completed within `timeout` seconds
    or if `cmd` returns a non-zero exit status.
Returns both stdout+stderr from `cmd`. (untested, verify manually if in doubt)
"""
print(cmd)
result = subprocess.run(
cmd.split(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
text=True,
check=True,
cwd=cwd,
timeout=timeout,
) # maybe succeeds
return result.stdout
def parse_node_map(output):
namespaces = parse_named_columns(output)
node_map = defaultdict(list)
for namespace in namespaces:
node_map[namespace["NODE"]].append(
namespace["NAMESPACE"] + ":" + namespace["NAME"]
)
output = ["Mapping from Node to Pod", "-" * 80, yaml.dump(dict(node_map))]
return "\n".join(output)
def parse_named_columns(output):
"""Return rows from a table string `output` as a sequence of dicts.
The first row should contain whitespace delimited column names.
Each subsequent row should contain whitespace delimited column values.
Given tabular `output` as found in many k8s commands:
col1_name col2_name ...
col1_row1_val col2_row1_val ...
col1_row2_val col1_row2_val ...
...
Returns [ {col1_name: col1_row1_val, col2_name: col2_row1_val, ...},
{col1_name: col1_row2_val, col2_name: col2_row2_val, ...},
... ]
Each dict in the returned sequence is suitable as a namespace for eval()
"""
lines = output.splitlines()
columns = lines[0].split()
rows = []
for line in lines[1:]:
d = dict(zip(columns, line.split()))
d["_"] = d
rows.append(d)
return rows
def parse_named_rows(output, key="NAME"):
return {"_": {row[key]: row for row in parse_named_columns(output)}}
def parse_raw(output):
"""Just return `output` as a single string assigned to dict key '_'
for reference in assertion expressions.
Returns {'_': output}
"""
return dict(_=output)
def parse_yaml(output):
"""Return the YAML parsing of `output` string. aws commands can
be filtered using the --query parameter to produce more manageable
output before YAML parsing.
"""
return yaml.safe_load(output)
def parse_json(output):
"""Return the JSON parsing of `output` string. aws commands can
be filtered using the --query parameter to produce more manageable
output before JSON parsing.
"""
return json.loads(output)
def parse_none(output):
"""Return the input as the output, i.e. no changes."""
return output
def test_function(parameters):
return yaml.dump(parameters)
class Checker:
"""The Checker class runs a number of tests defined in a `test_spec` string.
Commands
--------
Each Group includes a subprocess CLI command from which the output is captured,
parsed, and checked against various assertions.
Output Parsing
--------------
The command output is parsed using a parser which can be be one of
named_rows, raw, yaml, or json.
named_rows is ideal for parsing kubectl output in which each row
defines a set of variables as a dict. named_rows requires that
column names and values do not contain spaces; generally it is not
a problem but not all kubectl output modes work.
raw simply returns { "_": cmd_output } so _ is used as a variable
in assertions to refer to the entire output string.
yaml and json return parsed command output using their respective
loaders. The --query parameter of the 'aws' commands can be
useful for pre-filtering command output so that a simple direct
parsing is usable in assertions.
Test Assertions
---------------
A series of assertions are evaluated on the parsed output from each group's command.
Assertions take the form:
simple: <python expression using parsed outputs to define variables, eval must pass.>
ok_rows_expr: <python expression using parsed outputs to define row variables, ok_rows_expr must be True.>
all: <python expression using parsed outputs to define row variables, each row must pass.>
Examples of ok_rows expressions might be:
ok_rows==1
ok_rows>=3
Pseudo code for 'all' is:
ok_rows==len(total output rows)
ok_rows is assigned the number of times the assertion evaluates to True when run
against each of the row namespace dicts. Hence overall test success does not
require every row to pass the assertion.
The `test_spec` specifies a string of YAML which defines:
Globals:
environment:
- env var1 needed in assertion expressions imported from os.environ
...
constants:
- VAR: VAL a VAR needed in assertion expressions with the spec'd VAL
...
Groups:
- group: <Command Group Name>
command: <UNIX subprocess command string>
parser: <named_rows|raw|yaml|json>
assertions:
- name: <Name defining check>
<simple|all|ok_rows_expr>: <python expression>
- name: <Name defining check>
<simple|all|ok_rows_expr>: <python expression>
...
...
    NOTE: In the spec, substitutions for output vars, env vars, constants,
variables, and built-in functions occur in two basic ways:
- Using Python's f-string {} formatting. (commands)
- Treated as a variable name to be eval'ed. (assertions)
This is because commands are "".format()'ed but assertions are eval'ed,
each against similar namespaces with the caveat that the command formatting
    includes no variables derived from its own output.
if `output_file` is specified, commands are run and outputs are
stored at the spec'ed path, the checker exits w/o running tests.
if `input_file` is specified, it is presumed to be the path to command
output YAML stored by `output_file` and replaces running commands,
checks are run using the stored outputs.
input_file and output_file are mutually exclusive.
if `verbose` is specified then additional assertion-by-assertion,
row-by-row output is generated.
if `groups_regex` is specified, only the group names which can be
searched by the regex are checked. (case insensitive substrings
of group names work).
    if `variables` is specified, it should be a comma-separated string
of VAR=VAL pairs, i.e. VAR1=VAL1,VAR2=VAL2,...
These variables are added to the namespace used for running/eval'ing
commands and assertions and override values already defined in Globals.
""" # noqa: E501
def __init__(
self,
test_spec=CLUSTER_CHECKS,
output_file=None,
input_file=None,
verbose=False,
groups_regex=".+",
exclude_regex="^$",
variables=None,
):
self._output_file = output_file
self._input_file = input_file
self._verbose = verbose
self._groups_regex = groups_regex
self._exclude_regex = exclude_regex
print("===> Loading test spec")
self.loaded_spec = yaml.safe_load(test_spec)
self.variables = (
dict([var.split("=") for var in variables.split(",")]) if variables else []
)
self._outputs = {}
self._errors = 0
self._error_msgs = []
@property
def groups(self):
return self.loaded_spec["Groups"]
@property
def spec_environment(self):
return {
var: os.environ[var]
for var in self.loaded_spec.get("Globals", {}).get("environment", [])
}
@property
def spec_constants(self):
return self.loaded_spec.get("Globals", {}).get("constants", {})
@property
def builtins(self):
result = {
key: getattr(builtins, key) for key in dir(builtins)
} # Python builtins
result.update(
dict(
convert_age=convert_age,
aws_kv_dict=aws_kv_dict,
test_function=test_function,
functools=functools,
pod_logs=self.pod_logs,
)
)
return result
@property
def combined_environment(self):
env = dict()
env.update(self.builtins)
env.update(self.spec_constants)
env.update(self.spec_environment)
env.update(self.variables)
return env
def main(self):
self.setup_outputs()
for check in self.groups:
if re.search(
self._groups_regex, check["group"], re.IGNORECASE
) and not re.search(self._exclude_regex, check["group"], re.IGNORECASE):
self.run_check(check)
if self._output_file:
self.store_outputs()
return self._errors
def setup_outputs(self):
"""Fetch saved commands ouputs from file rather than running commands."""
if self._input_file:
with open(self._input_file) as file:
self._outputs = yaml.safe_load(file)
else:
self._outputs = {}
def store_outputs(self):
"""Store command outputs to file for running offline later."""
print("=" * 80)
print("Saving", repr(self._output_file))
with open(self._output_file, "w+") as file:
yaml.dump(self._outputs, file)
def replace_output(self, check, output):
if check.get("replace_output"):
input_patt = check.get("replace_output").get("input")
output_patt = check.get("replace_output").get("output")
output = re.sub(input_patt, output_patt, output, flags=re.MULTILINE)
return output
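    # replace_output patches column headers that contain spaces (e.g.
    # "NOMINATED NODE" -> "NOMINATED_NODE") so parse_named_columns can split
    # rows purely on whitespace.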
def run_check(self, check):
print("=" * 80)
try:
output = self.get_command_output(check)
except Exception as exc:
self.error(
"Failed obtaining command output for group",
repr(check.get("group")),
":",
str(exc),
)
print("=" * 80)
return
if self._output_file:
return
if not output.startswith("FAILED"):
print("-" * 80)
print(output)
print("=" * 80)
self.process_output(check, output)
def process_output(self, check, output):
try:
output = self.replace_output(check, output)
parser = globals()[f"parse_{check['parser']}"]
namespaces = parser(output)
except Exception as exc:
self.error("PARSER failed for", repr(check["group"]), ":", str(exc))
return
if check.get("print_parsing"):
print(namespaces)
for assertion in check.get("assertions", []):
try:
self.check_assertion(check["group"], assertion, namespaces)
except Exception as exc:
self.error(
"EXECUTION failed for",
repr(check["group"]),
":",
repr(assertion["name"]),
":",
str(exc),
)
def get_command_output(self, check):
group = check["group"]
if not self._input_file:
self._outputs[group] = self.compute_outputs(group, check)
return self._outputs[group]
def compute_outputs(self, group, check):
if check.get("command"):
command = check.get("command").format(**self.combined_environment)
elif check.get("function"):
command = check.get("function").format(**self.combined_environment)
else:
raise RuntimeError(f"Group {group} doesn't define an input command.")
print("===> Fetching", repr(group))
print("=" * 80)
try:
if check.get("command"):
outputs = run(command).strip()
else:
outputs = eval( # nosec
command, self.combined_environment, self.combined_environment
)
except Exception as exc:
traceback.print_exc()
outputs = f"FAILED for '{group}': '{command}' : '{str(exc)}'"
self.error(outputs)
return outputs
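    # Note: `command:` groups shell out via run(), while `function:` groups
    # eval a Python call (e.g. pod_logs(LOG_REACH)) against the combined
    # environment defined above.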
def check_assertion(self, group_name, assertion, namespaces):
assertion = dict(assertion)
assertion_name = assertion.pop("name")
requirement, condition = list(assertion.items())[0]
# condition = condition.format(**self.combined_environment)
print(f"Checking assertion '{assertion_name}': {requirement} : {condition}")
if requirement == "simple":
self.verify_simple(group_name, assertion_name, namespaces, condition)
elif requirement.startswith(("ok_rows", "all")):
self.verify_rows(
group_name, assertion_name, namespaces, requirement, condition
)
else:
raise ValueError(
f"Unhandled requirement: {requirement} for assertion: {assertion}"
)
print()
def verify_rows(self, group_name, name, namespaces, requirement, condition):
rows = []
for i, namespace in enumerate(namespaces):
self.verbose(f"Checking '{name}' #{i} : {condition} ... ", end="")
if self.eval_condition(namespace, condition):
rows.append(namespace)
self.verbose("OK")
else:
self.verbose("FAILED on row:", namespace)
if requirement == "all":
requirement = f"ok_rows=={len(namespaces)}"
if self.eval_condition(dict(ok_rows=len(rows)), requirement): # nosec
print(f"===> OK '{group_name}' : '{name}'")
else:
self.error(f"FAILED '{group_name}' : '{name}' : {condition}")
def verify_simple(self, group_name, name, namespace, condition):
if self.eval_condition(namespace, condition):
print(f"===> OK '{group_name}' : '{name}'")
else:
self.error(f"FAILED '{group_name}' : '{name}' : {condition}")
self.verbose("Namespace:", namespace)
def eval_condition(self, namespace, condition):
namespace = dict(namespace) # local no-side-effects copy
namespace.update(self.combined_environment)
return eval(condition, {}, namespace) # nosec
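        # The conditions evaluated here come from the trusted test spec, not
        # from end users, which is why the deliberate eval() is marked nosec.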
def verbose(self, *args, **keys):
if self._verbose:
print(*args, **keys)
def error(self, *args):
self._errors += 1
self._error_msgs.append(" ".join(str(arg) for arg in args))
print("===> ERROR: ", *args)
def show_error_status(self):
print("=" * 80)
print("Overall", self._errors, "errors occurred:")
for msg in self._error_msgs:
print(msg)
def pod_logs(self, log_reach="30m"):
loaded = yaml.safe_load(run("kubectl get pods -A --output yaml"))
pods = [
(pod["metadata"]["namespace"], pod["metadata"]["name"])
for pod in loaded["items"]
]
print("=" * 80)
print("Fetching", len(loaded["items"]), "pod logs")
pod_errors = dict()
for i, (namespace, name) in enumerate(pods):
pod = f"{namespace}:{name}"
print()
output = run(
f"kubectl logs -n {namespace} {name} --since {log_reach} --all-containers --timestamps=True"
)
for line in output.splitlines():
if "error" in line.lower() and "| INFO |" not in line:
self.error(f"FAILED Pod {pod} log:", line)
if pod not in pod_errors:
pod_errors[pod] = []
pod_errors[pod].append(line)
print()
print("-" * 80)
return yaml.dump(
{
"ERRORS": len(pod_errors),
"FAILING_PODS": sorted(list(pod_errors.keys())),
"POD_ERRORS": pod_errors,
}
)
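    # The YAML string returned above is what the "Log Error Check" group
    # parses, so its assertion can simply test ERRORS==0.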
def parse_args():
parser = argparse.ArgumentParser(
description="Perform various cluster and hub checks to automatically detect basic anomalies."
)
parser.add_argument(
"--test-spec",
dest="test_spec",
action="store",
default=None,
help="Custom test specification. Defaults to None meaning use built-in spec.",
)
parser.add_argument(
"--output-file",
dest="output_file",
action="store",
default=None,
help="Filepath to store outputs of test commands.",
)
parser.add_argument(
"--input-file",
dest="input_file",
action="store",
default=None,
help="Filepath to load previously stored test command results.",
)
parser.add_argument(
"--verbose",
dest="verbose",
action="store_true",
help="Include additional output.",
)
parser.add_argument(
"--groups-regex",
dest="groups_regex",
action="store",
default=".+",
help="Select groups to execute based on the specified regex, defaulting to all groups."
" Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
)
parser.add_argument(
"--exclude-regex",
dest="exclude_regex",
action="store",
default="^$",
help="Select groups to skip based on the specified regex, defaulting to no groups."
" Unique group substrings are valid, |-or patterns together. Case is irrelevant.",
)
parser.add_argument(
"--variables",
dest="variables",
action="store",
default=None,
help="Custom override variables which can be used in commands, assertions, etc."
" --variables var1=val1,var2=val2,...",
)
return parser.parse_args()
def main():
"""Parse command line arguments and run the test spec.
Return the number of failing tests or 0 if all tests pass.
"""
args = parse_args()
test_spec = (
open(args.test_spec).read().strip() if args.test_spec else CLUSTER_CHECKS
)
checker = Checker(
test_spec=test_spec,
output_file=args.output_file,
input_file=args.input_file,
verbose=args.verbose,
groups_regex=args.groups_regex,
exclude_regex=args.exclude_regex,
variables=args.variables,
)
errors = checker.main()
checker.show_error_status()
return errors
if __name__ == "__main__":
sys.exit(main())
| 37.087629 | 243 | 0.618277 | 3,636 | 28,780 | 4.770627 | 0.162816 | 0.016603 | 0.012914 | 0.023982 | 0.2581 | 0.22276 | 0.18056 | 0.160613 | 0.127176 | 0.100254 | 0 | 0.011195 | 0.2582 | 28,780 | 775 | 244 | 37.135484 | 0.801302 | 0.202571 | 0 | 0.18705 | 0 | 0.053957 | 0.486507 | 0.083808 | 0 | 0 | 0 | 0 | 0.043165 | 1 | 0.064748 | false | 0 | 0.019784 | 0.008993 | 0.136691 | 0.052158 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da78b0227ad76c6a1e8ba2489ed9c76d00da8725 | 791 | py | Python | tests/in/test_application.py | evereux/catia_python | 08948585899b12587b0415ce3c9191a408b34897 | [
"MIT"
] | 90 | 2019-02-21T10:05:28.000Z | 2022-03-19T01:53:41.000Z | tests/in/test_application.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | 99 | 2019-05-21T08:29:12.000Z | 2022-03-25T09:55:15.000Z | tests/in/test_application.py | Luanee/pycatia | ea5eef8178f73de12404561c00baf7a7ca30da59 | [
"MIT"
] | 26 | 2019-04-04T06:31:36.000Z | 2022-03-30T07:24:47.000Z | #! /usr/bin/python3.6
from pycatia import catia
from tests.source_files import cat_part_measurable
def test_application():
caa = catia()
assert 'Application(name="CNEXT")' in caa.__repr__()
def test_refresh():
caa = catia()
documents = caa.documents
documents.open(cat_part_measurable)
document = caa.active_document
caa.refresh_display = False
assert caa.refresh_display is False
caa.refresh_display = True
assert caa.refresh_display is True
document.close()
def test_visible():
caa = catia()
documents = caa.documents
documents.open(cat_part_measurable)
document = caa.active_document
caa.visible = False
assert caa.visible is False
caa.visible = True
assert caa.visible is True
document.close()
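# Note: these tests assume a live CATIA session that pycatia's catia() can
# attach to; without a running CATIA application they will error out.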
| 19.775 | 56 | 0.705436 | 102 | 791 | 5.27451 | 0.333333 | 0.081784 | 0.126394 | 0.074349 | 0.416357 | 0.32342 | 0.32342 | 0.32342 | 0.32342 | 0.32342 | 0 | 0.0032 | 0.209861 | 791 | 39 | 57 | 20.282051 | 0.8576 | 0.025284 | 0 | 0.44 | 0 | 0 | 0.032468 | 0.032468 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.12 | false | 0 | 0.08 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7992586d3c2316d0ce8cb23cf3e01e30ae505b | 4,632 | py | Python | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | test/util.py | CarysT/xar | f476c05dec373fcdcd0e884d5a0201501555edb9 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
import contextlib
import hashlib
import os
import os.path
import shutil
import stat
import subprocess
import sys
import xattr
class TestCaseSkipError(Exception):
pass
def skip_if_no_compression_support(type):
"""
Raises TestCaseSkipError if the type is "lzma" and the test is running on
darwin (OS X). In the future, we should add a hidden debugging flag to xar
to determine valid compression types. This will skip incorrectly if a
custom xar is used on OS X, or if a custom xar on another platform is
built without bzip2 or lzma.
"""
if sys.platform == "darwin" and type == "lzma":
raise TestCaseSkipError("{t} support not compiled in".format(t=type))
@contextlib.contextmanager
def directory_created(directory_path):
"""
Creates the named directory and provides the path to the directory to the
calling code. Automatically removes the directory when finished.
Usage:
with directory_created("foobar") as path:
do_stuff_with_path
"""
os.mkdir(directory_path)
try:
yield os.path.realpath(directory_path)
finally:
if os.path.exists(directory_path):
shutil.rmtree(directory_path)
@contextlib.contextmanager
def archive_created(archive_path, content_path, *extra_args, **extra_kwargs):
"""
Creates a named xar archive of the specified content path, returning the
path to the archive. Automatically removes the archive when finished.
Usage:
with archive_created("/bin", "bin.xar") as path:
do_stuff_with(path)
"""
cmd = ["xar", "-c", "-f", archive_path, content_path]
if extra_args:
cmd += list(extra_args)
try:
subprocess.check_call(cmd, **extra_kwargs)
assert os.path.exists(archive_path), "failed to create archive \"{p}\" but xar did not report an error".format(p=archive_path)
yield os.path.realpath(archive_path)
finally:
if os.path.exists(archive_path):
os.unlink(archive_path)
HASH_CHUNK_SIZE = 32768
def _md5_path(path):
    with open(path, "rb") as f:
h = hashlib.md5()
while True:
last = f.read(HASH_CHUNK_SIZE)
if not last:
break
h.update(last)
return h.digest()
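# Chunked reads keep memory bounded for large files; md5 here is only for
# content comparison between directory trees, not for security.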
def assert_identical_directories(path1, path2):
"""
Verifies two directories have identical contents. Checks file type (via
    the high byte of the mode), size, mtime, and an MD5 content hash, but does not check
other attributes like uid and gid, since they can be expected to change.
"""
seen = set([])
for file1 in os.listdir(path1):
seen.add(file1)
entry1 = os.path.join(path1, file1)
entry2 = os.path.join(path2, file1)
assert os.path.exists(entry2), "\"{f1}\" exists in \"{p1}\" but not \"{p2}\"".format(f1=file1, p1=path1, p2=path2)
# Extended attributes
xattr1 = xattr.xattr(entry1)
xattr2 = xattr.xattr(entry2)
assert set(xattr1.list()) == set(xattr2.list()), "list of extended attributes on \"{f1}\" ({l1}) differs from \"{f2}\" ({l2})".format(f1=entry1, l1=xattr1.list(), f2=entry2, l2=xattr2.list())
for attribute in xattr1.list():
assert xattr1.get(attribute) == xattr2.get(attribute), "extended attribute \"{a1}\" on \"{f1}\" doesn't match value from \"{f2}\"".format(a1=attribute, f1=entry1, f2=entry2)
# Why do it this way? We want to lstat() instead of stat(), so we can't use os.path.isdir() and friends
stat1 = os.lstat(entry1)
stat2 = os.lstat(entry2)
# Modes
mode1 = stat1.st_mode
mode2 = stat2.st_mode
if stat.S_ISREG(mode1):
assert stat.S_ISREG(mode2)
if stat.S_ISDIR(mode1):
assert stat.S_ISDIR(mode2)
if stat.S_ISLNK(mode1):
assert stat.S_ISLNK(mode2)
if stat.S_ISCHR(mode1):
assert stat.S_ISCHR(mode2)
if stat.S_ISBLK(mode1):
assert stat.S_ISBLK(mode2)
if stat.S_ISFIFO(mode1):
assert stat.S_ISFIFO(mode2)
if stat.S_ISSOCK(mode1):
assert stat.S_ISSOCK(mode2)
# Sizes and the like
assert stat1.st_size == stat2.st_size, "size mismatch for \"{e1}\" ({s1}) and \"{e2}\" ({s2})".format(e1=entry1, s1=stat1.st_size, e2=entry2, s2=stat2.st_size)
assert stat1.st_mtime == stat2.st_mtime, "mtime mismatch for \"{e1}\" and \"{e2}\"".format(e1=entry1, e2=entry2)
assert _md5_path(entry1) == _md5_path(entry2), "md5 hash mismatch for \"{e1}\" and \"{e2}\"".format(e1=entry1, e2=entry2)
if os.path.isdir(entry1):
assert_identical_directories(entry1, entry2)
for file2 in os.listdir(path2):
assert file2 in seen, "\"{f2}\" exists in \"{p2}\" but not \"{p1}\"".format(f2=file2, p1=path1, p2=path2)
def touch(path):
if not os.path.exists(path):
with open(path, "w"):
pass
os.utime(path, None)
@contextlib.contextmanager
def chdir(*args, **kwargs):
cwd = os.getcwd()
os.chdir(*args, **kwargs)
try:
yield os.getcwd()
finally:
os.chdir(cwd)
| 31.726027 | 193 | 0.708765 | 721 | 4,632 | 4.457698 | 0.305132 | 0.02178 | 0.015246 | 0.034848 | 0.064095 | 0.053516 | 0.024891 | 0.024891 | 0.024891 | 0.024891 | 0 | 0.032711 | 0.155225 | 4,632 | 145 | 194 | 31.944828 | 0.788653 | 0.247193 | 0 | 0.117021 | 0 | 0 | 0.1039 | 0 | 0 | 0 | 0 | 0 | 0.180851 | 1 | 0.074468 | false | 0.021277 | 0.095745 | 0 | 0.191489 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da79b4fcd76b875d0455312cd540c29c3adde2c1 | 15,083 | py | Python | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | 5 | 2021-04-24T18:13:36.000Z | 2021-08-31T13:54:55.000Z | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | null | null | null | run_game_with_python_arcade.py | LiorAvrahami/fishy-game | e13d71ad04625edffc1ff32f56c918166f6b0bb9 | [
"MIT"
] | null | null | null | import arcade
import arcade.gui
from modifications_to_python_arcade.gui_manager import ModifiedUIManager
from modifications_to_python_arcade.resizeable_window import ResizeableWindow
from arcade.gui.ui_style import UIStyle
import fish
from controls import PlayerControlsObject
from fish_generator import RandomFishGenerator,WaveFishGenerator,FishGenerator
import time
import pickle
import os
from game_sprite_buttons import RestartGameButton,ContinueGameButton,YouWinPoster,ViewHighScoresButton,YouLosePoster
import resources
GL_NEAREST = 9728 # open_gl scaling filter key for nearest neighbor
from game_sprite_buttons import TextureButton
SCREEN_TITLE = "Eat or Be eaten"
from game_constents import min_computer_fish_size,max_computer_fish_size,min_computer_fish_speed,max_computer_fish_speed,player_win_size,player_start_size
all_deltatimes = []
num_of_high_scores = 5
screen_size:list
main_game_view:arcade.View
game:ResizeableWindow
class MainGameView(arcade.View):
"""
Main application class.
"""
fish_sprites: arcade.SpriteList
ui_manager : ModifiedUIManager
player_fish: fish.PlayerFish
paused:bool
# buttons def
restart_button_game_lost:RestartGameButton
continue_button_paused:ContinueGameButton
continue_button_game_lost:ContinueGameButton
you_win_poster: YouWinPoster
you_lose_poster: YouLosePoster
view_high_scores_button: ViewHighScoresButton
time_played:float
controls_handler: PlayerControlsObject
fish_generator: FishGenerator
b_did_win_already : bool
FLAG_open_high_scores_menue : int
@property
def height(self):
return screen_size[1]
@property
def width(self):
return screen_size[0]
def __init__(self):
super().__init__()
self.on_resize()
self.restart_game()
def restart_game(self):
""" Set up the game variables. Call to re-start the game. """
# Create your sprites and sprite lists here
# set up buttons
self.background_texture = resources.background_texture_map["idle"]
self.fish_sprites = arcade.SpriteList()
self.ui_manager = ModifiedUIManager(self.window)
self.player_fish = fish.PlayerFish(self)
self.fish_generator = RandomFishGenerator(1.1,self,min_fish_size=min_computer_fish_size,max_fish_size=max_computer_fish_size,min_fish_speed=min_computer_fish_speed,max_fish_speed=max_computer_fish_speed)
self.fish_sprites.append(self.player_fish)
self.paused = False
self.controls_handler = PlayerControlsObject(change_player_direction=self.player_fish.change_movement_direction,
reset_game=self.restart_game, pause_game=self.toggle_game_paused)
self.restart_button_game_lost = RestartGameButton(self,False)
self.restart_button_game_won = self.restart_button_game_lost
self.ui_manager.add_ui_element(self.restart_button_game_won)
self.continue_button_paused = ContinueGameButton(self,False)
self.ui_manager.add_ui_element(self.continue_button_paused)
self.you_win_poster = YouWinPoster(self,False)
self.you_win_poster.center_y += self.restart_button_game_won.height/2 + self.you_win_poster.height/2 + 10
self.ui_manager.add_ui_element(self.you_win_poster)
self.you_lose_poster = YouLosePoster(self,False)
self.you_lose_poster.center_y = self.restart_button_game_lost.top + self.you_win_poster.height / 2 + 10
self.ui_manager.add_ui_element(self.you_lose_poster)
self.continue_button_game_won = ContinueGameButton(self, False)
self.continue_button_game_won.center_y += -self.restart_button_game_won.height / 2 - self.continue_button_game_won.height / 2 - 10
self.ui_manager.add_ui_element(self.continue_button_game_won)
self.view_high_scores_button = ViewHighScoresButton(self,True)
self.view_high_scores_button.center_x = self.window.width - self.view_high_scores_button.width/2 - 20
self.view_high_scores_button.center_y = self.view_high_scores_button.height / 2 + 20
self.ui_manager.add_ui_element(self.view_high_scores_button)
self.time_played = 0
self.b_did_win_already = False
self.FLAG_open_high_scores_menue = -1
def on_draw(self):
"""
Render the screen.
"""
# This command should happen before we start drawing. It will clear
# the screen to the background color, and erase what we drew last frame.
arcade.start_render()
left, right, bottom, top = self.window.get_viewport()
arcade.draw_lrwh_rectangle_textured(0, 0,
right, top,
self.background_texture)
self.fish_sprites.draw(filter=GL_NEAREST)
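        # GL_NEAREST keeps the pixel-art fish crisp when scaled; linear
        # filtering would blur them.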
self.ui_manager.on_draw()
# draw time
arcade.draw_text("time: {:.0f}".format(self.time_played),20,self.height - 40,color=(255,240,200,210),font_size=25,bold=True,anchor_y="bottom",font_name="ariblk")
        # draw score (only when the game is lost)
arcade.draw_text("score: {:.0f}%".format((self.player_fish.size - player_start_size)/(player_win_size-player_start_size)*100), 20, self.height - 40,
color=(255, 240, 200, 210), font_size=25, bold=True, anchor_y="top", font_name="ariblk")
last_time = None
def on_update(self, delta_time):
"""
All the logic to move, and the game logic goes here.
"""
# calculate delta_time
if self.last_time is not None:
delta_time = time.time() - self.last_time
self.last_time = time.time()
if not self.is_game_lost and not self.b_did_win_already and not self.paused:
self.time_played += delta_time
# update game
if not self.paused:
self.fish_sprites.on_update(delta_time)
self.fish_generator.update(delta_time)
all_deltatimes.append(delta_time)
if self.FLAG_open_high_scores_menue == 0:
game.show_view(HighScoresView(self.time_played))
self.FLAG_open_high_scores_menue = -1
elif self.FLAG_open_high_scores_menue > 0:
self.FLAG_open_high_scores_menue -= 1
@property
def is_game_lost(self):
return not self.player_fish in self.fish_sprites
def unpause(self):
self.paused = False
self.continue_button_paused.is_visible = False
self.you_win_poster.is_visible = False
self.restart_button_game_won.is_visible = False
self.continue_button_game_won.is_visible = False
def toggle_game_paused(self):
if not self.is_game_lost:
if self.paused:
self.unpause()
else:
self.paused = True
self.continue_button_paused.is_visible = True
else:
self.restart_game()
def handle_game_lost(self):
self.restart_button_game_lost.is_visible = True
self.you_lose_poster.is_visible = True
def handle_game_won(self):
if not self.b_did_win_already:
self.you_win_poster.is_visible = True
self.continue_button_game_won.is_visible = True
self.restart_button_game_won.is_visible = True
self.b_did_win_already = True
high_scores = HighScoresView.load_high_scores()
if self.time_played < max([HighScoresView.try_parse(s[1]) for s in high_scores]):
self.FLAG_open_high_scores_menue = 1
def on_close(self):
self.window.on_close()
def switch_to_high_scores_view(self):
if not ( self.paused or self.b_did_win_already or self.is_game_lost ):
self.toggle_game_paused()
game.show_view(HighScoresView())
def on_show_view(self):
self.last_time = time.time()
self.controls_handler.reset_state()
def on_resize(self, width: float = 0, height: float = 0):
ratio = self.height/self.width
self.window.height = int(self.window.width*ratio)
return False
    # UI
def on_key_press(self, key, key_modifiers):
"""
Called whenever a key on the keyboard is pressed.
"""
self.controls_handler.on_keyboard_press(key, key_modifiers)
def on_key_release(self, key, key_modifiers):
"""
Called whenever the user lets off a previously pressed key.
"""
self.controls_handler.on_keyboard_release(key, key_modifiers)
def on_mouse_motion(self, *args,**kwargs):
self.ui_manager.on_mouse_motion(*args,**kwargs)
def on_mouse_press(self, *args, **kwargs):
self.ui_manager.on_mouse_press(*args,**kwargs)
def on_mouse_release(self, *args, **kwargs):
self.ui_manager.on_mouse_release(*args,**kwargs)
class HighScoresView(arcade.View):
text_input_box : arcade.gui.UIInputBox
text_output_box : arcade.gui.UILabel
high_scores_text_boxes : list
ui_manager : arcade.gui.UIManager
rectangle_background : arcade.SpriteSolidColor
def __init__(self,new_high_score=None):
super().__init__()
arcade.set_background_color(arcade.color.AZURE)
self.ui_manager = arcade.gui.UIManager(self.window)
self.uistyle = UIStyle.default_style()
font_color = (30, 50, 50)
self.uistyle.set_class_attrs("label",font_color=font_color,font_color_hover=font_color,font_color_press=font_color)
title_texture = arcade.load_texture(r"resources\high scores.png")
self.title_poster = arcade.gui.UIImageButton(center_x=self.width / 2,center_y=self.height,normal_texture=title_texture,hover_texture=title_texture,press_texture=title_texture)
self.title_poster.center_y -= self.title_poster.height/2
self.ui_manager.add_ui_element(self.title_poster)
self.rectangle_background = arcade.SpriteSolidColor(self.width//2,self.height,(140,150,200))
self.rectangle_background.center_x = self.width / 2
self.rectangle_background.center_y = self.height/ 2
self.line_background = arcade.SpriteSolidColor(10,int(self.title_poster.bottom - 70),(20,30,60))
self.line_background.center_x = self.width / 2
self.line_background.center_y = self.title_poster.bottom - self.line_background.height/2 - 30
# back button:
back_button = arcade.gui.UIImageButton(center_x=0, center_y=0, normal_texture=resources.back_button_texture_map["mouse_out"], hover_texture=resources.back_button_texture_map["mouse_in"],
press_texture=resources.back_button_texture_map["mouse_pressed"])
back_button.center_x = self.width - back_button.width / 2 - 20
back_button.center_y = self.height - back_button.height / 2 - 20
self.ui_manager.add_ui_element(back_button)
@back_button.event("on_click")
def on_click():
self.ui_manager.remove_handlers()
self.ui_manager.purge_ui_elements()
game.show_view(main_game_view)
high_scores = self.load_high_scores()
if new_high_score is not None:
for index in range(len(high_scores)):
if new_high_score < self.try_parse(high_scores[index][1]):
high_scores.insert(index,(None,"{:.3g}".format(new_high_score)))
high_scores.pop()
break
self.draw_high_scores_table(high_scores)
@property
def height(self):
return screen_size[1]
@property
def width(self):
return screen_size[0]
@staticmethod
def try_parse(s):
try:
return float(s)
except:
return float("inf")
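    # Non-numeric placeholders ("---") parse to inf, so any real finishing
    # time beats an empty slot in the table.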
    def draw_high_scores_table(self, high_scores: list):
        self.names_boxes = [arcade.gui.UILabel(name, 0, 0, style=self.uistyle) if name is not None else
                            self.create_input_box() for name, score in high_scores]
        self.scores_boxes = [arcade.gui.UILabel(score, 0, 0, style=self.uistyle) for name, score in high_scores]
        for i in range(len(self.names_boxes)):
            # Place row i 20px below the previous row (or below the title poster for the first row).
            y = (self.names_boxes[i - 1].center_y - self.names_boxes[i - 1].height / 2 if i > 0 else self.title_poster.bottom - 50) \
                - self.names_boxes[i].height / 2 - 20
            self.names_boxes[i].center_y = y
            self.names_boxes[i].center_x = self.width / 2 - self.names_boxes[i].width / 2 - 30
            self.scores_boxes[i].center_y = y
            self.scores_boxes[i].center_x = self.width / 2 + self.scores_boxes[i].width / 2 + 30
            self.ui_manager.add_ui_element(self.names_boxes[i])
            self.ui_manager.add_ui_element(self.scores_boxes[i])

    def create_input_box(self):
        ret = arcade.gui.UIInputBox(0, 0, (self.line_background.left - self.rectangle_background.left) // 1.2, style=self.uistyle)

        @ret.event("on_enter")
        def on_enter():
            # str.replace returns a new string, so the result must be assigned back.
            ret.text = ret.text.replace("\n", "\\n")
            high_scores = [(self.names_boxes[i].text, self.scores_boxes[i].text) for i in range(len(self.names_boxes))]
            self.save_high_scores(high_scores)
            # replace text box with label
            self.ui_manager._ui_elements.remove(ret)
            new_label = arcade.gui.UILabel(ret.text, 0, 0, style=self.uistyle)
            new_label.center_y = ret.center_y
            new_label.center_x = self.width / 2 - new_label.width / 2 - 30
            # Keep names_boxes in sync so any later save reads the label's text.
            index = self.names_boxes.index(ret)
            self.names_boxes[index] = new_label
            self.ui_manager.add_ui_element(new_label)

        self.ui_manager.focused_element = ret
        return ret

    def save_high_scores(self, high_scores):
        with open("high_scores.pypickle", "wb") as file:
            pickle.dump(high_scores, file)

    @staticmethod
    def load_high_scores():
        if os.path.exists("high_scores.pypickle"):
            with open("high_scores.pypickle", "rb") as file:
                high_scores = pickle.load(file)
        else:
            high_scores = []
        # Pad with placeholder rows and clamp to the table length.
        while len(high_scores) < num_of_high_scores:
            high_scores.append(("---", "---"))
        return high_scores[:num_of_high_scores]
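    # A minimal round-trip sketch of the two persistence helpers above (the
    # name/score pair is made up; only feed pickle.load files this game wrote
    # itself, since unpickling untrusted data can execute arbitrary code):
    #     view.save_high_scores([("ABC", "12.3")])
    #     HighScoresView.load_high_scores()[0]   # -> ("ABC", "12.3")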
    def on_draw(self):
        """
        Render the screen.
        """
        arcade.start_render()
        self.rectangle_background.draw()
        self.line_background.draw()

    def on_resize(self, width: float = 0, height: float = 0):
        ratio = self.height / self.width
        self.window.height = int(self.window.width * ratio)
        return False


def main():
    """ Main method """
    global game, main_game_view, screen_size
    game = ResizeableWindow(1000, 500, "Fishy Game", resizable=True)
    game.maximize()
    game.dispatch_events()
    screen_size = game.get_size()
    game.stretch_game_with_window = True
    # game.set_viewport(0, self.width, 0, self.height)
    main_game_view = MainGameView()
    game.show_view(main_game_view)
    arcade.run()


if __name__ == "__main__":
    main()
| 39.381201 | 211 | 0.679772 | 2,039 | 15,083 | 4.712604 | 0.14615 | 0.054116 | 0.028411 | 0.018316 | 0.399521 | 0.284733 | 0.197003 | 0.11791 | 0.095848 | 0.083047 | 0 | 0.014509 | 0.22774 | 15,083 | 382 | 212 | 39.484293 | 0.81044 | 0.047471 | 0 | 0.16 | 0 | 0 | 0.017241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.058182 | 0.018182 | 0.301818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7b3bc161256bb5501fd5bd641192702f9a7738 | 2,306 | py | Python | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | pebbles/views/sessions.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | from flask_restful import fields, marshal
from flask import Blueprint as FlaskBlueprint
import logging
import json

from pebbles.models import User
from pebbles.forms import SessionCreateForm
from pebbles.server import app, restful
from pebbles.views.commons import is_group_manager, update_email  # changed

sessions = FlaskBlueprint('sessions', __name__)

token_fields = {
    'token': fields.String,
    'user_id': fields.String,
    'is_admin': fields.Boolean,
    'is_group_owner': fields.Boolean,
    'is_group_manager': fields.Boolean,
    'icon_value': fields.String
}

# Navigation icons shown to each role; empty strings hide that slot.
admin_icons = ["Dashboard", "Users", "Groups", "Blueprints", "Configure", "Statistics", "Account"]
group_owner_icons = ["Dashboard", "", "Groups", "Blueprints", "", "", "Account"]
group_manager_icons = ["Dashboard", "", "", "Blueprints", "", "", "Account"]
user_icons = ["Dashboard", "", "", "", "", "", "Account"]


class SessionView(restful.Resource):
    def post(self):
        form = SessionCreateForm()
        if not form.validate_on_submit():
            logging.warning("validation error on user login")
            return form.errors, 422
        user = User.query.filter_by(eppn=form.eppn.data).first()
        if user and not user.email_id:
            # Email and eppn are the same because users are invited through their email id.
            user = update_email(eppn=user.eppn, email_id=user.eppn)
        if user and user.check_password(form.password.data):
            if user.is_admin:
                icons = json.dumps(admin_icons)
            elif user.is_group_owner:
                icons = json.dumps(group_owner_icons)
            elif is_group_manager(user):
                icons = json.dumps(group_manager_icons)
            else:
                icons = json.dumps(user_icons)
            return marshal({
                'token': user.generate_auth_token(app.config['SECRET_KEY']),
                'is_admin': user.is_admin,
                'is_group_owner': user.is_group_owner,
                'is_group_manager': is_group_manager(user),
                'user_id': user.id,
                'icon_value': icons
            }, token_fields)
        logging.warning("invalid login credentials for %s" % form.eppn.data)
        return {
            'message': 'Unauthorized',
            'status': 401
        }, 401
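# A minimal sketch of how this resource is typically mounted; the api object
# and route below are illustrative, not taken from pebbles.server:
#     api = restful.Api(app)
#     api.add_resource(SessionView, '/api/v1/sessions')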
| 36.603175 | 98 | 0.624024 | 265 | 2,306 | 5.218868 | 0.358491 | 0.045553 | 0.050615 | 0.028923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005254 | 0.257155 | 2,306 | 62 | 99 | 37.193548 | 0.802102 | 0.030789 | 0 | 0 | 0 | 0 | 0.15905 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0.019231 | 0.153846 | 0 | 0.25 | 0.096154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7bab95ad749f6149016b3bc152e246a371a757 | 6,138 | py | Python | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | 6 | 2021-04-04T05:09:32.000Z | 2022-01-21T10:59:20.000Z | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | null | null | null | train_rrca.py | deepeshhada/ReXPlug | f6ba1e1707e04f82451fba8ada19c731c8f7c46e | [
"Apache-2.0"
] | 1 | 2021-11-06T05:36:03.000Z | 2021-11-06T05:36:03.000Z | import argparse
import os
import pickle
from copy import deepcopy

import pandas as pd
import torch.optim as optim
from torch.utils.data import DataLoader

from collate import CollateTrain, CollateTest
from models.RRCA import *
from utils.rrca_utils import evaluate, train_one_epoch


def get_embeddings(dataset_path):
    with open(os.path.join(dataset_path, 'true_sentence_embeddings.pkl'), 'rb') as f:
        true_embeddings = pickle.load(f)
    return true_embeddings


def create_reviews_lists(train_df, true_embeddings):
    # Group the training reviews' sentence embeddings by user id and by item id.
    user_reviews_dict = {}
    item_reviews_dict = {}
    for idx, row in train_df.iterrows():
        if int(row[0]) not in user_reviews_dict:
            user_reviews_dict[int(row[0])] = []
        if int(row[1]) not in item_reviews_dict:
            item_reviews_dict[int(row[1])] = []
        user_reviews_dict[int(row[0])].append(true_embeddings[idx])
        item_reviews_dict[int(row[1])].append(true_embeddings[idx])
    return user_reviews_dict, item_reviews_dict
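# For example, if the training frame had rows (user 0, item 7) and (user 0, item 9),
# user_reviews_dict[0] would hold both rows' sentence embeddings while
# item_reviews_dict[7] and item_reviews_dict[9] would hold one embedding each
# (illustrative ids, not taken from any dataset).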
def create_dataset(df, true_embeddings, mode="Test"):
    user_item_ratings = {}
    if mode == "Train":
        for idx, row in df.iterrows():
            user_item_ratings[idx] = [int(row[0]), int(row[1]), true_embeddings[idx], row[3]]
    else:
        # Validation/test examples carry no ground-truth review embedding.
        for idx, row in df.iterrows():
            user_item_ratings[idx] = [int(row[0]), int(row[1]), row[3]]
    return user_item_ratings


def train_rrca(
        dataset_path="./data",
        model_save_path="./saved_models",
        model="rrca",
        batch_size_rrca=256,
        learning_rate_rrca=0.002,
        num_epochs_rrca=150,
        dataset_name="AmazonDigitalMusic"
):
    with open('./pickled_meta/dataset_meta.pkl', 'rb') as f:
        dataset_meta = pickle.load(f)
    num_users = dataset_meta[dataset_name]['num_users']
    num_items = dataset_meta[dataset_name]['num_items']
    num_factors = 64
    num_layers = 3
    sentence_embed_dim = 512
    embed_dim = num_factors * (2 ** (num_layers - 1))
    model_save_path = os.path.join(model_save_path, dataset_name, model + '.pt')
    dataset_path = os.path.join(dataset_path, dataset_name)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Prepare data loaders
    train_df = pd.read_csv(os.path.join(dataset_path, 'train_df.csv'))
    val_df = pd.read_csv(os.path.join(dataset_path, 'val_df.csv'))
    test_df = pd.read_csv(os.path.join(dataset_path, 'test_df.csv'))
    print(f"Train size: {len(train_df)} | Val size: {len(val_df)} | Test size: {len(test_df)}")

    print("Creating data loaders...")
    true_embeddings = get_embeddings(dataset_path)
    user_reviews_dict, item_reviews_dict = create_reviews_lists(train_df, true_embeddings)
    train_set = create_dataset(train_df, true_embeddings, mode="Train")
    val_set = create_dataset(val_df, true_embeddings, mode="Val")
    test_set = create_dataset(test_df, true_embeddings, mode="Test")
    train_loader = DataLoader(
        dataset=train_set,
        batch_size=batch_size_rrca,
        shuffle=True,
        collate_fn=CollateTrain(user_reviews_dict, item_reviews_dict)
    )
    val_loader = DataLoader(
        dataset=val_set,
        batch_size=batch_size_rrca,
        shuffle=False,
        collate_fn=CollateTest(user_reviews_dict, item_reviews_dict)
    )
    test_loader = DataLoader(
        dataset=test_set,
        batch_size=batch_size_rrca,
        shuffle=False,
        collate_fn=CollateTest(user_reviews_dict, item_reviews_dict)
    )

    print("Creating RRCA modules...")
    review_regularizer = ReviewRegularizer(num_factors=num_factors).to(device)
    cross_attention_module = CrossAttention(embed_dim=embed_dim, sentence_embed_dim=sentence_embed_dim).to(device)
    # NB: this rebinds `model` from the CLI string ('rrca'/'rr') to the network itself.
    model = RatingPredictor(
        review_regularizer=review_regularizer,
        cross_attention=cross_attention_module,
        embed_dim=embed_dim,
        num_users=num_users,
        num_items=num_items,
        num_factors=num_factors,
        num_layers=num_layers
    ).to(device)
    optimizer = optim.Adam(model.parameters(), lr=learning_rate_rrca)
    loss_function = nn.MSELoss()

    losses_overall, losses_rating_pred, losses_att, losses_reg = [], [], [], []
    val_mses, val_maes = [], []
    PATIENCE = 15
    patience = PATIENCE
    best_val_mse, best_model = 100, None

    print("Training...")
    print("=" * 80)
    for epoch in range(1, num_epochs_rrca + 1):
        # Early stopping: give up after PATIENCE epochs without a new best validation MSE.
        if patience == 0:
            break
        epoch_loss_overall, epoch_loss_rating_pred, epoch_loss_att, epoch_loss_reg, val_mse, val_mae = train_one_epoch(
            model=model,
            train_loader=train_loader,
            val_loader=val_loader,
            loss_function=loss_function,
            optimizer=optimizer,
            epoch=epoch,
            device=device
        )
        if val_mse < best_val_mse:
            print("Saving model...")
            patience = PATIENCE
            best_val_mse = val_mse
            best_model = deepcopy(model)
            torch.save(best_model.state_dict(), model_save_path)
        else:
            patience -= 1
        losses_overall.append(epoch_loss_overall)
        losses_rating_pred.append(epoch_loss_rating_pred)
        losses_att.append(epoch_loss_att)
        losses_reg.append(epoch_loss_reg)
        val_mses.append(val_mse)
        val_maes.append(val_mae)
    print("=" * 80)

    print('RRCA trained. Evaluating on the test set.')
    print("-" * 80)
    test_mse, test_mae = evaluate(best_model, test_loader, device)
    print(f"Test MSE: {test_mse:.4f} | Test MAE: {test_mae:.4f}")
    print("=" * 80)
    return


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Train ReXPlug.")
    parser.add_argument("--dataset_path", type=str, default="./data", help="Root folder path of preprocessed dataset.")
    parser.add_argument("--model_save_path", type=str, default="./saved_models", help="Root path to save RRCA's model.")
    parser.add_argument("--model", type=str, default="rrca", help="Choose from 'rrca' or 'rr'.")
    parser.add_argument("--batch_size_rrca", type=int, default=256, help="Batch size to train RRCA.")
    parser.add_argument("--learning_rate_rrca", type=float, default=0.002, help="Learning rate for RRCA.")
    parser.add_argument("--num_epochs_rrca", type=int, default=150, help="Number of epochs to train RRCA.")
    parser.add_argument(
        "--dataset_name",
        type=str,
        default="AmazonDigitalMusic",
        choices=("AmazonDigitalMusic", "AmazonVideoGames", "AmazonClothing", "Yelp_1", "Yelp_2", "BeerAdvocate"),
        help="Name of the dataset to use."
    )
    args = parser.parse_args()
    root_path = os.path.join(args.model_save_path, args.dataset_name)
    if not os.path.exists(root_path):
        os.makedirs(root_path)
    train_rrca(**(vars(args)))
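# Example invocation with the defaults declared above:
#     python train_rrca.py --dataset_name AmazonDigitalMusic --num_epochs_rrca 150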
| 33.540984 | 117 | 0.750244 | 920 | 6,138 | 4.693478 | 0.197826 | 0.045855 | 0.031264 | 0.035665 | 0.229968 | 0.173692 | 0.112552 | 0.087077 | 0.087077 | 0.064845 | 0 | 0.011473 | 0.119583 | 6,138 | 182 | 118 | 33.725275 | 0.787565 | 0.003258 | 0 | 0.102564 | 0 | 0.00641 | 0.145029 | 0.009647 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.064103 | 0 | 0.115385 | 0.070513 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da7bd622d1ddbda2597706e2f3a2142e325287e0 | 4,540 | py | Python | SignLAnguage/proyecto.py | val171001/SignLanguage | fc7622e4a31f007791f34fbf0ba08c89138389c5 | [
"MIT"
] | null | null | null | SignLAnguage/proyecto.py | val171001/SignLanguage | fc7622e4a31f007791f34fbf0ba08c89138389c5 | [
"MIT"
] | null | null | null | SignLAnguage/proyecto.py | val171001/SignLanguage | fc7622e4a31f007791f34fbf0ba08c89138389c5 | [
"MIT"
] | null | null | null | #Universidad del Valle de Guatemala
# HCI
# David Valenzuela
# Marcos Gutierrez
# Fernando Hengstenebrg

# Libraries
from tkinter import *
from tkinter import messagebox as ms
# database
import sqlite3
import os
import webbrowser

# Connect to the database and create the user table
with sqlite3.connect('cuentas.db') as db:
    c = db.cursor()
    c.execute('CREATE TABLE IF NOT EXISTS user (username TEXT NOT NULL, password TEXT NOT NULL);')
    db.commit()
db.close()


# main class
class main:
    def __init__(self, master):
        # window
        self.master = master
        # variables
        self.Usuario = StringVar()
        self.password = StringVar()
        self.usuarioNuevo = StringVar()
        self.nuevaPassword = StringVar()
        self.widgets()

    # LOGIN function
    def login(self):
        # open the connection
        with sqlite3.connect('cuentas.db') as db:
            c = db.cursor()
            # look the user up in the database
            find_user = ('SELECT * FROM user WHERE username = ? and password = ?')
            c.execute(find_user, [(self.Usuario.get()), (self.password.get())])
            result = c.fetchall()
        if result:
            # open the sign-language alphabet page in the default browser
            webbrowser.open_new_tab('alfabeto.html')
        else:
            ms.showerror('Oops!', 'La cuenta no se puede encontrar')

    def nuevoUsuario(self):
        with sqlite3.connect('cuentas.db') as db:
            c = db.cursor()
            # check whether the username already exists
            find_user = ('SELECT * FROM user WHERE username = ?')
            c.execute(find_user, [(self.Usuario.get())])
            if c.fetchall():
                ms.showerror('Error!', 'Ya existe el nombre de usuario Intente de Nuevo.')
            else:
                ms.showinfo('Success!', 'Cuenta Creada!')
                self.log()
                # create the new account
                insert = 'INSERT INTO user(username,password) VALUES(?,?)'
                c.execute(insert, [(self.usuarioNuevo.get()), (self.nuevaPassword.get())])
                db.commit()

    def log(self):
        self.Usuario.set('')
        self.password.set('')
        self.crf.pack_forget()
        # self.head['text'] = 'LOGIN'
        self.logf.pack()

    def crear(self):
        self.usuarioNuevo.set('')
        self.nuevaPassword.set('')
        self.logf.pack_forget()
        self.head['text'] = ' CREAR CUENTA '
        self.crf.pack()

    def widgets(self):
        # Header of the login window
        self.head = Label(self.master, text=' INICIAR SESION ', font=('', 20), pady=10, bg='blue4', fg='white')
        self.head.pack()
        # Main frame
        self.logf = Frame(self.master, padx=10, pady=10, bg='white')
        # Main widgets
        Label(self.logf, text='Usuario: ', font=('', 20), pady=5, padx=5, bg='white', fg='black').grid(sticky=W)
        Entry(self.logf, textvariable=self.Usuario, bd=5, font=('', 15)).grid(row=0, column=1)
        Label(self.logf, text='Contraseña: ', font=('', 20), pady=5, padx=5, bg='white', fg='black').grid(sticky=W)
        Entry(self.logf, textvariable=self.password, bd=5, font=('', 15), show='*').grid(row=1, column=1)
        Button(self.logf, text=' Crear cuenta ', bd=3, font=('', 15), padx=5, pady=5, command=self.crear).grid()
        Button(self.logf, text=' Login ', bd=3, font=('', 15), padx=5, pady=5, command=self.login).grid(row=2, column=1)
        # Button(self.logf, text=' Ayuda ', bd=3, font=('', 15), padx=5, pady=5, command=self.login).grid(row=3, column=1)
        self.logf.pack()
        # Widgets for the account-creation view
        self.crf = Frame(self.master, padx=10, pady=10, bg='white')
        # Input fields for the new account
        Label(self.crf, text='Usuario Nuevo: ', font=('', 20), pady=5, padx=5, bg='white', fg='black').grid(sticky=W)
        Entry(self.crf, textvariable=self.usuarioNuevo, bd=5, font=('', 15)).grid(row=0, column=1)
        Label(self.crf, text='Contraseña: ', font=('', 20), pady=5, padx=5, bg='white', fg='black').grid(sticky=W)
        Entry(self.crf, textvariable=self.nuevaPassword, bd=5, font=('', 15), show='*').grid(row=1, column=1)
        Button(self.crf, text='Regresar', bd=3, font=('', 15), padx=5, pady=5, command=self.log).grid()
        Button(self.crf, text='Crear cuenta', bd=3, font=('', 15), padx=5, pady=5, command=self.nuevoUsuario).grid(row=2, column=1)


# create the window
root = Tk()
main(root)
root.resizable(width=False, height=False)
root.mainloop()
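# Note: passwords are stored in plain text above. A minimal hardening idea
# using only the standard library (the parameter choices are illustrative):
#     import hashlib, os
#     salt = os.urandom(16)
#     digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, 100000)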
| 38.803419 | 140 | 0.596256 | 601 | 4,540 | 4.484193 | 0.274542 | 0.032653 | 0.018553 | 0.016698 | 0.410761 | 0.388497 | 0.377737 | 0.329499 | 0.329499 | 0.329499 | 0 | 0.024341 | 0.239868 | 4,540 | 116 | 141 | 39.137931 | 0.756592 | 0.128855 | 0 | 0.157895 | 0 | 0 | 0.164335 | 0.005851 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078947 | false | 0.144737 | 0.065789 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
da7e18ea9112e331d9b45cb4a08cb02a217b0d65 | 53 | py | Python | 001146StepikPyBegin/Stepik001146PyBeginсh06p01st08C07_fraction_20200419.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh06p01st08C07_fraction_20200419.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | 001146StepikPyBegin/Stepik001146PyBeginсh06p01st08C07_fraction_20200419.py | SafonovMikhail/python_000577 | 739f764e80f1ca354386f00b8e9db1df8c96531d | [
"Apache-2.0"
] | null | null | null | num1 = float(input())
# num1 = 44.45
print(num1 % 1)
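# e.g. for the input 44.45 this prints the fractional part, roughly 0.45
# (expect floating-point noise, e.g. 0.45000000000000284)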
| 13.25 | 21 | 0.603774 | 9 | 53 | 3.555556 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.186047 | 0.188679 | 53 | 3 | 22 | 17.666667 | 0.55814 | 0.226415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 3 |
da804ba481b451cc0ca78dfe3274c111f94eaf58 | 16,539 | py | Python | data-processing/utils/__init__.py | mark-andrews/BayesianAccountMemoryText | 28609a4d3d3924c5082af81359ffc3f78f6eb6da | [
"CC-BY-4.0"
] | 2 | 2020-04-10T17:14:19.000Z | 2020-04-10T17:14:26.000Z | data-processing/utils/__init__.py | mark-andrews/BayesianAccountMemoryText | 28609a4d3d3924c5082af81359ffc3f78f6eb6da | [
"CC-BY-4.0"
] | 18 | 2020-03-24T17:07:23.000Z | 2021-12-13T20:01:11.000Z | data-processing/utils/__init__.py | mark-andrews/BayesianAccountMemoryText | 28609a4d3d3924c5082af81359ffc3f78f6eb6da | [
"CC-BY-4.0"
] | null | null | null | """
Some general utils.
"""
##=============================================================================
## Standard library imports
##=============================================================================
#import string
#import re
#import os
#import errno
#import hashlib
#
##================================ End Imports ================================
#
#def deletechars(s, exclude_chars):
#    ''' Fast deletion of characters from string.
#        It uses a dummy translation table, and so no mapping is applied, and we
#        just delete the exclude_chars characters.
#    '''
#    phony_translate_table = string.maketrans("", "")
#    return s.translate(phony_translate_table, exclude_chars)
#
#
#def deletepunctuation(s):
#    ''' Fast deletion of punctuation from string'''
#    return deletechars(s, string.punctuation)
#
#
#def tokenize(text, foldcase=True):
#    '''
#    A very cheap and easy tokenization.
#    First, remove "'s". For example, "dog's" becomes "dog".
#    Second, zap utf-8 chars.
#    Then, remove all punctuation and, by default, fold upper and lower case words
#    and then split by whitespace.
#    '''
#
#    text = re.sub(r'\'s', '', text)
#    s = ''.join([s for s in text if s in string.printable])
#
#    s = str(s)  # Got to convert it to str.
#    s = deletepunctuation(s)
#
#    if foldcase:
#        s = s.lower()
#    return s.split()
#
#
#def mkdir_p(path):
#    '''
#    Make a directory, making parents if necessary.
#    Taken verbatim from
#    http://stackoverflow.com/a/600612
#    '''
#    try:
#        os.makedirs(path)
#    except OSError as exc:  # Python >2.5
#        if exc.errno == errno.EEXIST and os.path.isdir(path):
#            pass
#        else:
#            raise
#
#
#def checksum(argument, algorithm='sha256'):
#    '''
#    Returns the hash checksum of `argument'.
#    If `argument' is a name of a file, then perform the checksum on the file.
#    Otherwise, the checksum is of the string `argument'.
#    By default, it will be the sha256 checksum (and so equivalent to linux's
#    sha256sum). Alternatively, the algorithm could be md5 (equivalent to linux's
#    md5sum), or else sha1, sha224, sha384, sha512.
#    '''
#
#    h = hashlib.new(algorithm)
#
#    if os.path.exists(argument) and os.path.isfile(argument):
#        argument = open(argument, 'rb').read()
#
#    h.update(argument)
#
#    return h.hexdigest()


# I didn't have anywhere better to put these.
hdptm_170617202450_6333_state_checksums = '''
hdptm_170617202450_6333_state_19000.npz 2dadab2c09f54f4d03a1187c8d5db49a8ec0a2bfe7bd5f5630448958ba4f21ac
hdptm_170617202450_6333_state_19010.npz 0edcb069ab3e559f62728d372f98fb5c047ca8a47ede262dad05ef236d29615f
hdptm_170617202450_6333_state_19020.npz 395b4214a753d811f18f24c6665665bdfc201928c7e661294ab0e991b993b1c5
hdptm_170617202450_6333_state_19030.npz 469e9742d4a508c4b34e5283254041ec34b58ebf1f82a15f845949d7367708d5
hdptm_170617202450_6333_state_19040.npz 036d02b36964f24b4a49465769eb51f46ccbc6f52255797b32207c398c6a31f5
hdptm_170617202450_6333_state_19050.npz 74f93e761164f35d6c433809513604bd6c3bf54e68fb00b11437f9fc8f0366d6
hdptm_170617202450_6333_state_19060.npz e9b21a7550a86b55e419a6bde8e38766fd18fe978e3b78e3bff18d7c0d842a85
hdptm_170617202450_6333_state_19070.npz bb1778aa769a72468642f0ac7193f625ddc62c6326cd214794f9c3b88f17ab17
hdptm_170617202450_6333_state_19080.npz d6f49bcf8394679f46068b06c5a8798facd10f4e05e906232dd8feb69b48144a
hdptm_170617202450_6333_state_19090.npz df75378ce0436cc1bdb6e822320d88e9f6a5ffd21dd0b14e0d81270dfdd1601f
hdptm_170617202450_6333_state_19100.npz a8c6a2c9766a1f5933e2864b901469ab316d536b4af4100bb01982d4372d8a87
hdptm_170617202450_6333_state_19110.npz aa4150923a0fa565865311128490c7172ffce4bb6615e3cd09c2ef8285e05f06
hdptm_170617202450_6333_state_19120.npz e0b88f8afa10d0fdf3ca522bdfb983b3b90bd09cc7b43fe6a8382b83a4b124f0
hdptm_170617202450_6333_state_19130.npz 7a742737378a34d4fd6eb0de4821ef589e5b51036ba85d49215d3ca1989eac36
hdptm_170617202450_6333_state_19140.npz af7a17b9cf56ba85c546f0b50378d8e6841ce9be91cb8993dc4262aac4d37be9
hdptm_170617202450_6333_state_19150.npz e6ad504a862dd1d405b55197d68ea57bcc60c24d0c2f520b57d45f0829eddbc7
hdptm_170617202450_6333_state_19160.npz 2756ed729b15107bb87f171a16e9dee50815912c49a24bb08e4dc1deec385afa
hdptm_170617202450_6333_state_19170.npz b1cbbe73548c07adeca9329fb7965d0045e52a5af1a561967381ebea97889dcc
hdptm_170617202450_6333_state_19180.npz 8254e6586d1cdf61911afa73d4cd49df8a72c4951cf978442cc7655d40d1604a
hdptm_170617202450_6333_state_19190.npz 386db58a375862cc0292d9cefc66f3da87c8d5d15d887bbcf2be64603ec20ea0
hdptm_170617202450_6333_state_19200.npz ee7ce427247d69dfdee5c624170620514b7092a578b55247275e15e8ed0fce9b
hdptm_170617202450_6333_state_19210.npz 7dff8c7a9bfc2aaa62a74ae1e161bb5e083c371bb63c26a960d7d295e3ff819b
hdptm_170617202450_6333_state_19220.npz 1bbd70b4ca4385bcb3c07fc3597d1dc332fd1b265befa198a967881e1a27ecaf
hdptm_170617202450_6333_state_19230.npz 8fe014690b637ee9be3d21d975391a180b42931433fc6984a3d7f89ccad30813
hdptm_170617202450_6333_state_19240.npz c7f96995c0d91a92d5c3e7b39b946538ae4acb581604dc3fc0bdcbcdab6a8464
hdptm_170617202450_6333_state_19250.npz e35aeca75a6ddcca4337f6394da7b7a66cbf4e7f59b5fa2196ae7789ca675c12
hdptm_170617202450_6333_state_19260.npz d249f87aeae32583306bf4c23c3a241839dc1f2754ce272cb26f720cf334bc90
hdptm_170617202450_6333_state_19270.npz 9548567eabb7ec4a12d8d59f01a4d295a08ca7685e62771d630ac7127f8b46e7
hdptm_170617202450_6333_state_19280.npz dcc07c85a9c91dadc8066ed97ad41d738b464a11eaa95b645f061a6bc57fd085
hdptm_170617202450_6333_state_19290.npz 8a7bcfb27a9445239c9d38998d30d8db685f7ab92bdb0ba85b72d4e20b6f0bfa
hdptm_170617202450_6333_state_19300.npz 8e34da3bda88e342b0046c809c609e24d7927626f09c8455e97ec54767c063f0
hdptm_170617202450_6333_state_19310.npz 7c948def3ae6b24bcefc5d17a165354d282e74f7dfa661b343bcb236bfe114e8
hdptm_170617202450_6333_state_19320.npz ac74ebcdff39c4a5f32fd6f6dd6c1294f584dce6536b6eaf1dffcbc512dab340
hdptm_170617202450_6333_state_19330.npz 6a02ea121ddf1a6dcdf5616707c0a0c46414ad2ed5219cf4b3f08d0ac4add6ce
hdptm_170617202450_6333_state_19340.npz 11e9ce24716ac4852d87f3043afff8a7624d1384da19fb41ffa50b8dddca6b76
hdptm_170617202450_6333_state_19350.npz 119881f704fd40ed0bf156e66ff801755922e8782f34cb281618da7482bcedca
hdptm_170617202450_6333_state_19360.npz 579c001ca2f4812b98149480698675120cf9f6759214d408ee519edced5365a2
hdptm_170617202450_6333_state_19370.npz 78efe683e79cd6fdc164659658e33e681e9c9f18e1a6e1dcd22e43f59388b998
hdptm_170617202450_6333_state_19380.npz bb78c916ec550dbb1458af9e3fa9e03f30f2ca604adf09953b2bfbb2223dc3d6
hdptm_170617202450_6333_state_19390.npz 09c802096a17cef6554a4d8bcad4e7f4020d67502378d99deb07b6e3846ec211
hdptm_170617202450_6333_state_19400.npz 59e76f0bc6d3fe1ce81131ecc3839c0970196d38c812c487aa97235e20281f77
hdptm_170617202450_6333_state_19410.npz e98c8b229f7af914518eaa522a4b8c39e0534429a20e9eb93c50c501d53a6c98
hdptm_170617202450_6333_state_19420.npz d97e227352d9db0f7c180cfbba291a3d68603796c12eb89669b5d53cda38bdf9
hdptm_170617202450_6333_state_19430.npz 16c023d80ade574fdbab993b191a03b27043d0805aec0f5bac32518e8fbf6f6a
hdptm_170617202450_6333_state_19440.npz 8ef8f6d4d542406475ee5ef7d0551561defef556d7b75fc337a4a588668c45d4
hdptm_170617202450_6333_state_19450.npz b48b4d4ea8f57e2c5bf293673b24a2003b55bbc4615fcd3661ac7acbef35d89c
hdptm_170617202450_6333_state_19460.npz 5998645559a3d936109ddd786e0f2d73667de7a80c435700f06c969bf5d0fca9
hdptm_170617202450_6333_state_19470.npz f729fc10ebd3715af80a7600d3fc6bc4c2d741e11454970351f07abeb18eec0e
hdptm_170617202450_6333_state_19480.npz 665e457a2462cf30aab5d8ab5a79b1dbb4e6f305d42cc2066efd81f35b257706
hdptm_170617202450_6333_state_19490.npz 36e5247839b15d32b8100756d76426a664af22b8413c5e195d40802d42896a84
hdptm_170617202450_6333_state_19500.npz e4613cb454b29ac5e3e5f419079e9e0c0c38295adba129f361f749a948269482
hdptm_170617202450_6333_state_19510.npz 6785bc7506629b87f271d9022eb648033ea61a63b13c42c12f7a11c1bb1f1526
hdptm_170617202450_6333_state_19520.npz 26439ee52c214ec5283a882c4e976d4cbd20d4321a1d6ab57cb4015e7a3a302e
hdptm_170617202450_6333_state_19530.npz 7da0b15f82c93c78a0bded179ca73fab61521d2be5abede01af0ac1b632a370d
hdptm_170617202450_6333_state_19540.npz 1e2d38e7228ec08aec617e4e94eebbec740ade10c7bca43513aa72f04137e1e1
hdptm_170617202450_6333_state_19550.npz 84745e32828efdb8642ef3242a7405304a19d0e99d407f67f20a0a20fa9844ae
hdptm_170617202450_6333_state_19560.npz 6783b0d5ec6a61fc85c49ec95fed53d2e7f1cd8116a6b35e473b218a2e66f83d
hdptm_170617202450_6333_state_19570.npz 3e0afadedf9fe5f0ddb26ec1e147e103cda15fa7c53d7894b4b43006c3368124
hdptm_170617202450_6333_state_19580.npz cdac508bc665928a82d5fbefdaaa1fe40b14b8e6e20f48f3bd5f617f01906807
hdptm_170617202450_6333_state_19590.npz 63178a70c938803757bf3701b9f9fd38f85c42a9a56eb3005dfee4605dedb5ff
hdptm_170617202450_6333_state_19600.npz f3c16b084267c09b54b840012051ad71fc9a152392524bac18da266aeddbdf08
hdptm_170617202450_6333_state_19610.npz f53ce93e6b5ca21d095cdfb485ef1c0a93b8723aed5b3f1d47912f046ea18f50
hdptm_170617202450_6333_state_19620.npz 9b707d7902cd3fc323d6a3bc5794e6328c7c9ab8fc4a1803ab46237598f774a6
hdptm_170617202450_6333_state_19630.npz f788652fc7daad24f8f16a9e33527e9f2282a34c61c49abf297bfd1c1435bc41
hdptm_170617202450_6333_state_19640.npz 0f1353c671f5e5c7c9748318e92b1ef1d50efce57c423f6800ef51b4cd54d474
hdptm_170617202450_6333_state_19650.npz 2f4bfdd72fd02c7c17f2e6f05fcb4922d32e252eacd45fbfdcde2b208e9b226c
hdptm_170617202450_6333_state_19660.npz 887731ab2a189453936610624ea4072ea201de020ad7d0820ac9e50564a4b320
hdptm_170617202450_6333_state_19670.npz 9392437d1a4c024f4f16eee1ed638f7e495eac2743887c0879094ff3cb347927
hdptm_170617202450_6333_state_19680.npz 043a10ae74c4e49f7a9c1e50dae9bf449b7a07a8af8ecda2a2ad32cbe6f359d0
hdptm_170617202450_6333_state_19690.npz 2ee4badeef50880921b8ebb91242ac1decaef17d80d3417aae6d9d9d8059714b
hdptm_170617202450_6333_state_19700.npz 79ad9e6d38f113237870d3a8052044b1ffccf4365d0f3151d1e4c1c29457edff
hdptm_170617202450_6333_state_19710.npz fe033e037a350b1f80afdb94ba08dd90b0b529a36caae0094f73a75cd85c3359
hdptm_170617202450_6333_state_19720.npz 73d77fba3122a661446f5db0a64c77ee960e292ed18ff87ed42ff202e1093a45
hdptm_170617202450_6333_state_19730.npz c4debd707536613aa1430177fb1c03f9b23d1d5a881897c556ea9cfd493c7720
hdptm_170617202450_6333_state_19740.npz a9b47af3f6fe82c51f85472f69e67de99bf7c5cce88fb793f8a140bf1905d835
hdptm_170617202450_6333_state_19750.npz 4ef5fb2a9aae7336f3ca4f51227c725d5d5f16f97916b0f3e17540f27669813a
hdptm_170617202450_6333_state_19760.npz 6571c32082aa5c009d0223b3f8f843980b48e550cb267b51c531af2479d5804c
hdptm_170617202450_6333_state_19770.npz d8ddb87feef83d76a0121fa5c050e7462e74ec003aa1dd3ded4f8a33ec4578b7
hdptm_170617202450_6333_state_19780.npz e0ed56a8f39b623dfbe8aae83e63ea61c62cdef1d74867475dc4fb87debeede2
hdptm_170617202450_6333_state_19790.npz 49c676838b6d69bff4c5697b1ee4268c914fac6ff54a37421bc8c35459a9f419
hdptm_170617202450_6333_state_19800.npz ca044e99eee9ccdc5f606885d8d87c3688d9c0719ebefa47d60c9d814404faf3
hdptm_170617202450_6333_state_19810.npz 038de2608ffdeeb4a4dccc9c71e0d674616f36cf056a95c0a95ee74afa5b6535
hdptm_170617202450_6333_state_19820.npz 72a87a8749d9f82f3ed25ea5df0c8f96efac5c94fc7de8cc07c6ee3d57ef2c39
hdptm_170617202450_6333_state_19830.npz aa52113c9e68d80a02f717739240cca50af6734cd23b92e481ba8a8493a6d26c
hdptm_170617202450_6333_state_19840.npz cae47400e63db6748fcd7b87055623fbc33a4c831a17d9e2ab939dabdee8ade0
hdptm_170617202450_6333_state_19850.npz 245994e5a33e7ab2ef33f0f29d91d12289055f9103f30e29f14b7a95152c4f33
hdptm_170617202450_6333_state_19860.npz d76be92b7e1ea383072743bb57d26c16a16495268ced639acc88c8c9ab682c3a
hdptm_170617202450_6333_state_19870.npz 6dd791cd116efb0c13007f81e5341f15d8e636594b9da256fdb3853f104b9f36
hdptm_170617202450_6333_state_19880.npz c4bbcada19fd5dedc602a4aef92b0def31f1b8c0badda5fee8e6147e443089b6
hdptm_170617202450_6333_state_19890.npz 5f55097c1dab7c441df6d964ca4eb33c13dd50c679306bdb7b6267e324d3a8bb
hdptm_170617202450_6333_state_19900.npz 447ebb530652ee64be1c404dac2b486e9ae9e600cc18a1f58a8cb2a4775a7d75
hdptm_170617202450_6333_state_19910.npz 66d2444d50c556445a90a8a3c44d6f5802296ff3c7467080b9cc6ea65cc7b356
hdptm_170617202450_6333_state_19920.npz 65211ccdb9090dd1714ffd6de5bf7b35dc565adb7a10000722bc2f8ce8d5b845
hdptm_170617202450_6333_state_19930.npz 6df3476774eb256b082b5aeda5e879f780d7a8bd20df936337283da172f51f65
hdptm_170617202450_6333_state_19940.npz 4fae17fd61bb16c83cc3ac647aa7903a8d41a5786dafef43e6cf0423fdabb2c7
hdptm_170617202450_6333_state_19950.npz 44d7a8608e071d01303ed9f060f3c51fd66d4b865d316e020771237929352452
hdptm_170617202450_6333_state_19960.npz f0363ffe99a18209de5e036cc9912a81171c1e451defa412738fed097a5d6e3d
hdptm_170617202450_6333_state_19970.npz 6226b1a68d7d51d0e903aa16b0bef0b1759ee3f8edb7faa1b148255ade2340ee
hdptm_170617202450_6333_state_19980.npz d55545cc6d2464b1a8f9ef740420d93288100872b2242361d841199d3d9054a5
hdptm_170617202450_6333_state_19990.npz 21dd9a492a6a9911553bcd6caf1ea4954aa28fbbb61293747dfe9286ab99050a
hdptm_170617202450_6333_state_20000.npz 4b593d3027d2d76a6186de244b459ceab0d74dacf11bf1db913fbadb90ed110c
'''.strip().splitlines()
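# A minimal sketch of how such a manifest can be consumed (parse_manifest is
# our illustrative helper, not part of this module):
#     def parse_manifest(lines):
#         return dict(line.split() for line in lines)
#     expected = parse_manifest(hdptm_170617202450_6333_state_checksums)
#     # expected['hdptm_170617202450_6333_state_19000.npz'] -> '2dadab2c...'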
hdptm_201117172636_2290_state_checksums = '''
hdptm_201117172636_2290_state_12000.npz 11437bf9bcbeb120200d233822e67f87630780923094ee37c3ad0ba35bf511da
hdptm_201117172636_2290_state_12010.npz d3559444ff4e9098d6ea6216ce2afd2665bd19dd8963825189cb45d9e6c1d64b
hdptm_201117172636_2290_state_12020.npz c1d2e458ee366150869c8be18c897c4c60d96227adf0228b6297bd9df6220e7f
hdptm_201117172636_2290_state_12030.npz fa447583f242ffe45bc441cee34dc9ecb78da33b18a480d98026e70e262981ee
hdptm_201117172636_2290_state_12040.npz 4ee93667dd3662032d00263628c37980d02c6682a0ba75050e2f96a1fb54e612
hdptm_201117172636_2290_state_12050.npz d6fa8eb2340534f90710c81d32b385be158d8b20d96923eaac1fe0f7e8ce958e
hdptm_201117172636_2290_state_12060.npz 52c02cce5f6dead3c57c1bbf849caaa3c25e879905ce713cf0a1adc025182213
hdptm_201117172636_2290_state_12070.npz aac843fc16c7af66aadbb38e589842d37c28369910bad13604615fc4d7ea2c8d
hdptm_201117172636_2290_state_12080.npz 96214e1648a7480c7c635950c978e0109934c5b6390260476f1f02c47bf00249
hdptm_201117172636_2290_state_12090.npz 1814613561f524275a6991f7157c7d17b470d4c6df5801ebd4bb910990f90f17
hdptm_201117172636_2290_state_12100.npz 1fdf6bba764eb3650a530a3b3f8f4378a602463c48edaa741e811ec9f4b40547
hdptm_201117172636_2290_state_12110.npz 55e80b139763c636b9b1df998095867334e1328a35c826fbf19b3cedef921da3
hdptm_201117172636_2290_state_12120.npz 18aac3b341b47d6c0cbf4792ba3eea24189c84ae76bd659e3fa507fc6925a536
hdptm_201117172636_2290_state_12130.npz 29464f260ebd0ee01f0024a3659b958daeabe2ac5971e561e878c6532b3a2713
hdptm_201117172636_2290_state_12140.npz 4a251c321d18b161c9fe159472ccccbe93915a3fdeabd16f3ba72487336bd43a
hdptm_201117172636_2290_state_12150.npz f40711eec89e8074e8446fc504b0b454c0d425dc63add807d541e667e16d1af9
hdptm_201117172636_2290_state_12160.npz 29cb8eee77f9a74e07d3c5d8a2a3d9f074ab70a9564805814531431dc71704f2
hdptm_201117172636_2290_state_12170.npz 45c42a89ea2778aaab542e4125254a84c77d6bd21e0cc521df02c0ee771bd5f2
hdptm_201117172636_2290_state_12180.npz 9cf7d16ffefb92ecd9e5ce39aaac22ff2b71d3877a026c0a0aafcfed3c3a77c3
hdptm_201117172636_2290_state_12190.npz 76ce02b3fdd6883a580edbccbb3604be5ddf65e8fcfd212dc2912229f9800a99
hdptm_201117172636_2290_state_12200.npz d74550abc232d06d1f0bf599875ff666373acb9430c03f0c1cf04611965ed638
hdptm_201117172636_2290_state_12210.npz 62c61f020b996d3ee0e94ceb4be90861d4d75733884cbc16f65d99f2d9a4f671
hdptm_201117172636_2290_state_12220.npz 11fbfb7e29f27d5e14315cfc81082b9c500df2ffbf0a75f36ace77fc612576e4
hdptm_201117172636_2290_state_12230.npz b1fe6e9c3b1ca80e3306533b619bd99b511679a1cf7079c70cb88702595cd0a2
hdptm_201117172636_2290_state_12240.npz 4d9875827548820b7fc2b6489b6ff7b510e3a84f57481ce41ce048db7282982d
hdptm_201117172636_2290_state_12250.npz 8a66908d2d38f67a5ec46ba26ca4177231a6fd552b5eb862a5daea99e9d22cb1
hdptm_201117172636_2290_state_12260.npz 242d0cfa8c666b2bb38fafe1d56bca4a6e74457ed2760cd94a14f455e0e7381d
hdptm_201117172636_2290_state_12270.npz c40d2590ace5c83ca62cd7156a13bfee1dee500320e07a769ea504b7af9c0409
hdptm_201117172636_2290_state_12280.npz 435b2240d78cdbaf58674763756a75f7450d3d5877cecb105653b1f70d52431f
hdptm_201117172636_2290_state_12290.npz 8e5947086206b572de6d36a2b15e218d9d0d1da95b033379bb555b0ae8963491
hdptm_201117172636_2290_state_12300.npz e6b046aa17f9165a7cd7195d77e5cfc71f7b8209b3013a15c0ea77e2dc3b3bd9
hdptm_201117172636_2290_state_12310.npz 5441c7b1a20b03309e5b155aa253830b293c94b6c10b55d5535893e503d8f128
'''.strip().splitlines()
| 73.834821 | 104 | 0.912026 | 1,245 | 16,539 | 11.675502 | 0.345382 | 0.11929 | 0.147358 | 0.182444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.517416 | 0.04704 | 16,539 | 223 | 105 | 74.165919 | 0.40486 | 0.136405 | 0 | 0.014599 | 0 | 0 | 0.984285 | 0.965398 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
da8089058869977e8a1f3875a6acbaed71ba57b4 | 651 | py | Python | src/turret/core/cli.py | VanJanssen/turret | a3b87940aa7ed07df7a60e7633f795c0fec26462 | [
"MIT"
] | 1 | 2018-01-09T12:40:00.000Z | 2018-01-09T12:40:00.000Z | src/turret/core/cli.py | VanJanssen/turret | a3b87940aa7ed07df7a60e7633f795c0fec26462 | [
"MIT"
] | 42 | 2018-01-05T11:45:56.000Z | 2019-03-11T09:41:11.000Z | src/turret/core/cli.py | VanJanssen/turret | a3b87940aa7ed07df7a60e7633f795c0fec26462 | [
"MIT"
] | 1 | 2017-08-29T15:54:28.000Z | 2017-08-29T15:54:28.000Z | # -*- coding: utf-8 -*-
"""Entry point for the command line interface of Turret.
The command line interface is split across multiple files, to increase
modulairity and maintainability. Every component of Turret has its own
subcommand, the CLI for this is in the `cli.py` file of this component. This
file imports the subcommands for those components and adds them to the main
group.
"""
import click
from turret.raw.cli import raw
from turret.scout.cli import scout
@click.group()
@click.version_option(message='Turret %(version)s')
def main():
    """Entry point for the Turret CLI."""
    pass
main.add_command(scout)
main.add_command(raw)
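# Example invocations, assuming the package is installed with a console-script
# entry point named `turret` (the subcommand flags live in each component's
# own cli.py, so only the generic options are shown here):
#     turret --version
#     turret scout --help
#     turret raw --help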
| 23.25 | 76 | 0.749616 | 102 | 651 | 4.754902 | 0.519608 | 0.041237 | 0.053608 | 0.065979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001832 | 0.16129 | 651 | 27 | 77 | 24.111111 | 0.886447 | 0.631336 | 0 | 0 | 0 | 0 | 0.078947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | true | 0.111111 | 0.333333 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 3 |
da8173e404603548727bb332a693728554dd4658 | 3,927 | py | Python | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | null | null | null | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | 5 | 2017-07-19T10:09:32.000Z | 2017-07-30T03:32:56.000Z | timekeeper/log.py | jmcph4/timekeeper | 1ab850739c7071ebd8a4d1a63795d014bfa9c41b | [
"MIT"
] | null | null | null | from datetime import datetime
import sqlite3
from . import slice
class Log(object):
"""
Represents a series of slices, forming a log of how time was spent
"""
DT_FMT = "%Y-%m-%d %H:%M"
_COL_WIDTH = 15
def __init__(self, slices):
self._slices = {}
for s in slices:
self._slices[s.start] = (s, False)
@property
def slices(self):
sl = {}
for k, v in self._slices.items():
sl[k] = v[0]
return sl
def get_slice(self, dt):
"""
Returns the slice at the specified time
"""
return self._slices.get(dt)[0]
def set_slice(self, s, saved=False):
"""
Adds s to the log, overwriting any slice previously at that location
"""
self._slices[s.start] = (s, saved)
def __repr__(self):
s = "Start | End | Category | Description \n"
s += "-----------------+------------------+-----------------+-------------------------------\n"
for k, v in self._slices.items():
start_str = v[0].start.strftime(self.DT_FMT)
end_str = v[0].end.strftime(self.DT_FMT)
if not v[1]:
saved_notice = "(!)"
else:
saved_notice = ""
s += saved_notice + start_str + " | " + end_str + " | " + v[0].category + " " * (self._COL_WIDTH - len(v[0].category)) + " | " + v[0].description + "\n"
return s
def save(self, db_path):
"""
Saves the log to the specified database file by inserting each slice
into the SQL table
"""
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS log (id INTEGER PRIMARY KEY AUTOINCREMENT, start DATETIME, end DATETIME, category VARCHAR, description TEXT)''')
for k, v in self._slices.items():
if not v[1]: # if not saved
start_str = v[0].start.strftime(self.DT_FMT)
end_str = v[0].end.strftime(self.DT_FMT)
data = (start_str, end_str, v[0].category, v[0].description)
c.execute('''INSERT INTO log (start, end, category, description) VALUES (?, ?, ?, ?)''', data)
conn.commit()
v = (v[0], True) # set slice as saved
conn.close()
def load(self, db_path):
"""
Loads a log from the specified database file by inserting each slice
into the log object from the SQL table
"""
conn = sqlite3.connect(db_path)
c = conn.cursor()
c.execute('''SELECT * FROM log''')
data = c.fetchall()
for d in data:
self.set_slice(slice.Slice(datetime.strptime(d[1], self.DT_FMT),
datetime.strptime(d[2], self.DT_FMT),
d[3], d[4]), True)
conn.close()
def __len__(self):
length = 0
for k, v in self._slices.items():
length += len(v[0])
return length
def category_aggregate(self):
"""
Returns a dictionary associating each category in the log with the total
number of minutes attributed to it
"""
categories = {}
for k, v in self._slices.items():
categories[v[0].category] = 0
for k, v in self._slices.items():
categories[v[0].category] += len(v[0])
return categories
def ranged_category_aggregate(self, start, end):
"""
Same as category_aggregate() but only applies to slices within the range
[start, end]
"""
new_slices = []
for k, v in self.slices.items():
if k > start and k < end:
new_slices.append(v)
tmp = Log(new_slices)
return tmp.category_aggregate()
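# A minimal usage sketch (assumes slice.Slice(start, end, category, description),
# the constructor signature inferred from Log.load above, and that len(slice)
# returns its duration in minutes, as category_aggregate's docstring implies):
#     from datetime import datetime
#     s = slice.Slice(datetime(2017, 7, 19, 9, 0),
#                     datetime(2017, 7, 19, 9, 30), "email", "inbox triage")
#     log = Log([s])
#     log.save("timekeeper.db")
#     log.category_aggregate()   # -> {'email': 30}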
| 29.088889 | 164 | 0.507512 | 484 | 3,927 | 3.995868 | 0.272727 | 0.015512 | 0.018097 | 0.025336 | 0.333506 | 0.315926 | 0.297311 | 0.249741 | 0.212513 | 0.212513 | 0 | 0.011458 | 0.355488 | 3,927 | 134 | 165 | 29.30597 | 0.752667 | 0.152279 | 0 | 0.243243 | 0 | 0.013514 | 0.136219 | 0.028008 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.040541 | 0 | 0.297297 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da820a998854815eb9a984cf6f47297e37abd1fc | 709 | py | Python | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | 1 | 2022-02-02T04:43:32.000Z | 2022-02-02T04:43:32.000Z | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | 7 | 2022-02-02T05:25:40.000Z | 2022-03-23T17:16:19.000Z | tests/test_utils.py | leugimkm/codeseeker | f8a1f8668807a2b02cbaf5c596d26164ba75e366 | [
"MIT"
] | null | null | null | import io
import unittest
from unittest.mock import patch
from textwrap import dedent
from codeseeker.utils import show
class TestCodeSeekerUtils(unittest.TestCase):
def test_show(self):
data = [
{"path": "repository/path/to/file.py"},
{"path": "repository/path/to/file2.py"},
]
expected = dedent("""\
repository/path/to/file.py
repository/path/to/file2.py
2 file(s) found(s).\n"""
) # noqa: E124
with patch("sys.stdout", new_callable=io.StringIO) as mock_stdout:
show(data)
self.assertEqual(mock_stdout.getvalue(), expected)
if __name__ == '__main__':
unittest.main()
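# Run directly, or via the test runner from the project root, e.g.:
#     python -m unittest tests.test_utils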
| 24.448276 | 74 | 0.603667 | 83 | 709 | 5.012048 | 0.518072 | 0.134615 | 0.153846 | 0.096154 | 0.216346 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011628 | 0.272214 | 709 | 28 | 75 | 25.321429 | 0.794574 | 0.014104 | 0 | 0 | 0 | 0 | 0.278336 | 0.15208 | 0 | 0 | 0 | 0 | 0.047619 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da828fe3ebcfe4b60891da48c991e49aa603a4a1 | 3,267 | py | Python | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | avi-pal/al-go-rithms | 5167a20f1db7b366ff19f2962c1746a02e4f5067 | [
"CC0-1.0"
] | 1,253 | 2017-06-06T07:19:25.000Z | 2022-03-30T17:07:58.000Z | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 554 | 2017-09-29T18:56:01.000Z | 2022-02-21T15:48:13.000Z | cryptography/rail_fence_cipher/Python/rail_fence_cipher.py | rishabh99-rc/al-go-rithms | 4df20d7ef7598fda4bc89101f9a99aac94cdd794 | [
"CC0-1.0"
] | 2,226 | 2017-09-29T19:59:59.000Z | 2022-03-25T08:59:55.000Z | # used for decryption, take the second element for sorting
def takeSecond(elem):
    return elem[1]


def display_rail(lines):
    depth = len(lines)
    col = len(lines[0])
    # depth is the number of rows of the grid
    # lines is a list where lines[i] is the i-th line to print
    # col is the number of columns = number of characters of the initial string
    for i in range(0, depth):
        print((("| %c " * col) + "|") % tuple(lines[i]))


def encrypt(string, depth):
    # make sure that string is a string!
    string = str(string)
    nChar = len(string)
    # create a nested list with 'depth' number of items
    # each item has a number of characters = length of the string to cypher
    # initialize the list with all spaces:
    lines = [[' ', ] * nChar for _ in range(depth)]
    # encrStrings is a list dynamically filled with the letters of 'string';
    # each item of the list represents a row of the rail,
    # so this list has 'depth' items
    encrStrings = ['' for _ in range(depth)]
    # Define the sequence in which the rows are filled,
    # e.g. depth 3 gives the zig-zag row order [0, 1, 2, 1], repeated
    if depth == 2:
        row_sequence = [0, 1]
    else:
        row_sequence = [i for i in range(0, depth)]
        row_sequence.extend(range(depth - 2, 0, -1))
    # length of the sequence
    seqLen = len(row_sequence)
    for i in range(0, nChar):
        row = row_sequence[i % seqLen]  # repeatedly go through the sequence
        lines[row][i] = string[i]
        encrStrings[row] = encrStrings[row] + string[i]
    display_rail(lines)
    encrString = ''.join(c for c in encrStrings)
    return encrString


def decrypt(encrString, depth):
    # from depth and the length of the string we can determine the sequence
    # of places in the rails as they were filled
    nChar = len(encrString)
    if depth == 2:
        # same zig-zag labels as in encrypt (only the relative order matters)
        row_sequence = [0, 1]
    else:
        row_sequence = [i for i in range(0, depth)]
        row_sequence.extend(range(depth - 2, 0, -1))
    # length of the sequence
    seqLen = len(row_sequence)
    sequence = []
    # build a list with the indexes of rows and columns according to the sequence
    for i in range(0, nChar):
        row = row_sequence[i % seqLen]  # repeatedly go through the sequence
        sequence.append([row, i])
    # sort according to rows (so in the order the encrypted string is taken)
    sequence.sort()
    # now associate the encrypted string to the rail 'coordinates'
    for i in range(nChar):
        sequence[i].append(encrString[i])
    # finally for decryption we rearrange the list items according to columns and read the result
    sequence.sort(key=takeSecond)
    string = ''.join(c[2] for c in sequence)
    return string
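# Worked example for encrypt("rail fence", 2): row 0 collects "ri ec" and
# row 1 collects "alfne", so the cipher text is "ri ecalfne"; decrypt()
# inverts this by sorting the (row, column) pairs back into column order.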
# EXAMPLES
# check that len(string)>depth
print("encryptions with depth 2: ")
res = encrypt("rail fence",2)
print("rail fence: " + res)
res = decrypt(res,2)
print("decryption -> " + res)
res = encrypt("Github",2)
print("Github: " + res)
res = decrypt(res,2)
print("decryption -> " + res)
res = encrypt("I am a test!",2)
print("I am a test! -> " + res)
res = decrypt(res,2)
print("decryption -> " + res)
print("encryptions with depth 3: ")
res = encrypt("rail fence",3)
print("rail fence: " + res)
res = decrypt(res,3)
print("decryption -> " + res)
res = encrypt("Github",3)
print("Github: " + res)
res = decrypt(res,3)
print("decryption -> " + res)
res = encrypt("I am a test!",3)
print("I am a test! -> " + res)
res = decrypt(res,3)
print("decryption -> " + res)
| 27.923077 | 94 | 0.685644 | 520 | 3,267 | 4.280769 | 0.236538 | 0.049416 | 0.016173 | 0.02965 | 0.340072 | 0.323001 | 0.30009 | 0.287511 | 0.27044 | 0.237197 | 0 | 0.012864 | 0.191001 | 3,267 | 116 | 95 | 28.163793 | 0.829361 | 0.367309 | 0 | 0.457143 | 0 | 0 | 0.132713 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0 | 0.014286 | 0.1 | 0.214286 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da858baf0350df7bd476677ebd9b47d2ce1cc39c | 2,736 | py | Python | tools/bin/clearAssignment.py | cpausmit/Tapas | 283016cebc036a703609317aafc28675b1c1ea17 | [
"MIT"
] | null | null | null | tools/bin/clearAssignment.py | cpausmit/Tapas | 283016cebc036a703609317aafc28675b1c1ea17 | [
"MIT"
] | null | null | null | tools/bin/clearAssignment.py | cpausmit/Tapas | 283016cebc036a703609317aafc28675b1c1ea17 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#---------------------------------------------------------------------------------------------------
# Clear an existing assignment in the database using the unique task Id.
#
#---------------------------------------------------------------------------------------------------
import sys, os, re
import MySQLdb

import Database

print " UNTESTED -- CAREFUL NEW SUMMARY TABLES -- Assignments etc."
sys.exit(0)

EMPTY_EMAIL = "EMPTY@mit.edu"

#---------------------------------------------------------------------------------------------------
#  H E L P E R
#---------------------------------------------------------------------------------------------------
def findAssignment(cursor, semesterId, task):
    # find the person of an existing assignment
    email = 'EMPTY'
    results = []

    # Prepare SQL query to select the record from the existing table
    sql = "select * from Assignments where Term = '" + semesterId + "' and Task = '" + task + "';"
    try:
        # Execute the SQL command
        cursor.execute(sql)
        results = cursor.fetchall()
    except Exception:
        print ' ERROR - select failed: ' + sql
        email = 'ERROR'

    if len(results) == 1:
        email = results[0][1]

    return email

#---------------------------------------------------------------------------------------------------
#  M A I N
#---------------------------------------------------------------------------------------------------
usage  = " usage: clearAssignment.py  <taskId> [ <execute = no> ]\n\n"
usage += "        taskId    identification string for a specific assignment\n"
usage += "        execute   should we execute the insertion into the database\n"
usage += "                  activate by setting: execute = exec\n\n"

if len(sys.argv) < 2:
    print "\n ERROR - need to specify the taskId.\n"
    print usage
    sys.exit(0)

# Read command line arguments
taskId = sys.argv[1]
execute = "no"
if len(sys.argv) > 2:
    execute = sys.argv[2]

# Figure out which semester we are talking about
semesterId = taskId.split('-')[0]
print " Task    : " + taskId
print " Semester: " + semesterId

# Open database connection
db = Database.DatabaseHandle()

# Prepare a cursor object using cursor() method
cursor = db.getCursor()

# Prepare SQL query to update the record in the existing table
sql = "update Assignments" + \
      " set Person = '%s' where Term = '%s' and Task = '%s';"%(EMPTY_EMAIL,semesterId,taskId)
try:
    # Execute the SQL command
    print " MYSQL> " + sql
    if execute == "exec":
        cursor.execute(sql)
        db.commit()
except Exception:
    print ' ERROR - update failed: ' + sql

# disconnect from server
db.disco()

# exit
sys.exit()
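# Example (the task id format is illustrative; pass 'exec' to actually run
# the update, otherwise the SQL statement is only printed):
#     clearAssignment.py S2015-grading-1 exec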
| 31.813953 | 100 | 0.482822 | 277 | 2,736 | 4.761733 | 0.436823 | 0.018196 | 0.018196 | 0.025777 | 0.133434 | 0.078848 | 0.078848 | 0.078848 | 0.078848 | 0.078848 | 0 | 0.004634 | 0.211257 | 2,736 | 85 | 101 | 32.188235 | 0.60658 | 0.394371 | 0 | 0.170213 | 0 | 0 | 0.375535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.06383 | null | null | 0.170213 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
da85979a8a85aecc4d586a12af1e63303a1db9ca | 146 | py | Python | qna/admin.py | aryaputra28/covidify-PerancanganWeb | 34d6d0017f44248c172fc58e6e1b138e23e68a95 | [
"Unlicense"
] | null | null | null | qna/admin.py | aryaputra28/covidify-PerancanganWeb | 34d6d0017f44248c172fc58e6e1b138e23e68a95 | [
"Unlicense"
] | null | null | null | qna/admin.py | aryaputra28/covidify-PerancanganWeb | 34d6d0017f44248c172fc58e6e1b138e23e68a95 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(pertanyaan)
admin.site.register(komentar)
| 24.333333 | 32 | 0.808219 | 20 | 146 | 5.9 | 0.6 | 0.152542 | 0.288136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10274 | 146 | 5 | 33 | 29.2 | 0.900763 | 0.178082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 5 |
da85a9841500fbd6e450901a3ca02828bbbeb03f | 1,443 | py | Python | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 4 | 2019-02-12T03:06:31.000Z | 2020-07-17T03:54:46.000Z | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 3 | 2020-09-08T07:21:59.000Z | 2020-09-08T07:22:07.000Z | selfdrive/can/tests/test_packer_chrysler.py | matthewklinko/openpilot | b0563a59684d0901f99abbb58ac1fbd729ded1f9 | [
"MIT"
] | 4 | 2019-05-21T19:02:46.000Z | 2020-03-24T14:27:45.000Z | import unittest
import random
from selfdrive.can.tests.packer_old import CANPacker as CANPackerOld
from selfdrive.can.packer import CANPacker
import selfdrive.car.chrysler.chryslercan as chryslercan
class TestPackerMethods(unittest.TestCase):
def setUp(self):
self.chrysler_cp_old = CANPackerOld("chrysler_pacifica_2017_hybrid")
self.chrysler_cp = CANPacker("chrysler_pacifica_2017_hybrid")
def test_correctness(self):
# Test all commands, randomize the params.
for _ in xrange(1000):
gear = ('drive', 'reverse', 'low')[random.randint(0, 3) % 3]
lkas_active = (random.randint(0, 2) % 2 == 0)
hud_alert = random.randint(0, 6)
hud_count = random.randint(0, 65536)
lkas_car_model = random.randint(0, 65536)
m_old = chryslercan.create_lkas_hud(self.chrysler_cp_old, gear, lkas_active, hud_alert, hud_count, lkas_car_model)
m = chryslercan.create_lkas_hud(self.chrysler_cp, gear, lkas_active, hud_alert, hud_count, lkas_car_model)
self.assertEqual(m_old, m)
apply_steer = (random.randint(0, 2) % 2 == 0)
moving_fast = (random.randint(0, 2) % 2 == 0)
frame = random.randint(0, 65536)
m_old = chryslercan.create_lkas_command(self.chrysler_cp_old, apply_steer, moving_fast, frame)
m = chryslercan.create_lkas_command(self.chrysler_cp, apply_steer, moving_fast, frame)
self.assertEqual(m_old, m)
if __name__ == "__main__":
unittest.main()
| 40.083333 | 120 | 0.726265 | 203 | 1,443 | 4.871921 | 0.315271 | 0.105157 | 0.113246 | 0.051567 | 0.435794 | 0.344793 | 0.293225 | 0.173913 | 0.173913 | 0.084934 | 0 | 0.039037 | 0.165627 | 1,443 | 35 | 121 | 41.228571 | 0.782392 | 0.02772 | 0 | 0.074074 | 0 | 0 | 0.057816 | 0.041399 | 0 | 0 | 0 | 0 | 0.074074 | 1 | 0.074074 | false | 0 | 0.185185 | 0 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da85c358f54be05780771410e2c91e3ce7581a8d | 9,156 | py | Python | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | 2 | 2016-06-14T00:32:02.000Z | 2020-05-04T03:29:46.000Z | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | null | null | null | kddg/api/layers.py | Kortemme-Lab/kddg | 9fc09172abbefd4fef49261687c60a9bd9b6b29b | [
"MIT"
] | null | null | null | #!/usr/bin/python2.4
# encoding: utf-8
"""
api_layers.py
The definition of the layers of the database API and the generic user interface class.

Created by Shane O'Connor 2015.
Copyright (c) 2015 __UCSF__. All rights reserved.
"""

import inspect
import functools

from klab import colortext

from kddg.api import settings

sys_settings = settings.load()


### API function decorators. These are used to group functions together when printing the help text.

functional_layer = {
    0: 'API warnings',
    1: 'Information layer',
    2: 'Prediction layer',
    3: 'Results layer',
    4: 'Analysis layer',
    5: 'Application layer',
    6: 'Consistency layer',
    7: 'Data entry layer',
    None: 'Miscellaneous'
}


def alien(func):
    func._helptype = 'Alien functions (these should be moved into another package)'
    func._layer = 0
    func._layer_order = 0
    return func


def brokenfn(func):
    func._helptype = 'Broken functions: these need to be fixed/updated'
    func._layer = 0
    func._layer_order = 1
    return func


def deprecated(func):
    func._helptype = 'Deprecated functions. These should be removed but exist for now to print errors upon use'
    func._layer = 0
    func._layer_order = 2
    return func


def informational_misc(func):
    func._helptype = 'Miscellaneous information API'
    func._layer = 1
    func._layer_order = 0
    return func


def informational_file(func):
    func._helptype = 'File information API'
    func._layer = 1
    func._layer_order = 1
    return func


def informational_pdb(func):
    func._helptype = 'Structure information API'
    func._layer = 1
    func._layer_order = 2
    return func


def informational_complex(func):
    func._helptype = 'Complex information API'
    func._layer = 1
    func._layer_order = 3
    return func


def informational_job(func):
    func._helptype = 'Prediction information API'
    func._layer = 1
    func._layer_order = 4
    return func


def job_creator(func):
    func._helptype = 'Job creation API'
    func._layer = 2
    func._layer_order = 0
    return func


def job_input(func):
    func._helptype = 'Input file generation API'
    func._layer = 2
    func._layer_order = 1
    return func


def job_execution(func):
    func._helptype = 'Job execution API'
    func._layer = 2
    func._layer_order = 2
    return func


def job_completion(func):
    func._helptype = 'Job completion API'
    func._layer = 2
    func._layer_order = 3
    return func


def job_results(func):
    func._helptype = 'Results API'
    func._layer = 3
    func._layer_order = 0
    return func


def analysis_api(func):
    func._helptype = 'Analysis API'
    func._layer = 4
    func._layer_order = 0
    return func


def app_pymol(func):
    func._helptype = 'PyMOL API'
    func._layer = 5
    func._layer_order = 0
    return func


def sanity_check(func):
    func._helptype = 'Data consistency / sanity checks'
    func._layer = 6
    func._layer_order = 0
    return func


def general_data_entry(func):
    func._helptype = 'Data entry'
    func._layer = 7
    func._layer_order = 0
    return func


def ppi_data_entry(func):
    func._helptype = 'PPI Data entry'
    func._layer = 7
    func._layer_order = 1
    return func
class GenericUserInterface(object):
'''This is the class that should be used to interface with the database. It hides functions that should only be called
within this other API functions.
The class contains a private copy of the internal API and wraps the public functions of that API so that the
functions of GenericUserInterface contain only the public functions of the internal API. Private functions
are denoted as such by a leading underscore in the function name.
'''
@staticmethod
def generate(cls, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
return GenericUserInterface(cls, passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
@staticmethod
def bind_object_function(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs): return fn(*args, **kwargs)
return wrapper
def __init__(self, cls, passwd = None, username = sys_settings.database.username, hostname = sys_settings.database.hostname, rosetta_scripts_path = None, rosetta_database_path = None, port = sys_settings.database.port, file_content_buffer_size = None):
self._ddg_interface = cls(passwd = passwd, username = username, hostname = hostname, rosetta_scripts_path = rosetta_scripts_path, rosetta_database_path = rosetta_database_path, port = port, file_content_buffer_size = file_content_buffer_size)
self._api_functions = []
self._api_function_args = {}
self.DDG_db = self._ddg_interface.DDG_db
self.DDG_db_utf = self._ddg_interface.DDG_db_utf
self.cls = cls
for m in inspect.getmembers(cls, predicate=inspect.ismethod):
if m[0][0] != '_':
fn_name = m[0]
fn_ref = getattr(self._ddg_interface, fn_name)
self._api_function_args[fn_name] = fn_ref.func_code.co_varnames[:fn_ref.func_code.co_argcount]
self._api_functions.append(fn_name)
self.__dict__[fn_name] = GenericUserInterface.bind_object_function(getattr(self._ddg_interface, fn_name))
def help(self, show_deprecated_functions = False):
print(self.get_help(show_deprecated_functions = show_deprecated_functions))
def get_help(self, show_deprecated_functions = False):
helpstr = []
title = ' %s API ' % self._ddg_interface.__class__.__name__
l = len(title)
helpstr.append(colortext.mcyan('\n' + ('*' * (l + 10)) + '\n' + ('*' * 5) + title + ('*' * 5) + '\n' + ('*' * (l + 10)) + '\n'))
doc_strings = {}
for fn_name in sorted(self._api_functions):
fn = self.__dict__[fn_name]
function_layer, function_layer_order, function_class = None, None, None
try:
function_layer = fn._layer
assert(function_layer in functional_layer)
function_layer_order = fn._layer_order
except:
function_layer = None
function_layer_order = 0
try:
function_class = fn._helptype
except:
function_class = 'Miscellanous'
if function_class.startswith('Deprecated functions') and not show_deprecated_functions:
continue
doc_strings[function_layer] = doc_strings.get(function_layer, {})
doc_strings[function_layer][function_layer_order] = doc_strings[function_layer].get(function_layer_order, {})
doc_strings[function_layer][function_layer_order][function_class] = doc_strings[function_layer][function_layer_order].get(function_class, {})
doc_strings[function_layer][function_layer_order][function_class][fn_name] = self._get_fn_docstring(fn, fn_name)
for function_layer, function_layer_components in sorted(doc_strings.iteritems()):
function_layer_name = functional_layer[function_layer]
prefix = ''
if function_layer != None:
prefix = 'Layer %d: ' % function_layer
helpstr.append(colortext.mcyan('-------- %s%s --------\n' % (prefix, function_layer_name)))
for function_layer_order, function_classes in sorted(function_layer_components.iteritems()):
for function_class, fn_names in sorted(function_classes.iteritems()):
helpstr.append(colortext.mlightpurple(' %s\n' % function_class))
for fn_name, docstr in sorted(fn_names.iteritems()):
helpstr.append(colortext.mgreen(' %s(%s)' % (fn_name, ', '.join(self._api_function_args[fn_name]))))
if docstr:
helpstr.append(colortext.myellow(' %s' % ('\n '.join([s.strip() for s in docstr.split('\n') if s.strip()]))))
else:
helpstr.append(colortext.mred(' <not documented>'))
helpstr.append('')
return '\n'.join(helpstr)
def _get_fn_docstring(self, fn, fn_name, default_name = ''):
'''Returns the docstring for a function, winding up the inheritance tree until we find a non-empty docstring.
If no docstring is found, default_name is returned.'''
if fn.__doc__:
return fn.__doc__
# Wind up the hierarchy until we find the class where this function was last defined
for parent in self.cls.__mro__[1:]:
overridden = getattr(parent, fn_name, None)
if overridden and overridden.__doc__:
return overridden.__doc__
return default_name
| 34.292135 | 257 | 0.662844 | 1,152 | 9,156 | 4.978299 | 0.212674 | 0.056495 | 0.050218 | 0.020924 | 0.356582 | 0.33531 | 0.296774 | 0.207149 | 0.150654 | 0.122406 | 0 | 0.009556 | 0.245631 | 9,156 | 266 | 258 | 34.421053 | 0.820762 | 0.108126 | 0 | 0.302198 | 0 | 0 | 0.093132 | 0 | 0 | 0 | 0 | 0 | 0.005495 | 1 | 0.137363 | false | 0.021978 | 0.021978 | 0.010989 | 0.296703 | 0.010989 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
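Editor's note: a minimal Python 2 usage sketch for the wrapper in the record above (Python 2 to match the module's use of func_code and iteritems). InternalAPI and its methods are hypothetical stand-ins for the real kddg interface class; only the attributes the wrapper actually touches (DDG_db, DDG_db_utf) are stubbed out.

class InternalAPI(object):
    def __init__(self, passwd=None, username=None, hostname=None, rosetta_scripts_path=None,
                 rosetta_database_path=None, port=None, file_content_buffer_size=None):
        self.DDG_db = None      # stub database handles read by GenericUserInterface
        self.DDG_db_utf = None

    @job_results
    def get_prediction_scores(self, prediction_id):
        '''Returns the stored scores for a prediction.'''
        return {}

    def _private_helper(self):
        pass

api = GenericUserInterface.generate(InternalAPI)
api.get_prediction_scores(1234)  # public methods are re-exposed on the wrapper
api.help()                       # prints methods grouped by their layer decorators
# api._private_helper            # underscore-prefixed methods are not wrapped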
da8712cd2ff361045352f744da703fa2ec6f82df | 3,142 | py | Python | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | 1 | 2016-04-11T01:18:53.000Z | 2016-04-11T01:18:53.000Z | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | null | null | null | fb_api.py | wing3s/shop_bot | 4c6a34538ac8de9999edae190f6269bc6a63c2cf | [
"BSD-3-Clause"
] | null | null | null |
import os
import requests
import time
import ConfigParser
import logging
import logging.config

from requests.exceptions import RequestException
from helper import get_logger, base_path

config = ConfigParser.ConfigParser()
config.read(os.path.join(base_path, 'config.ini'))
logger = get_logger('fb_api', __file__)

__author__ = "Wen-Hao Lee"
__email__ = "wing3s@gmail.com"
__copyright__ = "Copyright 2014, Numnum"


class FBBot(object):
    graph_url = "https://graph.facebook.com"
    cooldown = 120  # sec
    search_radius = 500  # m

    def search_restaurant(self, lat, lon):
        restaurants = self._search_place('restaurant', lat, lon)
        steakhouses = self._search_place('steakhouse', lat, lon)
        bars = self._search_place('bar', lat, lon)
        return restaurants + steakhouses + bars

    def _search_place(self, query, lat, lon):
        params = {
            'q': query,
            'type': 'place',
            'center': '%s,%s' % (lat, lon),
            'distance': self.search_radius,
            'limit': 500,
            'offset': 0
        }
        return self.search(params)

    def search(self, params):
        params['access_token'] = "{app_id}|{app_key}".format(
            app_key=config.get('fbAPI', 'key'),
            app_id=config.get('fbAPI', 'id'))
        try:
            r = requests.get(
                "%s/%s" % (self.graph_url, 'search'),
                params=params)
            resp = r.json()
            if r.status_code != 200:
                resp_err = resp.get('error')
                err_code = resp_err.get('code')
                if err_code == 4:
                    logger.warning(
                        'Reach limit, cooldown %ds' % self.cooldown)
                    time.sleep(self.cooldown)
                    return self.search(params)
                else:
                    logger.error(resp)
                    return None
            return resp['data']
        except RequestException as err:
            logger.error(err)

    def fetch(self, fbid):
        try:
            r = requests.get("%s/%s" % (self.graph_url, fbid))
            resp = r.json()
            if r.status_code != 200:
                resp_err = resp.get('error')
                err_code = resp_err.get('code')
                if err_code == 4:
                    logger.warning(
                        'Reach limit, cooldown %ds' % self.cooldown)
                    time.sleep(self.cooldown)
                    return self.fetch(fbid)
                elif err_code == 21:
                    err_msg = resp_err.get('message')
                    new_fbid_pt = 'page ID'
                    new_fbid = err_msg[
                        err_msg.index(new_fbid_pt)+len(new_fbid_pt)+1:
                        err_msg.index('.')]
                    logger.warning(
                        'Get new fbid %s for %s' % (new_fbid, fbid))
                    return self.fetch(new_fbid)
                else:
                    logger.error([resp, r.url])
                    return None
            return resp
        except RequestException as err:
            logger.error(err)
| 34.152174 | 70 | 0.510185 | 341 | 3,142 | 4.510264 | 0.316716 | 0.03186 | 0.029259 | 0.028609 | 0.287386 | 0.287386 | 0.287386 | 0.23407 | 0.23407 | 0.196359 | 0 | 0.013306 | 0.378103 | 3,142 | 92 | 71 | 34.152174 | 0.773797 | 0.001591 | 0 | 0.349398 | 0 | 0 | 0.103349 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.048193 | false | 0 | 0.096386 | 0 | 0.301205 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
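Editor's note: a short Python 2 usage sketch for the FBBot class above (Python 2, matching the module's ConfigParser import). The coordinates and page id are placeholders, and a config.ini with an fbAPI section providing key/id is assumed, as read at module import.

bot = FBBot()
places = bot.search_restaurant(40.7128, -74.0060)  # lat/lon placeholders (New York)
if places:
    for place in places[:5]:
        print place.get('name'), place.get('id')   # Graph API search results are dicts
page = bot.fetch('SomePageId')                     # follows code-21 page-ID migrations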
da89128d24114037df1325dfa4587c3b0ac3e279 | 6,409 | py | Python | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | 5 | 2020-02-21T09:22:58.000Z | 2021-09-07T16:39:47.000Z | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | null | null | null | examples/formula_library.py | bherbruck/plend | 55271d79c983cc3b3307661833c5a7dcc11efc32 | [
"MIT"
] | 1 | 2022-01-26T20:00:47.000Z | 2022-01-26T20:00:47.000Z |
"""
This example shows how to statically define formulas,
add them to a formula library, optimize them, and
output the results.
"Statically in this context means we are manually
setting the attribures (min and max) for each
ingredient and nutrient rather defining them
dynamically (which is where plend really shines)
TODO: make an example with dynamic formulas
"""
from plend import Nutrient, Ingredient, Formula, FormulaLibrary
from plend.presets.poultry import *
# initialize the starter formula
starter = Formula(name='Starter', code='B1', batch_size=100)
# add ingredients to starter from presets
starter.add_ingredient(corn)
starter.add_ingredient(soybean_meal)
starter.add_ingredient(oil, maximum=10)
# add nutrients to grower from presets
starter.add_ingredient(limestone)
starter.add_ingredient(meat_meal, maximum=10)
starter.add_nutrient(energy, minimum=3010)
starter.add_nutrient(protein, minimum=24)
starter.add_nutrient(fiber)
starter.add_nutrient(calcium, minimum=1)
# initialize the grower formula
grower = Formula(name='Grower', code='B2', batch_size=100)
# add ingredients to grower from presets
grower.add_ingredient(corn)
grower.add_ingredient(soybean_meal)
grower.add_ingredient(oil, maximum=10)
# add nutrients to grower from presets
grower.add_ingredient(limestone)
grower.add_ingredient(meat_meal, maximum=10)
grower.add_nutrient(energy, minimum=3175)
grower.add_nutrient(protein, minimum=22)
grower.add_nutrient(fiber)
grower.add_nutrient(calcium, minimum=0.9)
# initialize the finisher formula
finisher = Formula(name='Finisher', code='B3', batch_size=100)
# add ingredients to finisher from presets
finisher.add_ingredient(corn)
finisher.add_ingredient(soybean_meal)
finisher.add_ingredient(oil, maximum=10)
finisher.add_ingredient(limestone)
finisher.add_ingredient(meat_meal, maximum=10)
# add nutrients to finisher from presets
finisher.add_nutrient(energy, minimum=3225)
finisher.add_nutrient(protein, minimum=20)
finisher.add_nutrient(fiber)
finisher.add_nutrient(calcium, minimum=0.85)
formulas = FormulaLibrary(name='Broiler')
formulas.add_formulas(starter, grower, finisher)
formulas.optimize()
print(formulas.to_csv())
formulas.save_csv('examples/formulas.csv')
"""
this will have the output (this output has been aligned for readability):
library_name ,formula_name ,formula_code ,formula_cost ,formula_status ,item_type ,item_name ,item_code ,item_amount ,item_minimum ,item_maximum
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Corn , ,58.587658 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Soybean Meal , ,30.429012 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Oil , ,0.63258515 ,0 ,10
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Limestone , ,0.35074529 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Energy , ,3010.0000132 ,3010 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Protein , ,24.000000110000002 ,24 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Fiber , ,2.37756181 ,0 ,
Broiler ,Starter ,B1 ,68.312016841 ,Optimal ,nutrient ,Calcium , ,1.0 ,1 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Corn , ,61.16353 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Soybean Meal , ,25.859865 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Oil , ,2.8656471 ,0 ,10
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Limestone , ,0.11095768 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Energy , ,3174.9999923 ,3175 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Protein , ,21.999999950000003 ,22 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Fiber , ,2.3048842 ,0 ,
Broiler ,Grower ,B2 ,68.284483722 ,Optimal ,nutrient ,Calcium , ,0.9000000014 ,0.9 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Corn , ,66.023255 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Soybean Meal , ,20.933866 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Oil , ,3.038852 ,0 ,10
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Limestone , ,0.0040261626 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,ingredient ,Meat Meal , ,10.0 ,0 ,10
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Energy , ,3224.9999740000003 ,3225 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Protein , ,19.999999805 ,20 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Fiber , ,2.278597355 ,0 ,
Broiler ,Finisher ,B3 ,66.00538196504 ,Optimal ,nutrient ,Calcium , ,0.849999999288 ,0.85 ,
""" | 60.462264 | 157 | 0.568575 | 656 | 6,409 | 5.478659 | 0.217988 | 0.054257 | 0.040067 | 0.045075 | 0.497496 | 0.458542 | 0.393434 | 0.217585 | 0.062883 | 0.031163 | 0 | 0.164482 | 0.345452 | 6,409 | 106 | 158 | 60.462264 | 0.692253 | 0.107037 | 0 | 0 | 0 | 0 | 0.036424 | 0.013907 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0 | false | 0 | 0.054054 | 0 | 0.054054 | 0.027027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da89cac67e3dd9455f993529126f6ea3e387def3 | 1,278 | py | Python | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 23 | 2017-12-12T17:59:26.000Z | 2022-02-01T20:19:56.000Z | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 45 | 2017-05-03T14:05:19.000Z | 2022-03-02T07:28:39.000Z | sstmap/scripts/dtr_to_netcdf.py | ssabrii/SSTMap | f4f3fb72ed632f00b9f519ae9eab4a41b6c69db9 | [
"MIT"
] | 24 | 2017-04-28T19:49:56.000Z | 2021-11-05T17:57:02.000Z |
from argparse import ArgumentParser

import mdtraj as md


def parse_args():
    """Parse the command line arguments and perform some validation on the
    arguments

    Returns
    -------
    args : argparse.Namespace
        The namespace containing the arguments
    """
    parser = ArgumentParser(
        description='''Run GIST calculations through command-line.''')
    parser.add_argument('-i', '--input_parm', required=False, type=str,
                        help='''Input topology file.''')
    parser.add_argument('-t', '--input_traj', required=True, type=str,
                        help='''Input trajectory file.''')
    parser.add_argument('-o', '--output_prefix', required=False, type=str,
                        help='''Prefix for all the results files.''')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()
    print("Reading in trajectory ...")
    traj = md.load_dtr(args.input_traj, top=args.input_parm)
    print(traj)
    print("Outputting NETCDF ...")
    traj.save_netcdf(args.output_prefix + "_converted.nc")
    print("Outputting PDB file of frame 1 ...")
    traj[0].save_pdb(args.output_prefix + "_converted.pdb")
    print("Done")


def entry_point():
    main()


if __name__ == '__main__':
    entry_point()
| 29.045455 | 74 | 0.628326 | 152 | 1,278 | 5.098684 | 0.486842 | 0.034839 | 0.065806 | 0.051613 | 0.061935 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002041 | 0.233177 | 1,278 | 43 | 75 | 29.72093 | 0.788776 | 0.126761 | 0 | 0 | 0 | 0 | 0.258986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0 | 0.222222 | 0.185185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
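Editor's note: the script above is a thin CLI wrapper, so a usage sketch is just an invocation; all file names below are placeholders.

# python dtr_to_netcdf.py -i system.prmtop -t trajectory.dtr -o run1
# -> writes run1_converted.nc and run1_converted.pdb (frame 1 only)
import sys
sys.argv = ['dtr_to_netcdf.py', '-i', 'system.prmtop', '-t', 'trajectory.dtr', '-o', 'run1']
entry_point()  # equivalent to the command line above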
da8aea2d0e2cc438f1e2afe2e6c0015770e13ef8 | 6,106 | py | Python | tpDcc/abstract/scene.py | tpDcc/tpDccLib | a4f77a3fdd981eac494331e429c92bd3e4a87d3b | [
"MIT"
] | 6 | 2021-03-02T00:31:53.000Z | 2021-03-30T09:02:54.000Z | tpDcc/abstract/scene.py | tpDcc/tpDccLib | a4f77a3fdd981eac494331e429c92bd3e4a87d3b | [
"MIT"
] | 1 | 2021-03-02T08:43:34.000Z | 2021-03-04T01:36:02.000Z | tpDcc/abstract/scene.py | tpDcc/tpDccLib | a4f77a3fdd981eac494331e429c92bd3e4a87d3b | [
"MIT"
] | 1 | 2021-03-03T21:01:51.000Z | 2021-03-03T21:01:51.000Z |
#! /usr/bin/env python
# -*- coding: utf-8 -*-

"""
Module that contains DCC scene abstract class implementation
"""

from __future__ import print_function, division, absolute_import

from tpDcc import dcc
from tpDcc.dcc import sceneobject
from tpDcc.libs.python import python, decorators


class AbstractScene(object):

    # ==============================================================================================
    # ABSTRACT FUNCTIONS
    # ==============================================================================================

    @decorators.abstractmethod
    def _dcc_objects(self, from_selection=False, wildcard='', object_type=None):
        """
        Internal function that returns DCC objects from current scene
        :param from_selection: bool, Whether to return only selected DCC objects or all objects in the scene
        :param wildcard: str, filter objects by its name
        :param object_type: int
        :return: list(variant)
        """
        raise NotImplementedError('Abstract Scene _dcc_objects function not implemented!')

    @decorators.abstractmethod
    def _rename_dcc_objects(self, dcc_native_objects, names, display=True):
        """
        Rename given DCC objects with the given new names
        :param dcc_native_objects: variant or list(variant)
        :param names: list(str)
        :param display: bool, Whether or not we want to rename internal dcc name or display name
        :return: bool, True if the operation is successful; False otherwise
        """
        raise NotImplementedError('Abstract Scene _rename_dcc_objects function not implemented!')

    # ==============================================================================================
    # BASE
    # ==============================================================================================

    def objects(self, wildcard='', object_type=None):
        """
        Returns a list of scene objects as SceneObjects
        :param wildcard: str, filter objects by its name
        :param object_type:
        :return: list(SceneObject)
        """
        return [sceneobject.SceneObject(self, obj) for obj in self._dcc_objects(
            from_selection=False, wildcard=wildcard, object_type=object_type)]

    def selected_objects(self, wildcard='', object_type=None):
        """
        Returns a list of selected objects in current scene as SceneObjects
        :param wildcard: str, filter objects by its name
        :param object_type: int
        :return: list(SceneObject)
        """
        return [sceneobject.SceneObject(self, obj) for obj in self._dcc_objects(
            from_selection=True, wildcard=wildcard, object_type=object_type)]

    def root_object(self):
        """
        Returns the DCC root object of the scene as SceneObject
        :return: SceneObject or None
        """
        dcc_root = self._dcc_root_object()
        if not dcc_root:
            return None
        return sceneobject.SceneObject(self, dcc_root)

    def remove_objects(self, objects):
        """
        Removes the given objects from this scene
        :param objects: list(SceneObject)
        :return: bool, True if the operation was successful; False otherwise
        """
        objects = python.force_list(objects)
        return self._remove_dcc_objects([obj.dcc_native_object() for obj in objects if not obj.is_deleted()])

    def rename_objects(self, objects, names, display=True):
        """
        Rename given objects with the given new names
        :param objects: SceneObject or list(SceneObject)
        :param names: list(str)
        :param display: bool, Whether or not we want to rename internal dcc name or display name
        :return: bool, True if the operation is successful; False otherwise
        """
        objects = python.force_list(objects)
        names = python.force_list(names)
        if len(objects) != len(names):
            return False
        return self._rename_dcc_objects(
            [obj.dcc_native_object() for obj in objects if not obj.is_deleted()], names, display=display)

    def find_object_by_name(self, name):
        """
        Looks for an individual node by its name
        :param name: str, name of the object to find
        :return: SceneObject or None
        """
        dcc_object = self._find_dcc_object_by_name(name)
        if not dcc_object:
            return None
        return sceneobject.SceneObject(self, dcc_object)

    def find_object_by_id(self, unique_id):
        """
        Looks for an individual node by its ID
        :param unique_id: unique identifier of the object to find in current scene
        :return: SceneObject or None
        """
        dcc_object = self._find_dcc_object_by_id(unique_id)
        if not dcc_object:
            return None
        return sceneobject.SceneObject(self, dcc_object)

    # ==============================================================================================
    # INTERNAL
    # ==============================================================================================

    def _dcc_root_object(self):
        """
        Internal function that returns DCC root object from current scene
        :return: variant
        """
        return dcc.root_node()

    def _remove_dcc_objects(self, dcc_native_objects):
        """
        Internal function that removes given DCC objects from current scene
        :param dcc_native_objects: variant or list(variant)
        :return: bool, True if the operation is successful; False otherwise
        """
        return dcc.delete_node(dcc_native_objects)

    def _find_dcc_object_by_name(self, name):
        """
        Internal function that returns a valid DCC object by its name
        :param name: str
        :return: variant
        """
        return dcc.find_node_by_name(name)

    def _find_dcc_object_by_id(self, unique_id):
        """
        Internal function that returns a valid DCC object by its ID
        :param unique_id: str
        :return: variant
        """
        return dcc.find_node_by_id(unique_id)
| 34.891429 | 109 | 0.589912 | 691 | 6,106 | 5.037627 | 0.150507 | 0.037346 | 0.020684 | 0.031026 | 0.632577 | 0.555013 | 0.464234 | 0.424878 | 0.357656 | 0.335248 | 0 | 0.000218 | 0.250409 | 6,106 | 174 | 110 | 35.091954 | 0.760323 | 0.458238 | 0 | 0.26 | 0 | 0 | 0.041316 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.26 | false | 0 | 0.08 | 0 | 0.66 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 2 |
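Editor's note: a minimal concrete subclass sketch for the abstract class above. The DCC calls are placeholders; only the two decorated abstract methods need implementations, everything else is inherited.

class DummyScene(AbstractScene):
    def _dcc_objects(self, from_selection=False, wildcard='', object_type=None):
        # placeholder: a real implementation would query the DCC here
        return []

    def _rename_dcc_objects(self, dcc_native_objects, names, display=True):
        # placeholder: a real implementation would call the DCC rename command
        return True

scene = DummyScene()
scene.objects(wildcard='*_ctrl')  # results come back wrapped as sceneobject.SceneObject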
da8e614ab7b081bdccebe5c0752328dc5769b689 | 11,303 | py | Python | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | totuwei/corundum | e983ad519fb4523d0ffca32f5e436195bcfc945c | [
"BSD-2-Clause-FreeBSD"
] | 544 | 2019-08-12T03:45:32.000Z | 2022-03-19T14:17:20.000Z | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | akira2009999/corundum | cdc14769c33186c6d45fcd79b95c70889febff2b | [
"BSD-2-Clause-FreeBSD"
] | 78 | 2020-08-20T20:06:33.000Z | 2022-03-30T23:44:37.000Z | fpga/lib/pcie/tb/test_dma_client_axis_sink_512_64.py | akira2009999/corundum | cdc14769c33186c6d45fcd79b95c70889febff2b | [
"BSD-2-Clause-FreeBSD"
] | 142 | 2019-07-15T04:23:23.000Z | 2022-03-29T01:25:33.000Z |
#!/usr/bin/env python
"""

Copyright (c) 2019 Alex Forencich

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

"""

from myhdl import *
import os

import dma_ram
import axis_ep

module = 'dma_client_axis_sink'
testbench = 'test_%s_512_64' % module

srcs = []

srcs.append("../rtl/%s.v" % module)
srcs.append("%s.v" % testbench)

src = ' '.join(srcs)

build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)

def bench():

    # Parameters
    SEG_COUNT = 4
    SEG_DATA_WIDTH = 128
    SEG_ADDR_WIDTH = 12
    SEG_BE_WIDTH = int(SEG_DATA_WIDTH/8)
    RAM_ADDR_WIDTH = SEG_ADDR_WIDTH+(SEG_COUNT-1).bit_length()+(SEG_BE_WIDTH-1).bit_length()
    AXIS_DATA_WIDTH = 64
    AXIS_KEEP_ENABLE = (AXIS_DATA_WIDTH>8)
    AXIS_KEEP_WIDTH = (AXIS_DATA_WIDTH/8)
    AXIS_LAST_ENABLE = 1
    AXIS_ID_ENABLE = 0
    AXIS_ID_WIDTH = 8
    AXIS_DEST_ENABLE = 0
    AXIS_DEST_WIDTH = 8
    AXIS_USER_ENABLE = 1
    AXIS_USER_WIDTH = 1
    LEN_WIDTH = 20
    TAG_WIDTH = 8

    # Inputs
    clk = Signal(bool(0))
    rst = Signal(bool(0))
    current_test = Signal(intbv(0)[8:])

    s_axis_write_desc_ram_addr = Signal(intbv(0)[RAM_ADDR_WIDTH:])
    s_axis_write_desc_len = Signal(intbv(0)[LEN_WIDTH:])
    s_axis_write_desc_tag = Signal(intbv(0)[TAG_WIDTH:])
    s_axis_write_desc_valid = Signal(bool(0))
    s_axis_write_data_tdata = Signal(intbv(0)[AXIS_DATA_WIDTH:])
    s_axis_write_data_tkeep = Signal(intbv(0)[AXIS_KEEP_WIDTH:])
    s_axis_write_data_tvalid = Signal(bool(0))
    s_axis_write_data_tlast = Signal(bool(0))
    s_axis_write_data_tid = Signal(intbv(0)[AXIS_ID_WIDTH:])
    s_axis_write_data_tdest = Signal(intbv(0)[AXIS_DEST_WIDTH:])
    s_axis_write_data_tuser = Signal(intbv(0)[AXIS_USER_WIDTH:])
    ram_wr_cmd_ready = Signal(intbv(0)[SEG_COUNT:])
    enable = Signal(bool(0))
    abort = Signal(bool(0))

    # Outputs
    s_axis_write_desc_ready = Signal(bool(0))
    m_axis_write_desc_status_len = Signal(intbv(0)[LEN_WIDTH:])
    m_axis_write_desc_status_tag = Signal(intbv(0)[TAG_WIDTH:])
    m_axis_write_desc_status_id = Signal(intbv(0)[AXIS_ID_WIDTH:])
    m_axis_write_desc_status_dest = Signal(intbv(0)[AXIS_DEST_WIDTH:])
    m_axis_write_desc_status_user = Signal(intbv(0)[AXIS_USER_WIDTH:])
    m_axis_write_desc_status_valid = Signal(bool(0))
    s_axis_write_data_tready = Signal(bool(0))
    ram_wr_cmd_be = Signal(intbv(0)[SEG_COUNT*SEG_BE_WIDTH:])
    ram_wr_cmd_addr = Signal(intbv(0)[SEG_COUNT*SEG_ADDR_WIDTH:])
    ram_wr_cmd_data = Signal(intbv(0)[SEG_COUNT*SEG_DATA_WIDTH:])
    ram_wr_cmd_valid = Signal(intbv(0)[SEG_COUNT:])

    # PCIe DMA RAM
    dma_ram_inst = dma_ram.PSDPRam(2**16)
    dma_ram_pause = Signal(bool(0))

    dma_ram_port0 = dma_ram_inst.create_write_ports(
        clk,
        ram_wr_cmd_be=ram_wr_cmd_be,
        ram_wr_cmd_addr=ram_wr_cmd_addr,
        ram_wr_cmd_data=ram_wr_cmd_data,
        ram_wr_cmd_valid=ram_wr_cmd_valid,
        ram_wr_cmd_ready=ram_wr_cmd_ready,
        pause=dma_ram_pause,
        name='port0'
    )

    # sources and sinks
    write_desc_source = axis_ep.AXIStreamSource()
    write_desc_source_pause = Signal(bool(False))

    write_desc_source_logic = write_desc_source.create_logic(
        clk,
        rst,
        tdata=(s_axis_write_desc_ram_addr, s_axis_write_desc_len, s_axis_write_desc_tag),
        tvalid=s_axis_write_desc_valid,
        tready=s_axis_write_desc_ready,
        pause=write_desc_source_pause,
        name='write_desc_source'
    )

    write_desc_status_sink = axis_ep.AXIStreamSink()

    write_desc_status_sink_logic = write_desc_status_sink.create_logic(
        clk,
        rst,
        tdata=(m_axis_write_desc_status_len, m_axis_write_desc_status_tag, m_axis_write_desc_status_id, m_axis_write_desc_status_dest, m_axis_write_desc_status_user),
        tvalid=m_axis_write_desc_status_valid,
        name='write_desc_status_sink'
    )

    write_data_source = axis_ep.AXIStreamSource()
    write_data_source_pause = Signal(bool(False))

    write_data_source_logic = write_data_source.create_logic(
        clk,
        rst,
        tdata=s_axis_write_data_tdata,
        tkeep=s_axis_write_data_tkeep,
        tvalid=s_axis_write_data_tvalid,
        tready=s_axis_write_data_tready,
        tlast=s_axis_write_data_tlast,
        tid=s_axis_write_data_tid,
        tdest=s_axis_write_data_tdest,
        tuser=s_axis_write_data_tuser,
        pause=write_data_source_pause,
        name='write_data_source'
    )

    # DUT
    if os.system(build_cmd):
        raise Exception("Error running build command")

    dut = Cosimulation(
        "vvp -m myhdl %s.vvp -lxt2" % testbench,
        clk=clk,
        rst=rst,
        current_test=current_test,
        s_axis_write_desc_ram_addr=s_axis_write_desc_ram_addr,
        s_axis_write_desc_len=s_axis_write_desc_len,
        s_axis_write_desc_tag=s_axis_write_desc_tag,
        s_axis_write_desc_valid=s_axis_write_desc_valid,
        s_axis_write_desc_ready=s_axis_write_desc_ready,
        m_axis_write_desc_status_len=m_axis_write_desc_status_len,
        m_axis_write_desc_status_tag=m_axis_write_desc_status_tag,
        m_axis_write_desc_status_id=m_axis_write_desc_status_id,
        m_axis_write_desc_status_dest=m_axis_write_desc_status_dest,
        m_axis_write_desc_status_user=m_axis_write_desc_status_user,
        m_axis_write_desc_status_valid=m_axis_write_desc_status_valid,
        s_axis_write_data_tdata=s_axis_write_data_tdata,
        s_axis_write_data_tkeep=s_axis_write_data_tkeep,
        s_axis_write_data_tvalid=s_axis_write_data_tvalid,
        s_axis_write_data_tready=s_axis_write_data_tready,
        s_axis_write_data_tlast=s_axis_write_data_tlast,
        s_axis_write_data_tid=s_axis_write_data_tid,
        s_axis_write_data_tdest=s_axis_write_data_tdest,
        s_axis_write_data_tuser=s_axis_write_data_tuser,
        ram_wr_cmd_be=ram_wr_cmd_be,
        ram_wr_cmd_addr=ram_wr_cmd_addr,
        ram_wr_cmd_data=ram_wr_cmd_data,
        ram_wr_cmd_valid=ram_wr_cmd_valid,
        ram_wr_cmd_ready=ram_wr_cmd_ready,
        enable=enable,
        abort=abort
    )

    @always(delay(4))
    def clkgen():
        clk.next = not clk

    def wait_normal():
        while write_desc_status_sink.empty():
            yield clk.posedge

    def wait_pause_ram():
        while write_desc_status_sink.empty():
            dma_ram_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            dma_ram_pause.next = False
            yield clk.posedge

    def wait_pause_source():
        while write_desc_status_sink.empty():
            write_data_source_pause.next = True
            yield clk.posedge
            yield clk.posedge
            yield clk.posedge
            write_data_source_pause.next = False
            yield clk.posedge

    @instance
    def check():
        yield delay(100)
        yield clk.posedge
        rst.next = 1
        yield clk.posedge
        rst.next = 0
        yield clk.posedge
        yield delay(100)
        yield clk.posedge

        # testbench stimulus

        cur_tag = 1

        enable.next = 1

        yield clk.posedge
        print("test 1: write")
        current_test.next = 1

        addr = 0x00000000
        test_data = b'\x11\x22\x33\x44'

        write_desc_source.send([(addr, len(test_data), cur_tag)])
        write_data_source.send(axis_ep.AXIStreamFrame(test_data, id=cur_tag))

        yield write_desc_status_sink.wait(2000)

        status = write_desc_status_sink.recv()

        print(status)
        assert status.data[0][0] == len(test_data)
        assert status.data[0][1] == cur_tag
        assert status.data[0][2] == cur_tag

        data = dma_ram_inst.read_mem(addr, 32)
        for i in range(0, len(data), 16):
            print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))

        assert dma_ram_inst.read_mem(addr, len(test_data)) == test_data

        cur_tag = (cur_tag + 1) % 256

        yield delay(100)

        yield clk.posedge
        print("test 2: various writes")
        current_test.next = 2

        for length in list(range(1,66))+[128]:
            for offset in list(range(8,65,8))+list(range(4096-64,4096,8)):
                for diff in [-16, -2, -1, 0, 1, 2, 16]:
                    if length+diff < 1:
                        continue
                    for wait in wait_normal, wait_pause_ram, wait_pause_source:
                        print("length %d, offset %d, diff %d"% (length, offset, diff))
                        #addr = length * 0x100000000 + offset * 0x10000 + offset
                        addr = offset
                        test_data = bytearray([x%256 for x in range(length)])
                        test_data2 = bytearray([x%256 for x in range(length+diff)])

                        dma_ram_inst.write_mem(addr & 0xffff80, b'\xaa'*(len(test_data)+256))

                        write_desc_source.send([(addr, len(test_data), cur_tag)])
                        write_data_source.send(axis_ep.AXIStreamFrame(test_data2, id=cur_tag))

                        yield wait()
                        yield clk.posedge
                        yield clk.posedge

                        status = write_desc_status_sink.recv()

                        print(status)
                        assert status.data[0][0] == min(len(test_data), len(test_data2))
                        assert status.data[0][1] == cur_tag
                        assert status.data[0][2] == cur_tag

                        data = dma_ram_inst.read_mem(addr&0xfffff0, 64)
                        for i in range(0, len(data), 16):
                            print(" ".join(("{:02x}".format(c) for c in bytearray(data[i:i+16]))))

                        if len(test_data) <= len(test_data2):
                            assert dma_ram_inst.read_mem(addr-8, len(test_data)+16) == b'\xaa'*8+test_data+b'\xaa'*8
                        else:
                            assert dma_ram_inst.read_mem(addr-8, len(test_data2)+16) == b'\xaa'*8+test_data2+b'\xaa'*8

                        cur_tag = (cur_tag + 1) % 256

        yield delay(100)

        raise StopSimulation

    return instances()

def test_bench():
    sim = Simulation(bench())
    sim.run()

if __name__ == '__main__':
    print("Running test...")
    test_bench()
| 35.211838 | 166 | 0.657348 | 1,674 | 11,303 | 4.038829 | 0.158303 | 0.101168 | 0.076912 | 0.066262 | 0.56042 | 0.4841 | 0.327171 | 0.308978 | 0.291229 | 0.221269 | 0 | 0.02679 | 0.253649 | 11,303 | 320 | 167 | 35.321875 | 0.774656 | 0.107228 | 0 | 0.244635 | 0 | 0 | 0.031867 | 0.002184 | 0 | 0 | 0.002581 | 0 | 0.038627 | 1 | 0.030043 | false | 0 | 0.017167 | 0 | 0.051502 | 0.034335 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
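Editor's note: working through the RAM_ADDR_WIDTH arithmetic from the parameter block above, for the values used in this testbench:

SEG_COUNT = 4
SEG_DATA_WIDTH = 128
SEG_ADDR_WIDTH = 12
SEG_BE_WIDTH = int(SEG_DATA_WIDTH / 8)                 # 16 byte enables per segment
RAM_ADDR_WIDTH = (SEG_ADDR_WIDTH
                  + (SEG_COUNT - 1).bit_length()       # 2 bits to select the segment
                  + (SEG_BE_WIDTH - 1).bit_length())   # 4 bits to select the byte
assert RAM_ADDR_WIDTH == 18                            # 12 + 2 + 4 byte-address bits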
da8e6b5d27ab3ab699761ec5dcf3eadc2360c2c0 | 5,713 | py | Python | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | 1 | 2016-05-18T06:30:26.000Z | 2016-05-18T06:30:26.000Z | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | null | null | null | scrape_votes.py | purrcat259/reddit-vote-grapher | 0a0f1dccee7befc6e94e856d09eb61b546b34644 | [
"MIT"
] | null | null | null |
#!/usr/bin/python3
import time
import os
import csv
import praw
import OAuth2Util
from pprint import pprint


class SubmissionCSV:
    def __init__(self, file_name='', csv_directory='data'):
        self.file_name = file_name + '.csv'
        self.file_path = os.path.join(os.getcwd(), csv_directory, self.file_name)

    def run(self, data_row=None):
        self.create_csv()
        if data_row is not None:
            self.write_row(row=data_row)

    def create_csv(self):
        # create the CSV if it does not exist
        if not os.path.isfile(self.file_path):
            with open(self.file_path, mode='w', newline='') as csvfile:
                csvfile.flush()
            time.sleep(1)

    def write_row(self, row=None):
        if row is not None:
            with open(self.file_path, mode='a', newline='') as csvfile:
                writer = csv.writer(csvfile, quotechar='"')
                writer.writerow(row)
                csvfile.flush()


class VoteScraper:
    def __init__(self, user_agent='vote-grapher-v1-by-Always_SFW', subreddit='EliteDangerous', verbose=True):
        self.user_agent = user_agent
        self.subreddit_name = subreddit
        self.verbose = verbose
        self.r = None
        self.o = None
        self.subreddit = None
        self.submission_limit = 50
        self.start_time = time.time()
        # holds the objects for cached submissions.
        self.cached_submissions = []

    def run(self):
        self.connect()
        while True:
            print('Retrieving/Removing submissions')
            self.cache_new_submissions()
            self.remove_old_submissions()
            self.store_submissions_data()
            self.show_time_elapsed()
            self.print('Pausing for 120 seconds')
            time.sleep(120)

    def print(self, string=''):
        if self.verbose:
            print(string)

    def connect(self):
        # initialise a connection to reddit
        self.print('Initialising connection to Reddit')
        try:
            self.r = praw.Reddit(self.user_agent)
            self.o = OAuth2Util.OAuth2Util(self.r)
            # force re-validating the access token
            self.o.refresh(force=True)
            self.print('Successfully connected to Reddit')
        except Exception as e:
            print('Unable to connect to Reddit: {}'.format(e))
            quit()
        self.subreddit = self.r.get_subreddit(subreddit_name=self.subreddit_name)

    def get_latest_submissions(self):
        # self.print('Getting latest submissions')
        try:
            new_submissions = self.subreddit.get_new(limit=self.submission_limit)
        except Exception as e:
            print(e)
            return []
        return new_submissions

    def cache_new_submissions(self):
        new_submissions = self.get_latest_submissions()
        # self.print('Caching new submissions')
        previous_count = len(self.cached_submissions)
        for submission in new_submissions:
            if submission not in self.cached_submissions:
                self.cached_submissions.append(submission)
        self.print('{} new submissions recorded'.format(len(self.cached_submissions) - previous_count))

    def remove_old_submissions(self):
        # self.print('Removing old submissions')
        current_time = time.time()
        to_remove = []
        previous_count = len(self.cached_submissions)
        for submission in self.cached_submissions:
            if (current_time - submission.created_utc) > (12 * 60 * 60):
                # self.print('Removing Submission with ID: {} as it is older than 12 hours'.format(submission.id))
                to_remove.append(submission)
        # remove the old submissions from the cached submissions list
        self.cached_submissions = [sub for sub in self.cached_submissions if sub not in to_remove]
        self.print('{} old submissions removed'.format(previous_count - len(self.cached_submissions)))
        # append '_complete' to the old submission file names
        for submission in to_remove:
            file_name = str(submission.id) + '.csv'
            new_file_name = str(submission.id) + '_complete.csv'
            path = os.path.join(os.getcwd(), 'data', file_name)
            # only perform this if the file actually exists
            if os.path.isfile(path):
                os.rename(src=path, dst=os.path.join(os.getcwd(), 'data', new_file_name))

    def store_submissions_data(self):
        for i, sub in enumerate(self.cached_submissions):
            try:
                sub.refresh()
                ratio = self.r.get_submission(sub.permalink).upvote_ratio
            except Exception as e:
                print(e)
                continue
            ups = int(round((ratio*sub.score)/(2*ratio - 1)) if ratio != 0.5 else round(sub.score/2))
            downs = ups - sub.score
            self.print('[{}] ID: {} S/U/D: {}/{}/{} Ratio: {} Age: {} hours Link: {}'.format(
                i,
                sub.id,
                sub.score,
                ups,
                downs,
                ratio,
                abs(round((time.time() - sub.created_utc) / (60 * 60), 1)),
                sub.short_link))
            subcsv = SubmissionCSV(file_name=sub.id)
            subcsv.run(data_row=[time.time(), sub.score, ups, downs, ratio])
            time.sleep(2)

    def show_time_elapsed(self):
        # convert to hours
        time_elapsed = (time.time() - self.start_time) / (60 * 60)
        self.print('{} hours passed since start of script'.format(round(time_elapsed, 1)))


def main():
    v = VoteScraper()
    v.run()


if __name__ == '__main__':
    main()
| 37.585526 | 114 | 0.593033 | 688 | 5,713 | 4.765988 | 0.252907 | 0.067399 | 0.070448 | 0.029277 | 0.165294 | 0.093626 | 0.031717 | 0.031717 | 0.031717 | 0 | 0 | 0.009512 | 0.300718 | 5,713 | 151 | 115 | 37.834437 | 0.811264 | 0.097322 | 0 | 0.10084 | 0 | 0.008403 | 0.075233 | 0.005638 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.008403 | 0.05042 | 0 | 0.201681 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
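Editor's note: a worked example of the vote-recovery identity used in store_submissions_data above. From score = ups - downs and ratio = ups / (ups + downs), solving for ups gives ups = ratio * score / (2 * ratio - 1) whenever ratio != 0.5.

score, ratio = 80, 0.9
ups = int(round((ratio * score) / (2 * ratio - 1)))  # 72 / 0.8 = 90
downs = ups - score                                  # 90 - 80 = 10
assert (ups, downs) == (90, 10)
assert abs(ups / (ups + downs) - ratio) < 1e-9       # recovers the 0.9 ratio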
da8e99c09aabca3db1bc0e1af11da10e940286d6 | 2,549 | py | Python | tests/fixtures/dict_list/docket_list_with_homicide.py | SimmonsRitchie/court_docket_scraper | f467d59c4ea8dbddb4fd7545dc36656a4b30e46d | [
"MIT"
] | 1 | 2021-10-29T20:12:44.000Z | 2021-10-29T20:12:44.000Z | tests/fixtures/dict_list/docket_list_with_homicide.py | SimmonsRitchie/court_docket_scraper | f467d59c4ea8dbddb4fd7545dc36656a4b30e46d | [
"MIT"
] | 2 | 2019-07-19T20:13:16.000Z | 2019-07-19T20:13:16.000Z | tests/fixtures/dict_list/docket_list_with_homicide.py | SimmonsRitchie/court_docket_scraper | f467d59c4ea8dbddb4fd7545dc36656a4b30e46d | [
"MIT"
] | null | null | null |
docket_list = [
    {
        "county": "Dauphin",
        "docketnum": 1,
        "case_caption": "Commonwealth V. Smith, John A.",
        "arresting_agency": "Harrisburg PD",
        "municipality": "Harrisburg",
        "defendant": "John A. Smith",
        "defendant_race": "white",
        "defendant_gender": "Male",
        "dob": "01/01/1986",
        "filing_date": "03/03/2019",
        "charges": "Receiving Stolen Property; Driving W/O A License",
        "bail": 25000,
        "url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
    },
    {
        "county": "Dauphin",
        "docketnum": 2,
        "case_caption": "Commonwealth V. Smith, Duke A.",
        "arresting_agency": "Harrisburg PD",
        "municipality": "Harrisburg",
        "defendant": "Duke A. Smith",
        "defendant_race": "white",
        "defendant_gender": "Male",
        "dob": "01/01/1986",
        "filing_date": "03/03/2019",
        "charges": "Receiving Stolen Property; Driving W/O A License",
        "bail": 25000,
        "url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
    },
    {
        "county": "Dauphin",
        "docketnum": 3,
        "case_caption": "Commonwealth V. Smith, John A.",
        "arresting_agency": "Harrisburg PD",
        "municipality": "Harrisburg",
        "defendant": "John A. Smith",
        "defendant_race": "white",
        "defendant_gender": "Male",
        "dob": "01/01/1986",
        "filing_date": "03/03/2019",
        "charges": "Receiving Stolen Property; homicide; Driving W/O A "
                   "License",
        "bail": 25000,
        "url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
    },
    {
        "county": "Dauphin",
        "docketnum": 4,
        "case_caption": "Commonwealth V. Smith, John A.",
        "arresting_agency": "Harrisburg PD",
        "municipality": "Harrisburg",
        "defendant": "John A. Smith",
        "defendant_race": "white",
        "defendant_gender": "Male",
        "dob": "01/01/1986",
        "filing_date": "03/03/2019",
        "charges": "Receiving Stolen Property; Driving W/O A License; Murder",
        "bail": 25000,
        "url": "https://ujsportal.pacourts.us/DocketSheets/MDJReport.ashx?docketNumber=MJ-12302-CR-0000110-2019&dnh=zj8BkxXzkOi23xMzscQ6hw%3d%3d",
    }
]
| 39.828125 | 146 | 0.582581 | 264 | 2,549 | 5.545455 | 0.234848 | 0.020492 | 0.060109 | 0.065574 | 0.960383 | 0.940574 | 0.940574 | 0.940574 | 0.900273 | 0.900273 | 0 | 0.092098 | 0.250294 | 2,549 | 63 | 147 | 40.460317 | 0.673993 | 0 | 0 | 0.698413 | 0 | 0.063492 | 0.634759 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
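Editor's note: a small sketch of how a fixture like the one above is typically consumed in a test, flagging dockets whose charges mention a homicide-related offense.

KEYWORDS = ('homicide', 'murder')

flagged = [d['docketnum'] for d in docket_list
           if any(k in d['charges'].lower() for k in KEYWORDS)]
assert flagged == [3, 4]  # entries 3 and 4 carry 'homicide' / 'Murder' charges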
da9203437dccc2b66c4d623067b94d8a0a97c3de | 3,488 | py | Python | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null | DeleteBook.py | saurabhmaurya45/library-management-system | 2e489728068cca87ed58f493ac2524b6586f66cf | [
"Apache-2.0"
] | null | null | null |
from tkinter import *
import pymysql as ms
from tkinter import messagebox

# Add your own database name and password here to reflect in the code
mypass = "saurabh"
mydatabase = "library"

con = ms.connect(host="localhost", user="root", password=mypass, database=mydatabase)
cur = con.cursor()

# Enter Table Names here
bookTable = "books"  # Book Table


def deleteBook():
    bid = en1.get()
    try:
        a = int(bid)
        type1 = type(a)
        if type1 == int:
            print(True)
            cur.execute('select Book_Id from books')
            print(True)
            list = []
            for i in cur:
                getId = i[0]
                list.append(getId)
            print(True)
            if a in list:
                deleteSql = "delete from " + bookTable + " where Book_Id = '" + bid + "'"
                cur.execute(deleteSql)
                print(True)
                con.commit()
                print(True)
                # messagebox.showinfo('success',"Successfully deleted Book Id "+bid+" ")
                lb6 = Label(labelFrame, text="Successfully deleted book ", bg='black', fg='white',
                            font=("times new roman", 18, "bold"))
                lb6.place(relx=0.3, rely=0.75)
                print(True)
            else:
                lb6 = Label(labelFrame, text="Book deletion failed ", bg='black', fg='white',
                            font=("times new roman", 18, "bold"))
                lb6.place(relx=0.3, rely=0.75)
                # messagebox.showinfo('Error', "Please insert correct Book ID")
    except:
        messagebox.showinfo('Error', 'Invalid Book ID, must be number')
        print(bid)


def delete():
    global en1, con, cur, bookTable, root, labelFrame
    root = Tk()
    root.title("Library")
    root.minsize(width=400, height=400)
    root.geometry("1350x700+0+0")
    root.config(bg='#0099cc')

    title = Label(root, text="Welcome to Sterling's Library", bd=15, relief=GROOVE,
                  font=("algerian", 40, "bold"), bg="red", fg="white")
    title.pack(side=TOP, fill=X)

    labelFrame = Frame(root, bg='#333945', bd=10, relief=GROOVE)
    labelFrame.place(relx=0.1, rely=0.35, relwidth=0.8, relheight=0.35)

    headingFrame1 = Frame(root, bg="blue", bd=10, relief=GROOVE)
    headingFrame1.place(relx=0.25, rely=0.15, relwidth=0.60, relheight=0.13)

    headingLabel = Label(headingFrame1, text="DELETE BOOK", bg='blue', fg='white',
                         font=("bookman old style", 34, "bold"))
    headingLabel.place(relx=0.25, rely=0.15, relwidth=0.5, relheight=0.5)

    # Book ID to Delete
    lb2 = Label(labelFrame, text="Book ID : ", bg='black', fg='white', font=("bookman old style", 20, "bold"))
    lb2.place(relx=0.1, rely=0.33)
    en1 = Entry(labelFrame)
    en1.place(relx=0.3, rely=0.35, relwidth=0.62, relheight=0.15)

    # Submit Button
    SubmitBtn = Button(root, text="SUBMIT", bg='#d1ccc0', fg='black', font=("times new roman", 18, "bold"),
                       relief=GROOVE, bd=10, command=deleteBook)
    SubmitBtn.place(relx=0.28, rely=0.75, relwidth=0.18, relheight=0.08)

    quitBtn = Button(root, text="Quit", bg='#f7f1e3', fg='black', font=("times new roman", 18, "bold"), relief=GROOVE,
                     bd=10, command=root.quit)
    quitBtn.place(relx=0.53, rely=0.75, relwidth=0.18, relheight=0.08)

    root.mainloop()
| 35.591837 | 119 | 0.556479 | 443 | 3,488 | 4.376975 | 0.363431 | 0.041774 | 0.046416 | 0.03507 | 0.240846 | 0.22589 | 0.174317 | 0.174317 | 0.174317 | 0.114492 | 0 | 0.062041 | 0.297592 | 3,488 | 97 | 120 | 35.958763 | 0.729388 | 0.076261 | 0 | 0.149254 | 0 | 0 | 0.15061 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029851 | false | 0.029851 | 0.044776 | 0 | 0.074627 | 0.104478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
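Editor's note: the concatenated DELETE statement above is vulnerable to SQL injection. A safer sketch of the same lookup-and-delete using pymysql's parameter binding (same connection, table and column as the code above; delete_book_safely is a hypothetical helper):

def delete_book_safely(book_id):
    cur.execute("SELECT Book_Id FROM books WHERE Book_Id = %s", (book_id,))
    if cur.fetchone() is None:
        return False  # no such book
    cur.execute("DELETE FROM books WHERE Book_Id = %s", (book_id,))
    con.commit()
    return True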
da920dcd51ae362c5f26a3360b65c16937f31fe7 | 8,474 | py | Python | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null | asdf/extension.py | larrybradley/asdf | b1e0fe6ab7aa319d5939ec2aa78d23822abf6bd4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-

import os
import abc
import warnings
from pkg_resources import iter_entry_points

import six
import importlib

from . import types
from . import resolver
from .util import get_class_name
from .type_index import AsdfTypeIndex
from .version import version as asdf_version
from .exceptions import AsdfDeprecationWarning

__all__ = ['AsdfExtension', 'AsdfExtensionList']

ASDF_TEST_BUILD_ENV = 'ASDF_TEST_BUILD'


@six.add_metaclass(abc.ABCMeta)
class AsdfExtension:
    """
    Abstract base class defining an extension to ASDF.
    """
    @classmethod
    def __subclasshook__(cls, C):
        if cls is AsdfExtension:
            return (hasattr(C, 'types') and
                    hasattr(C, 'tag_mapping') and
                    hasattr(C, 'url_mapping'))
        return NotImplemented

    @abc.abstractproperty
    def types(self):
        """
        A list of `asdf.CustomType` subclasses that describe how to store
        custom objects to and from ASDF.
        """
        pass

    @abc.abstractproperty
    def tag_mapping(self):
        """
        A list of 2-tuples or callables mapping YAML tag prefixes to JSON Schema
        URL prefixes.

        For each entry:

        - If a 2-tuple, the first part of the tuple is a YAML tag
          prefix to match. The second part is a string, in which
          the following are available as Python formatting tokens:

          - ``{tag}``: the complete YAML tag.
          - ``{tag_suffix}``: the part of the YAML tag after the
            matched prefix.
          - ``{tag_prefix}``: the matched YAML tag prefix.

        - If a callable, it is passed the entire YAML tag and must return
          the entire JSON schema URL if it matches, otherwise, return `None`.

        Note that while JSON Schema URLs uniquely define a JSON
        Schema, they do not have to actually exist on an HTTP server
        and be fetchable (much like XML namespaces).

        For example, to match all YAML tags with the
        ``tag:nowhere.org:custom/`` prefix to the
        ``http://nowhere.org/schemas/custom/`` URL prefix::

           return [('tag:nowhere.org:custom/',
                    'http://nowhere.org/schemas/custom/{tag_suffix}')]
        """
        pass

    @abc.abstractproperty
    def url_mapping(self):
        """
        A list of 2-tuples or callables mapping JSON Schema URLs to
        other URLs. This is useful if the JSON Schemas are not
        actually fetchable at their corresponding URLs but are on the
        local filesystem, or, to save bandwidth, we have a copy of
        fetchable schemas on the local filesystem. If neither is
        desirable, it may simply be the empty list.

        For each entry:

        - If a 2-tuple, the first part is a URL prefix to match. The
          second part is a string, where the following are available
          as Python formatting tokens:

          - ``{url}``: The entire JSON schema URL
          - ``{url_prefix}``: The matched URL prefix
          - ``{url_suffix}``: The part of the URL after the prefix.

        - If a callable, it is passed the entire JSON Schema URL and
          must return a resolvable URL pointing to the schema content.
          If it doesn't match, should return `None`.

        For example, to map a remote HTTP URL prefix to files installed
        as data alongside a Python module::

           return [('http://nowhere.org/schemas/custom/1.0.0/',
                    asdf.util.filepath_to_url(
                        os.path.join(SCHEMA_PATH, 'stsci.edu')) +
                    '/{url_suffix}.yaml'
                   )]
        """
        pass


class AsdfExtensionList:
    """
    Manage a set of extensions that are in effect.
    """
    def __init__(self, extensions):
        tag_mapping = []
        url_mapping = []
        validators = {}
        self._type_index = AsdfTypeIndex()
        for extension in extensions:
            if not isinstance(extension, AsdfExtension):
                raise TypeError(
                    "Extension must implement asdf.types.AsdfExtension "
                    "interface")
            tag_mapping.extend(extension.tag_mapping)
            url_mapping.extend(extension.url_mapping)
            for typ in extension.types:
                self._type_index.add_type(typ, extension)
                validators.update(typ.validators)
                for sibling in typ.versioned_siblings:
                    self._type_index.add_type(sibling, extension)
                    validators.update(sibling.validators)
        self._tag_mapping = resolver.Resolver(tag_mapping, 'tag')
        self._url_mapping = resolver.Resolver(url_mapping, 'url')
        self._validators = validators

    @property
    def tag_to_schema_resolver(self):
        """Deprecated. Use `tag_mapping` instead"""
        warnings.warn(
            "The 'tag_to_schema_resolver' property is deprecated. Use "
            "'tag_mapping' instead.",
            AsdfDeprecationWarning)
        return self._tag_mapping

    @property
    def tag_mapping(self):
        return self._tag_mapping

    @property
    def url_mapping(self):
        return self._url_mapping

    @property
    def type_index(self):
        return self._type_index

    @property
    def validators(self):
        return self._validators


class BuiltinExtension:
    """
    This is the "extension" to ASDF that includes all the built-in
    tags. Even though it's not really an extension and it's always
    available, it's built in the same way as an extension.
    """
    @property
    def types(self):
        return types._all_asdftypes

    @property
    def tag_mapping(self):
        return resolver.DEFAULT_TAG_TO_URL_MAPPING

    @property
    def url_mapping(self):
        return resolver.DEFAULT_URL_MAPPING


class _DefaultExtensions:
    def __init__(self):
        self._extensions = []
        self._extension_list = None
        self._package_metadata = {}

    def _load_installed_extensions(self, group='asdf_extensions'):
        for entry_point in iter_entry_points(group=group):
            with warnings.catch_warnings(record=True) as w:
                warnings.simplefilter('always', category=AsdfDeprecationWarning)
                ext = entry_point.load()
                if not issubclass(ext, AsdfExtension):
                    warnings.warn("Found entry point {}, from {} but it is not a "
                                  "subclass of AsdfExtension, as expected. It is "
                                  "being ignored.".format(ext, entry_point.dist))
                    continue
                dist = entry_point.dist
                name = get_class_name(ext, instance=False)
                self._package_metadata[name] = (dist.project_name, dist.version)
                self._extensions.append(ext())
            for warning in w:
                warnings.warn('{} (from {})'.format(warning.message, name),
                              AsdfDeprecationWarning)

    @property
    def extensions(self):
        # This helps avoid a circular dependency with external packages
        if not self._extensions:
            # If this environment variable is defined, load the default
            # extension. This allows the package to be tested without being
            # installed (e.g. for builds on Debian).
            if os.environ.get(ASDF_TEST_BUILD_ENV):
                # Fake the extension metadata
                name = get_class_name(BuiltinExtension, instance=False)
                self._package_metadata[name] = ('asdf', asdf_version)
                self._extensions.append(BuiltinExtension())
            self._load_installed_extensions()
        return self._extensions

    @property
    def extension_list(self):
        if self._extension_list is None:
            self._extension_list = AsdfExtensionList(self.extensions)
        return self._extension_list

    @property
    def package_metadata(self):
        return self._package_metadata

    def reset(self):
        """This will be used primarily for testing purposes."""
        self._extensions = []
        self._extension_list = None
        self._package_metadata = {}

    def resolver(self, uri):
        tag_mapping = self.extension_list.tag_mapping
        url_mapping = self.extension_list.url_mapping
        return url_mapping(tag_mapping(uri))


default_extensions = _DefaultExtensions()
| 33.362205 | 80 | 0.623554 | 1,006 | 8,474 | 5.097416 | 0.269384 | 0.031201 | 0.023206 | 0.006435 | 0.205733 | 0.146451 | 0.115055 | 0.100234 | 0.081513 | 0.067473 | 0 | 0.001516 | 0.299504 | 8,474 | 253 | 81 | 33.494071 | 0.862365 | 0.345291 | 0 | 0.267176 | 0 | 0 | 0.070767 | 0.009462 | 0 | 0 | 0 | 0 | 0 | 1 | 0.152672 | false | 0.022901 | 0.091603 | 0.061069 | 0.381679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
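Editor's note: a minimal concrete extension sketch following the docstring examples in the record above; the nowhere.org prefixes and the file path are illustrative only. A class like this would be advertised through the asdf_extensions entry-point group that _load_installed_extensions scans.

class CustomExtension(AsdfExtension):
    @property
    def types(self):
        return []  # would list asdf.CustomType subclasses

    @property
    def tag_mapping(self):
        return [('tag:nowhere.org:custom/',
                 'http://nowhere.org/schemas/custom/{tag_suffix}')]

    @property
    def url_mapping(self):
        return [('http://nowhere.org/schemas/custom/',
                 'file:///path/to/schemas/custom/{url_suffix}.yaml')]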
da923abab5b7e2cb6e8f37c23f2fa4cc9504aff5 | 2,153 | py | Python | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 4 | 2019-06-28T20:36:33.000Z | 2022-01-04T21:49:52.000Z | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 1 | 2019-06-27T14:52:52.000Z | 2019-07-04T14:14:14.000Z | source/setup.py | Sylvain-Barde/mic-toolbox | 10d9d930a1a359aaa831f2f917eff357a3d5282e | [
"BSD-3-Clause"
] | 1 | 2019-06-27T13:33:42.000Z | 2019-06-27T13:33:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 12 14:48:07 2018
@author: sb636
"""
import os
import sys
from setuptools import setup, Extension, find_packages
from distutils.errors import DistutilsModuleError
# Check for cython installation
try:
from Cython.Distutils import build_ext as _build_ext
HAVE_CYTHON = True
except ImportError:
# As a fallback import the standard setuptools build_ext, and raise
# error about Cython later
from setuptools.command.build_ext import build_ext as _build_ext
HAVE_CYTHON = False
def scandir(dir, files=[]):
for file in os.listdir(dir):
path = os.path.join(dir, file)
if os.path.isfile(path) and path.endswith(".pyx"):
files.append(path.replace(os.path.sep, ".")[:-4])
elif os.path.isdir(path):
scandir(path, files)
return files
def makeExtension(extName):
extPath = extName.replace(".", os.path.sep)+".pyx"
return Extension(extName, [extPath])
class build_ext(_build_ext):
def initialize_options(self):
if not HAVE_CYTHON:
raise DistutilsModuleError(
'Cython is required to compile the package.\n'
'Cython can be obtained at www.cython.org or installed with '
'conda or pip.')
super(build_ext, self).initialize_options()
def finalize_options(self):
try:
import numpy
except ImportError:
            raise DistutilsModuleError('Building extension modules requires numpy')
for ext in self.distribution.ext_modules:
ext.include_dirs.extend([numpy.get_include(), '.'])
ext.cython_directives = {
"cdivision": True,
"cdivision_warnings": False
}
super(build_ext, self).finalize_options()
setup(
name="mic-toolbox",
version="0.1.0a1",
packages=find_packages(),
ext_modules=[makeExtension(name) for name in scandir('mic')],
cmdclass={'build_ext': build_ext},
options = {'build_ext': {'inplace': True, 'force': True}}
)
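# Illustrative mapping (assumption: a package layout like mic/foo/bar.pyx;
# not part of the original script):
#   scandir('mic')               -> ['mic.foo.bar', ...]
#   makeExtension('mic.foo.bar') == Extension('mic.foo.bar', ['mic/foo/bar.pyx'])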
| 29.902778 | 85 | 0.625639 | 258 | 2,153 | 5.100775 | 0.472868 | 0.079027 | 0.021277 | 0.024316 | 0.051672 | 0.051672 | 0.051672 | 0.051672 | 0 | 0 | 0 | 0.013325 | 0.267998 | 2,153 | 71 | 86 | 30.323944 | 0.821701 | 0.099861 | 0 | 0.081633 | 0 | 0 | 0.132615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081633 | false | 0 | 0.183673 | 0 | 0.326531 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da92ca41103ff60b1a50e24d1900c7aae0620a32 | 4,049 | py | Python | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | 10 | 2019-09-24T13:36:15.000Z | 2021-11-01T02:40:10.000Z | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | 2 | 2020-10-19T11:51:08.000Z | 2021-04-17T01:08:23.000Z | api-reconstruction/ipython_analysis.py | eurecom-s3/syscall2api | 2f2c72c759b0fd803fe1302c3b6717cda1906916 | [
"MIT"
] | null | null | null | #!/usr/local/bin/ipython3 -i
import sys
from analysis import *
import analysis.classes as classes
import nwalign as nw
kb = {}
apis = {}
syscalls = {}
regexes = {}
models = {}
models2 = {}
kb_file = 'kb_no_empties.pickle'
regex_file = 'new_regex.pickle'
models_file = 'models.pickle'
models2_file = 'models2.pickle'
symbols_file = 'symbols.pickle'
leaf_models = {}
def first_run():
global kb
global apis
global syscalls
global regexes
global models
global symbols_file
kb_file = "pruned_db.pickle"
if not Path(kb_file).is_file():
print("Error: No KB file found", file=sys.stderr)
sys.exit(1)
with open(kb_file, "rb") as pf:
d = pickle.load(pf)
syscalls = pickle.load(pf)
d = prune_kb_from_signals(d)
print("Finding leaf apis")
leaves = find_leaves(d)
print("Finding strong polymorph apis")
polymorph = find_polymorph(d)
print("Finding empty apis")
empties = find_empties(d)
print("Finding 0Sys apis")
no_sys = find_no_syscall_apis(d)
print("Finding 0IndSys apis")
no_ind_sys = find_no_indirect_sys(d)
apis = set(d.keys())
print("Finding no-leaf apis")
no_leaves = apis - leaves
print("Finding weak monomorph apis")
monomorph = apis - polymorph
print("Finding 1+Sys apis")
    # note: this local must not be named `sys`, or it would shadow the module
    # for the whole function and make the earlier sys.exit(1) raise
    # UnboundLocalError
    sys_apis = apis - no_sys
print("Finding 1+IndSys apis")
ind_sys = apis - no_ind_sys
print("Finding weak polymorph")
weak_polymorph = find_weak_polymorph(d)
print("Finding strong monomorph apis")
strong_monomorph = apis - weak_polymorph
print("Building models for strong monomorph apis")
precise_models = build_precise_models(d, strong_monomorph)
print("Building models for implicit monomorph apis")
implicit_precise_models = find_implicit_monomorph_models(d, precise_models)
print("Finding empty/non-empty models")
empty_models = {api for api, model in implicit_precise_models.items()
if len(model) == 0}
non_empty_models = {api: model
for api, model in implicit_precise_models.items()
if api not in empty_models}
strong_monomorph |= set(implicit_precise_models.keys())
# checks that no_ind_sys is a subset of no_sys
check_0sys(no_sys, no_ind_sys)
check_polymorph(weak_polymorph, polymorph)
check_empties_have_precise_model(empties, precise_models)
check_implicit_precise_models(implicit_precise_models, precise_models)
check_empties_have_empty_model(empties, empty_models)
kb = prune_kb_from_empties(d, empty_models)
with open('kb_no_empties.pickle', 'wb') as pf:
pickle.dump(kb, pf)
pickle.dump(syscalls, pf)
with open(symbols_file, 'wb') as pf:
pickle.dump(set(kb.keys()), pf)
pickle.dump(syscalls, pf)
def load_kb_no_empties():
global kb
global syscalls
global apis
global regexes
global regexes_test
global test_results
global models
global kb_file
global regex_file
global models_file
global symbols_file
global leaf_models
global models2
print("Loading KB")
with open(kb_file, "rb") as pf:
sys.modules['classes'] = classes
        kb = pickle.load(pf)
syscalls = pickle.load(pf)
print("Loading symbols")
apis, syscalls = load_symbols(symbols_file)
kb = prune_kb_from_signals(kb)
# print("Loading regexes")
# f = open(regex_file, 'rb')
# regexes_test = pickle.load(f)
# f.close()
# regexes, test_results = regexes_split_test_results(regexes_test)
print("Loading generic models")
models = load_models(models_file)
print("Loading not-so-generic models")
models2 = load_models(models2_file)
symbols_generator(apis | syscalls.keys())
leaf_models = find_leaves_models(models, syscalls)
if __name__ == '__main__':
if (not Path(kb_file).is_file()
or not Path(models_file).is_file()
or not Path(symbols_file).is_file()):
first_run()
else:
load_kb_no_empties()
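# Illustrative invocation (derived from the shebang above): run the script
# under IPython and stay in the interactive shell with the globals populated.
#   ipython3 -i ipython_analysis.py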
| 28.716312 | 79 | 0.678686 | 551 | 4,049 | 4.727768 | 0.185118 | 0.055278 | 0.029942 | 0.013052 | 0.127447 | 0.098273 | 0.087524 | 0.031478 | 0.031478 | 0 | 0 | 0.004456 | 0.224006 | 4,049 | 140 | 80 | 28.921429 | 0.824634 | 0.056557 | 0 | 0.160714 | 0 | 0 | 0.153987 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017857 | false | 0 | 0.035714 | 0 | 0.053571 | 0.169643 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
da92e435dd529ef7c6adb64e5afe197974724936 | 660 | py | Python | tests/client/test_spread_sheet.py | shin-hama/git-svn-monitor | acb793c2da63d6802efa8e0e6c99482f4fad0f80 | [
"MIT"
] | null | null | null | tests/client/test_spread_sheet.py | shin-hama/git-svn-monitor | acb793c2da63d6802efa8e0e6c99482f4fad0f80 | [
"MIT"
] | 36 | 2021-07-12T00:08:03.000Z | 2022-03-25T11:19:39.000Z | tests/client/test_spread_sheet.py | shin-hama/git-svn-monitor | acb793c2da63d6802efa8e0e6c99482f4fad0f80 | [
"MIT"
] | null | null | null | from datetime import datetime
from git_svn_monitor.client import spread_seat
from git_svn_monitor.core.config import TIMESTAMP_FORMAT
def test__convert_datetime_to_str() -> None:
timestamp = "2021-01-01 12:34:56"
_timestamp = datetime.strptime(timestamp, TIMESTAMP_FORMAT)
converted = spread_seat._convert_to_str(_timestamp)
assert timestamp == converted
def test__convert_int_to_str() -> None:
num = 1
converted = spread_seat._convert_to_str(num)
assert converted == "1"
def test__convert_str_with_new_line() -> None:
text = "new line"
    converted = spread_seat._convert_to_str(text + "\n")
assert converted == text
| 27.5 | 63 | 0.75 | 92 | 660 | 4.978261 | 0.380435 | 0.054585 | 0.091703 | 0.170306 | 0.203057 | 0.203057 | 0 | 0 | 0 | 0 | 0 | 0.028933 | 0.162121 | 660 | 23 | 64 | 28.695652 | 0.799277 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0.1875 | 1 | 0.1875 | false | 0 | 0.1875 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
16f0b6155221bd21f39e5a25133a8324a5286c72 | 3,785 | py | Python | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | 2 | 2021-12-10T10:20:08.000Z | 2022-01-06T11:15:43.000Z | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | accessdata/api/extensions.py | AccessDataOps/FTK-API-SDK | 34e689a55eadacc51e6ff585e9126799f80e269a | [
"MIT"
] | null | null | null | ## /api/extensions.py
"""
Maintains the API endpoint URI extensions.
"""
## Declaring __all__
__all__ = (
"status_check_ext",
"site_server_status_check_ext",
"case_create_ext",
"case_list_ext",
"case_create_portable_ext",
"evidence_list_ext",
"evidence_processed_list_ext",
"evidence_process_ext",
"object_page_list_ext",
"label_create_ext"
"label_list_ext"
"label_objects_job_ext"
"label_objects_list_ext"
"label_objects_count_ext"
"label_objects_sync_ext"
"search_report_ext",
"export_natives_ext",
"agent_push_ext",
"agent_collection_ext",
"agent_disk_acquisition_ext",
"agent_memory_acquisition_ext",
"agent_remediation_ext",
"agent_software_inventory_ext",
"agent_volatile_analysis_ext",
"agent_volatile_import_ext",
"job_status_ext",
"attribute_list_ext",
"attribute_list_by_case_ext",
"child_file_categories_ext",
"processing_case_ext",
"server_setting_ext",
"yara_ioc_rule_import_ext",
)
## Predefined Constants
DELETE = "delete"
GET = "get"
PATCH = "patch"
POST = "post"
PUT = "put"
## Status Extensions
base_ext = "api/v2/enterpriseapi"
status_check_ext = GET, base_ext + "/statuscheck"
site_server_status_check_ext = GET, base_ext + "/agent/getsiteserverstatus"
## Case Management Extensions
case_create_ext = POST, base_ext + "/core/createcase"
case_list_ext = GET, base_ext + "/core/getcaselist"
case_create_portable_ext = POST, base_ext + "/core/{caseid}/createportablecase"
## Evidence Management Extensions
evidence_list_ext = GET, base_ext + "/core/{caseid}/getevidencelist"
evidence_processed_list_ext = GET, base_ext + "/core/{caseid}/getprocessedevidencelist"
evidence_process_ext = POST, base_ext + "/core/{caseid}/processdata"
## Object Management Extensions
object_page_list_ext = POST, base_ext + "/core/{caseid}/getobjectlist/{pagenumber}/{pagesize}"
## Label Management Extensions
label_create_ext = POST, base_ext + "/core/{caseid}/createlabel"
label_list_ext = GET, base_ext + "/core/{caseid}/getlabellist"
label_objects_job_ext = POST, base_ext + "/jobs/{caseid}/labelobjects"
label_objects_list_ext = GET, base_ext + "/core/cases/{caseid}/label/{labelid}/evidenceobjects"
label_objects_count_ext = GET, base_ext + "/core/cases/{caseid}/label/{labelid}/objectscount"
label_objects_sync_ext = POST, base_ext + "/{caseid}/labelobjectssync"
## Search Extensions
search_report_ext = POST, base_ext + "/jobs/{caseid}/createsearchcountreport"
## Export Extenstions
export_natives_ext = POST, base_ext + "/jobs/{caseid}/dumpnativeobjects"
## Agent Management Extensions
agent_push_ext = POST, base_ext + "/agent/{caseid}/runagentpush"
agent_collection_ext = POST, base_ext + "/agent/{caseid}/collectiononagent"
agent_disk_acquisition_ext = POST, base_ext + "/agent/{caseid}/diskacquistion"
agent_memory_acquisition_ext = POST, base_ext + "/agent/{caseid}/memoryacquistion"
agent_remediation_ext = POST, base_ext + "/agent/{caseid}/remediate"
agent_software_inventory_ext = POST, base_ext + "/agent/{caseid}/softwareinventory"
agent_volatile_analysis_ext = POST, base_ext + "/agent/{caseid}/volatile"
agent_volatile_import_ext = GET, base_ext + "/agent/{caseid}/importvolatile/{jobid}"
## Generic Job Extensions
job_status_ext = GET, base_ext + "/core/{caseid}/getjobstatus/{jobid}"
## Utility Extensions
attribute_list_ext = GET, base_ext + "/core/getallattributes"
attribute_list_by_case_ext = GET, base_ext + "/core/{caseid}/getallattributesbycaseid"
child_file_categories_ext = GET, base_ext + "/core/getchildrenfilecategories"
processing_case_ext = GET, base_ext + "/processingcaseid"
server_setting_ext = GET, base_ext + "/core/getserversetting/{setting}"
yara_ioc_rule_import_ext = POST, base_ext + "/agent/importiocandyara" | 33.495575 | 97 | 0.763804 | 476 | 3,785 | 5.638655 | 0.22479 | 0.086066 | 0.069672 | 0.088674 | 0.335693 | 0.259314 | 0.086811 | 0.029806 | 0.029806 | 0 | 0 | 0.000298 | 0.113606 | 3,785 | 113 | 98 | 33.495575 | 0.799702 | 0.090092 | 0 | 0 | 0 | 0 | 0.491501 | 0.382474 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.055556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
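# Illustrative consumption of the (method, url-template) pairs above
# (assumption; the original module only defines the constants, and `host`
# below is a placeholder):
#   method, template = job_status_ext
#   url = "https://" + host + "/" + template.format(caseid=7, jobid=3)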
16f0f93ca79ae51931ef205e9c059a600e80445c | 1,982 | py | Python | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | 3 | 2021-01-12T05:43:20.000Z | 2021-03-05T17:03:06.000Z | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | null | null | null | evaluators/dialog/state/distinct.py | kaniblu/vhda | 35941097ef552568c29f66cc55d8ce1927f34978 | [
"MIT"
] | null | null | null | __all__ = ["DistinctStateEvaluator"]
from dataclasses import dataclass
from typing import Sequence, Optional
import torch
import utils
from utils import TensorMap
from datasets import VocabSet
from ...evaluator import DialogEvaluator
@dataclass
class DistinctStateEvaluator(DialogEvaluator):
    vocabs: VocabSet
    # assumed default: compute() below reads self.ngrams, which this file
    # never defines elsewhere
    ngrams: Sequence[int] = (1,)
    _values: dict = utils.private_field(default_factory=dict)
def reset(self):
self._values.clear()
@property
def speakers(self):
return set(spkr for spkr in self.vocabs.speaker.f2i if spkr != "<unk>")
@staticmethod
def compute_distinct(tokens):
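        # e.g. compute_distinct(["a", "b", "a"]) -> tensor(0.6667):
        # 2 unique tokens out of 3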
if len(tokens) == 0:
return torch.tensor(0.0)
return torch.tensor(len(set(tokens)) / len(tokens))
    def compute(self, samples: Sequence, spkr=None):
        # note: compute_distinct takes only the token sequence; the original
        # passed an extra ngram-order argument the helper does not accept.
        # This helper is not used by update()/get() below.
        return {i: [self.compute_distinct(turn.text)
                    for sample in samples for turn in sample.output.turns
                    if spkr is None or turn.speaker == spkr]
                for i in self.ngrams}
def update(self, samples: Sequence) -> Optional[TensorMap]:
for sample in samples:
asvs = [asv for turn in sample.output if turn.speaker != "<unk>"
for asv in turn.state]
spkr_asvs = {spkr: [asv for turn in sample.output
if turn.speaker != "<unk>"
for asv in turn.state]
for spkr in self.speakers}
stats = {"dist-a": self.compute_distinct(asvs)}
stats.update({
f"dist-a-{spkr}": self.compute_distinct(spkr_asvs[spkr])
for spkr in self.speakers
})
for k, v in stats.items():
if k not in self._values:
self._values[k] = list()
self._values[k].append(v.item())
return
def get(self) -> Optional[TensorMap]:
return {k: torch.tensor(v).mean() for k, v in self._values.items()}
| 33.59322 | 79 | 0.584258 | 239 | 1,982 | 4.769874 | 0.309623 | 0.031579 | 0.023684 | 0.034211 | 0.173684 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0 | 0.002952 | 0.316347 | 1,982 | 58 | 80 | 34.172414 | 0.838376 | 0 | 0 | 0.042553 | 0 | 0 | 0.028254 | 0.0111 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.148936 | 0.06383 | 0.468085 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f2f95568be402c343f83e95bd816466c4a6dd1 | 1,788 | py | Python | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | null | null | null | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | 1 | 2020-12-05T14:22:03.000Z | 2020-12-05T14:22:03.000Z | src/dataset/manually_labeled_bases.py | yullidias/AutomaticIronyDetection | 3297ddc4ecc97e840b00df4ba4f9e6b8e710fdb9 | [
"MIT"
] | null | null | null | import src.utils.constants as cns
from src.utils.files import write_list
import pandas as pd
import glob
import os
def read_sheets():
manually_labeled_df = pd.DataFrame()
for sheet in glob.glob(cns.PATH_LABELED + '*'):
manually_labeled_df = manually_labeled_df.append(
pd.read_excel(sheet, index_col=0), ignore_index=True)
return manually_labeled_df
def rename_columns(dataset):
return dataset.rename(columns={
"pathOriginal": "path_ask",
"tweet 'Pergunta'": "reply_response_tweet",
"pathTweet": "id",
"tweet a ser avaliado": "tweet",
"rotulo": "label"
})
def parser_label(label):
if label == "Irônico":
return cns.IRONIC_LABEL
elif label == "Não irônico":
return cns.NOT_IRONIC_LABEL
else:
return cns.DONT_KNOW_LABLE
def update_label(df, col):
df[col] = df[col].apply(parser_label)
def path_to_id(df, col):
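    # e.g. '/tmp/tweets/12345.json' -> '12345'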
df[col] = df[col].apply(lambda x: os.path.basename(x)
.split('.json')[0])
def get_by_label(df, label):
return df[df["label"] == label]
def generate_manually_bases():
labled_df = read_sheets()
path_to_id(labled_df, "pathTweet")
labled_df = rename_columns(labled_df)
labled_df = labled_df[["id", "label"]]
update_label(labled_df, "label")
print("Generate base manually labeled as ironic ...")
write_list(cns.B_M_IRONIC,
get_by_label(labled_df, cns.IRONIC_LABEL)["id"].to_list())
print("Generate base manually labeled as not ironic ...")
write_list(cns.B_M_NOT_IRONIC,
get_by_label(labled_df, cns.NOT_IRONIC_LABEL)["id"].to_list())
return labled_df
if __name__ == "__main__":
generate_manually_bases()
| 26.294118 | 77 | 0.644295 | 243 | 1,788 | 4.440329 | 0.325103 | 0.074143 | 0.063021 | 0.037071 | 0.222428 | 0.18721 | 0.087118 | 0 | 0 | 0 | 0 | 0.001461 | 0.23434 | 1,788 | 67 | 78 | 26.686567 | 0.786706 | 0 | 0 | 0 | 0 | 0 | 0.143736 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.145833 | false | 0 | 0.104167 | 0.041667 | 0.395833 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f3b2b6831a8b521ba1a14a11fa6b224ec5222f | 1,785 | py | Python | cryptotest.py | xebia/django-DefectDojo | 7bc6695bd8fb93e23b0d8ed8326f5d01283eadaa | [
"BSD-3-Clause"
] | null | null | null | cryptotest.py | xebia/django-DefectDojo | 7bc6695bd8fb93e23b0d8ed8326f5d01283eadaa | [
"BSD-3-Clause"
] | null | null | null | cryptotest.py | xebia/django-DefectDojo | 7bc6695bd8fb93e23b0d8ed8326f5d01283eadaa | [
"BSD-3-Clause"
] | 1 | 2017-09-22T20:39:39.000Z | 2017-09-22T20:39:39.000Z | import binascii, os
from Crypto.Cipher import AES
KEY = 'a0b8c7398c9363b3216ff1d001a1308e5f96a77dbf6bda2367f87519d80995fb'
IV = os.urandom(16)
def encrypt(key, iv, plaintext):
aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
plaintext = _pad_string(plaintext)
encrypted_text = aes.encrypt(plaintext)
return binascii.b2a_hex(encrypted_text).rstrip()
def decrypt(key, iv, encrypted_text):
aes = AES.new(key, AES.MODE_CBC, iv, segment_size=128)
encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
decrypted_text = aes.decrypt(encrypted_text_bytes)
decrypted_text = _unpad_string(decrypted_text)
return decrypted_text
def _pad_string(value):
length = len(value)
pad_size = 16 - (length % 16)
return value.ljust(length + pad_size, '\x00')
def _unpad_string(value):
while value[-1] == '\x00':
value = value[:-1]
return value
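# caveat: zero-byte padding cannot round-trip plaintexts that themselves end
# in '\x00' bytes; PKCS#7-style padding avoids that ambiguity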
def prepare_for_save(IV, encrypted_value):
stored_value = "AES.1:" + binascii.b2a_hex(IV).rstrip() + ":" + encrypted_value
return stored_value
def prepare_for_view(encrypted_value):
encrypted_values = encrypted_value.split(":")
type = encrypted_values[0]
iv = binascii.a2b_hex(encrypted_values[1]).rstrip()
value = encrypted_values[2]
return decrypt(KEY, iv, value)
if __name__ == '__main__':
input_plaintext = 'The answer is no'
encrypted_text = encrypt(KEY, IV, input_plaintext)
print encrypted_text
decrypted_text = decrypt(KEY, IV, encrypted_text)
print decrypted_text
print prepare_for_save(IV, encrypted_text)
print "*****"
print prepare_for_view("AES.1:fff2e6659bef045f25f8249d36f58789:178e6f316b680b486e4e6b8cc79f589e")
assert decrypted_text == input_plaintext
| 31.875 | 101 | 0.729412 | 228 | 1,785 | 5.412281 | 0.276316 | 0.115883 | 0.034036 | 0.019449 | 0.19611 | 0.115073 | 0.061588 | 0.061588 | 0.061588 | 0.061588 | 0 | 0.075067 | 0.164146 | 1,785 | 55 | 102 | 32.454545 | 0.752011 | 0 | 0 | 0.046512 | 0 | 0 | 0.10084 | 0.07563 | 0 | 0 | 0 | 0 | 0.023256 | 0 | null | null | 0 | 0.046512 | null | null | 0.116279 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
16f4d90e8a8de6335b4d40090aa8cb9b83b7e850 | 871 | py | Python | Larry/preprocess.py | NCBI-Hackathons/ClusterDuck | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 7 | 2019-02-19T15:10:24.000Z | 2020-05-31T00:41:13.000Z | Larry/preprocess.py | NCBI-Hackathons/ClusterDuck | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 11 | 2018-03-21T20:01:32.000Z | 2022-03-11T23:19:40.000Z | Larry/preprocess.py | NCBI-Hackathons/DiseaseClusters | 1d5478500dffea973f96affd969783278193aa8a | [
"MIT"
] | 3 | 2018-03-19T13:14:23.000Z | 2018-03-20T14:13:38.000Z | from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer
STOPWORDS = set(stopwords.words('english'))
# Instantiate Lemmanizer
WNL = WordNetLemmatizer()
def preprocess(abstract, keywords=None):
"""
Convert an abstract to word tokens. This is done by lowering the case
    of the text, tokenizing the text, removing English stopwords and
    punctuation, and finally lemmatizing the words.
    Args:
        abstract: (str)
    Return:
        list of str
"""
# Lowercase all words
abstract = abstract.lower()
# tokenize words, remove punctuation
tokenizer = RegexpTokenizer(r'\w[\w-]+')
tokens = tokenizer.tokenize(abstract)
# Remove stopwords and lemmatize tokens
words = [WNL.lemmatize(word) for word in tokens if word not in STOPWORDS]
return words
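# Illustrative usage (assumes the NLTK 'stopwords' and 'wordnet' corpora have
# been downloaded):
#   preprocess("The mice were running wildly.")
#   -> e.g. ['mouse', 'running', 'wildly']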
| 26.393939 | 77 | 0.6969 | 102 | 871 | 5.95098 | 0.539216 | 0.039539 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.228473 | 871 | 32 | 78 | 27.21875 | 0.903274 | 0.394948 | 0 | 0 | 0 | 0 | 0.031513 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f520db46f1b8b8a53e17ff7a93ac14fed25f00 | 16,848 | py | Python | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | 1 | 2021-04-20T13:49:55.000Z | 2021-04-20T13:49:55.000Z | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | 1 | 2021-04-18T18:27:49.000Z | 2021-04-18T18:27:49.000Z | Codes/env.py | zongdaoming/Reinforcement-Learning | 426b646b1184e96d8a0f6c6341e53b13ef89ea12 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author : naive dormin
# @time : 2021/04/19 02:17:43
# @version : 1.0.0
import os
import time
import numpy as np
import random
from utils import *
import pickle
from ConvE import ConvE_double
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
byteTensor = torch.cuda.ByteTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
byteTensor = torch.ByteTensor
class Env(object):
"""knowledge graph environment definition"""
def __init__(self, dataPath, task=None, model="TransE"):
f1 = open(dataPath + 'entity2id.txt')
f2 = open(dataPath + 'relation2id.txt')
self.entity2id = f1.readlines()
self.relation2id = f2.readlines()
f1.close()
f2.close()
self.entity2id_ = {}
self.relation2id_ = {}
self.id2entity_ = {}
self.id2relation_ = {}
self.relations = []
for line in self.entity2id:
self.entity2id_[line.split()[0]] = int(line.split()[1])
self.id2entity_[int(line.split()[1])] = line.split()[0]
for line in self.relation2id:
self.relation2id_[line.split()[0]] = int(line.split()[1])
self.id2relation_[int(line.split()[1])] = line.split()[0]
self.relations.append(line.split()[0])
        # Which model's pretrained embeddings of entities and relations to use
        # (these embeddings define the states)
if model == "TransH":
print("Uses TransH")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_relation_embedding.txt')
self.norm2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransH_norm_embedding.txt')
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
w_r = self.norm2vec[self.relation2id_[relation]]
new_entity2vec = self.entity2vec - \
np.sum(self.entity2vec * w_r, axis=1, keepdims=True) * w_r
self.entity2vec = new_entity2vec
elif model == "TransR":
print("Uses TransR")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransR_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransR_relation_embedding.txt')
self.projection2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransR_norm_embedding.txt")
dim = int(np.sqrt(self.projection2vec.shape[1]))
# By default, entities and relations share the same dimension
# This is not the main point of research
self.projection2vec = self.projection2vec.reshape([-1, dim, dim])
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
M_vec = self.projection2vec[self.relation2id_[relation], :, :]
new_entity2vec = np.matmul(M_vec, self.entity2vec.T).T
self.entity2vec = new_entity2vec
elif model == "TransD":
print("Uses TransD")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransD_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_1.0_TransD_relation_embedding.txt')
self.ent_norm2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransD_ent_norm_embedding.txt")
self.rel_norm2vec = np.loadtxt(
dataPath + "NELL-995_100_1.0_TransD_rel_norm_embedding.txt")
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
rel_proj = self.rel_norm2vec[self.relation2id_[relation]]
new_entity2vec = self.entity2vec + \
np.sum(self.entity2vec * self.ent_norm2vec,
axis=1, keepdims=True) * rel_proj
self.entity2vec = new_entity2vec
elif model == "ProjE":
print("Uses ProjE")
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_ProjE_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_ProjE_relation_embedding.txt')
self.simple_hr_combination_weights = np.loadtxt(
dataPath + "NELL-995_100_ProjE_simple_hr_combination_weights.txt")
self.simple_tr_combination_weights = np.loadtxt(
dataPath + "NELL-995_100_ProjE_simple_tr_combination_weights.txt")
self.combination_bias_hr = np.loadtxt(
dataPath + "NELL-995_100_ProjE_combination_bias_hr.txt")
self.combination_bias_tr = np.loadtxt(
dataPath + "NELL-995_100_ProjE_combination_bias_tr.txt")
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
dim = self.entity2vec.shape[1]
r = self.relation2vec[[self.relation2id_[relation]]]
# ent_mat = np.transpose(self.entity2vec)
hr = self.entity2vec * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
new_entity2vec = np.tanh(hr + self.combination_bias_hr)
self.entity2vec = new_entity2vec
elif model == "ConvE":
print("Uses ConvE")
start_time = time.time()
self.entity2vec = np.loadtxt(
dataPath + 'NELL-995_100_ConvE_entity_embedding.txt')
self.relation2vec = np.loadtxt(
dataPath + 'NELL-995_100_ConvE_relation_embedding.txt')
self.TransE_to_ConvE_id_entity = {}
with open(dataPath + "TransE_to_ConvE_entity_id.txt") as fr:
for line in fr:
line_list = line.strip().split()
self.TransE_to_ConvE_id_entity[int(
line_list[0])] = int(line_list[1])
self.TransE_to_ConvE_id_relation = {}
with open(dataPath + "TransE_to_ConvE_relation_id.txt") as fr:
for line in fr:
line_list = line.strip().split()
self.TransE_to_ConvE_id_relation[int(
line_list[0])] = int(line_list[1])
homepath = os.path.expanduser('~')
token2idx_ent, idx2token_ent, label2idx_ent, idx2label_ent = pickle.load(
open(homepath + "/.data/NELL-995/vocab_e1", 'rb'))
token2idx_rel, idx2token_rel, label2idx_rel, idx2label_rel = pickle.load(
open(homepath + "/.data/NELL-995/vocab_rel", 'rb'))
self.ConvE_model = ConvE_double(
len(token2idx_ent), len(token2idx_rel))
model_params = torch.load(
dataPath + "NELL-995_ConvE_0.2_0.3_100.model")
self.ConvE_model.load_state_dict(model_params)
for parameter in self.ConvE_model.parameters():
parameter.requires_grad = False
if USE_CUDA:
self.ConvE_model.cuda()
if task is not None:
relation = task.strip().split()[2].replace('_', ':')
rel_id = token2idx_rel[relation]
ConvE_ent_id_list = [self.TransE_to_ConvE_id_entity[i]
for i in range(len(self.TransE_to_ConvE_id_entity))]
new_entity2vec_list = []
bs = self.ConvE_model.batch_size
batch_count = len(ConvE_ent_id_list) // bs
for i in range(batch_count):
x_middle, output = self.ConvE_model(longTensor(
ConvE_ent_id_list[i * bs: (i + 1) * bs]), longTensor([rel_id] * bs))
new_entity2vec_list.append(x_middle.cpu())
if len(ConvE_ent_id_list) % bs != 0:
input_ent_list = ConvE_ent_id_list[batch_count * bs:] + [
0] * (bs - len(ConvE_ent_id_list) % bs)
x_middle, output = self.ConvE_model(longTensor(
input_ent_list), longTensor([rel_id] * bs))
new_entity2vec_list.append(
x_middle[: len(ConvE_ent_id_list) % bs].cpu())
self.entity2vec = torch.cat(new_entity2vec_list).numpy()
torch.cuda.empty_cache()
"""
else:
if USE_CUDA:
self.ConvE_model.cuda()
"""
end_time = time.time()
print("Embedding calculation time: ", end_time - start_time)
else:
print("Default. Uses TransE")
self.entity2vec = np.loadtxt(dataPath + 'entity2vec.bern')
self.relation2vec = np.loadtxt(dataPath + 'relation2vec.bern')
if task is None:
self.embedding_precomputed_flag = False
else:
self.embedding_precomputed_flag = True
self.model = model
self.path = []
self.path_relations = []
# Knowledge Graph for path finding
f = open(dataPath + 'kb_env_rl.txt')
kb_all = f.readlines()
f.close()
self.kb = []
if task != None:
relation = task.split()[2] # Remove query relation and its inverse
for line in kb_all:
rel = line.split()[2]
if rel != relation and rel != relation + '_inv':
self.kb.append(line)
else:
for line in kb_all:
self.kb.append(line)
self.entity2link = {}
# Build the dictionary. Attention: they are all represented with numbers!
for line in self.kb:
line_list = line.strip().split()
head = self.entity2id_[line_list[0]]
tail = self.entity2id_[line_list[1]]
rel = self.relation2id_[line_list[2]]
if head not in self.entity2link:
self.entity2link[head] = {rel: [tail]}
elif rel not in self.entity2link[head]:
self.entity2link[head][rel] = [tail]
else:
self.entity2link[head][rel].append(tail)
        self.die = 0  # records how many consecutive times the agent chooses an invalid action
self.banned_action_list = []
def interact(self, state, action):
# state and action are all represented with numbers
# print("Die: ", self.die)
'''
        This function processes one interaction step from the agent.
state: is [current_position, target_position, die]
action: an integer
return: (reward, [new_position, target_position, die], done)
'''
done = 0 # Whether the episode has finished
curr_pos = state[0]
target_pos = state[1]
if action in self.banned_action_list:
# print("Type 1")
choices = []
elif curr_pos not in self.entity2link:
# print("Type 2", curr_pos)
choices = []
elif action not in self.entity2link[curr_pos]:
# print("Type 3")
choices = []
else:
# print("Type 4")
choices = self.entity2link[curr_pos][action]
"""
chosed_relation = self.relations[action]
choices = []
for line in self.kb:
triple = line.rsplit()
e1_idx = self.entity2id_[triple[0]]
if curr_pos == e1_idx and triple[2] == chosed_relation and triple[1] in self.entity2id_:
choices.append(triple)
"""
        if len(choices) == 0:  # no valid step found for this action
# print("No proper path! ")
reward = -1
self.die += 1
next_state = state # stay in the initial state
next_state[-1] = self.die # Total failure times
# print(next_state)
return (reward, next_state, done)
else: # find a valid step
# print("Proper path exists! ")
# Randomly choose one from multiple choices
chose_entity = random.choice(choices)
# path[2]: relation;path[1]: tail entity(the next entity)
self.path.append(self.id2relation_[
action] + ' -> ' + self.id2entity_[chose_entity])
self.path_relations.append(self.id2relation_[action]) # Relation
# print 'Find a valid step', path
# print 'Action index', action
self.die = 0
new_pos = chose_entity # Using the next entity as the new position
reward = 0 # Reward is zero means the action is valid
new_state = [new_pos, target_pos, self.die]
if new_pos == target_pos:
print('Find a path:', self.path)
done = 1 # episode finished
reward = 0 # reward is 0 means the episode is successful
new_state = None
# print(new_state)
return (reward, new_state, done)
def idx_state(self, idx_list, relation=None): # Calculate state vector
if idx_list != None:
curr = self.entity2vec[idx_list[0], :]
targ = self.entity2vec[idx_list[1], :]
if self.embedding_precomputed_flag == True or relation is None:
pass
else:
if self.model == "TransH":
w_r = self.norm2vec[relation]
curr = curr - np.sum(curr * w_r) * w_r
targ = targ - np.sum(targ * w_r) * w_r
elif self.model == "TransR":
M_vec = self.projection2vec[relation, :, :]
curr = np.matmul(M_vec, curr.T).T
targ = np.matmul(M_vec, targ.T).T
elif self.model == "TransD":
rel_proj = self.rel_norm2vec[relation]
curr = curr + \
np.sum(
curr * self.ent_norm2vec[idx_list[0]]) * rel_proj
targ = targ + \
np.sum(
targ * self.ent_norm2vec[idx_list[1]]) * rel_proj
elif self.model == "ProjE":
dim = self.entity2vec.shape[1]
r = self.relation2vec[relation]
curr = curr * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
curr = np.tanh(curr + self.combination_bias_hr)
targ = targ * \
self.simple_hr_combination_weights[:dim] + \
r * self.simple_hr_combination_weights[dim:]
targ = np.tanh(targ + self.combination_bias_hr)
elif self.model == "ConvE":
curr_id = self.TransE_to_ConvE_id_entity[idx_list[0]]
targ_id = self.TransE_to_ConvE_id_entity[idx_list[1]]
rel_id = self.TransE_to_ConvE_id_relation[relation]
bs = self.ConvE_model.batch_size
curr = [curr_id] + [0] * (bs - 1)
curr, output = self.ConvE_model(
longTensor(curr), longTensor([rel_id] * bs))
curr = curr[0].cpu().numpy()
targ = [targ_id] + [0] * (bs - 1)
targ, output = self.ConvE_model(
longTensor(targ), longTensor([rel_id] * bs))
targ = targ[0].cpu().numpy()
else: # Default, TransE
pass
return np.expand_dims(np.concatenate((curr, targ - curr)), axis=0)
else:
return None
def get_valid_actions(self, entityID): # Get the valid action
actions = set()
for line in self.kb:
triple = line.split()
e1_idx = self.entity2id_[triple[0]]
if e1_idx == entityID:
actions.add(self.relation2id_[triple[2]])
return np.array(list(actions))
# A path's embedding is calculated as summing all the relational vectors
    def path_embedding(self, path):
        # the original referenced an undefined global `embedding_dim`; the
        # relation embedding width serves the same purpose
        embedding_dim = self.relation2vec.shape[1]
        embeddings = [self.relation2vec[self.relation2id_[relation], :]
                      for relation in path]
        embeddings = np.reshape(embeddings, (-1, embedding_dim))
        path_encoding = np.sum(embeddings, axis=0)
        return np.reshape(path_encoding, (-1, embedding_dim))
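# Illustrative usage sketch (hypothetical entity/relation names; the task line
# format "head tail relation" follows task.split()[2] above):
#   env = Env('NELL-995/', task='concept_a concept_b concept:relationname', model='TransE')
#   state = [env.entity2id_['concept_a'], env.entity2id_['concept_b'], 0]
#   reward, next_state, done = env.interact(state, action=0)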
| 42.014963 | 107 | 0.553063 | 1,927 | 16,848 | 4.623249 | 0.149974 | 0.034572 | 0.038164 | 0.042429 | 0.393086 | 0.328656 | 0.275564 | 0.234706 | 0.196543 | 0.165563 | 0 | 0.032135 | 0.344314 | 16,848 | 400 | 108 | 42.12 | 0.774328 | 0.094611 | 0 | 0.237624 | 0 | 0 | 0.081437 | 0.063137 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016502 | false | 0.006601 | 0.039604 | 0 | 0.079208 | 0.026403 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f718d511a624ff6bafcf060c184c7b35cb49f0 | 2,608 | py | Python | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | txrtpengine/NGCPProxy.py | braams/txrtpengine | 5511cf79d7fc338b28d927c19e5ff3b88e66a5be | [
"MIT"
] | null | null | null | import json
from twisted.internet import reactor
from twisted.python import log
from twisted.web.resource import Resource
from twisted.web.server import NOT_DONE_YET
from twisted.web.server import Site
from txrtpengine.NGCP import NGCPClient
class NGCPProxy(Resource):
def __init__(self, addr):
self.c = NGCPClient(addr)
self.isLeaf = True
Resource.__init__(self)
def _onResponse(self, response, request):
request.write(json.dumps(response).encode('utf-8'))
request.finish()
def _onError(self, error, request):
request.write(json.dumps({'error': str(error)}).encode('utf-8'))
request.finish()
def render_POST(self, request):
request.setHeader('Content-Type', 'application/json; charset=utf-8')
# copy-paste from https://stackoverflow.com/a/33571117
def _byteify(data, ignore_dicts=False):
# if this is a unicode string, return its string representation
if isinstance(data, unicode):
return data.encode('utf-8')
# if this is a list of values, return list of byteified values
if isinstance(data, list):
return [_byteify(item, ignore_dicts=True) for item in data]
# if this is a dictionary, return dictionary of byteified keys and values
# but only if we haven't already byteified it
if isinstance(data, dict) and not ignore_dicts:
return {
_byteify(key, ignore_dicts=True): _byteify(value, ignore_dicts=True)
for key, value in data.iteritems()
}
# if it's anything else, return it in its original form
return data
try:
content = request.content.read().decode("utf-8")
cmd = json.loads(content, object_hook=_byteify)
d = self.c.command(cmd)
d.addCallback(self._onResponse, request)
d.addErrback(self._onError, request)
return NOT_DONE_YET
except Exception as e:
return json.dumps({'error': str(e)}, ensure_ascii=False, indent=1).encode('utf-8')
if __name__ == '__main__':
import sys
from twisted.web.client import getPage
log.startLogging(sys.stdout)
def test():
reactor.listenTCP(1222, Site(NGCPProxy(('127.0.0.1', 16222))))
def onResponse(data):
log.msg("response: %s" % data)
getPage('http://localhost:1222/', method='POST', postdata='{"command":"ping"}').addBoth(onResponse)
reactor.callWhenRunning(test)
reactor.run()
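# Equivalent manual check once the proxy is listening (illustrative):
#   curl -X POST http://localhost:1222/ -d '{"command": "ping"}'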
| 33.435897 | 107 | 0.625383 | 321 | 2,608 | 4.965732 | 0.417445 | 0.041405 | 0.035132 | 0.016939 | 0.100376 | 0.032622 | 0 | 0 | 0 | 0 | 0 | 0.017792 | 0.267255 | 2,608 | 77 | 108 | 33.87013 | 0.816327 | 0.132285 | 0 | 0.039216 | 0 | 0 | 0.066933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0 | 0.176471 | 0 | 0.45098 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16f73a2d16ee1a1b4874c0d6207d250cd9f1609d | 6,980 | py | Python | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | ise_session_gui.py | ComtecSystem-dev/ise_session | 299bf47b7584094c7722a27a5cbec704e8acc084 | [
"Apache-2.0"
] | null | null | null | import sys
import requests
import xmltodict
from functools import partial
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from PyQt5 import uic
#Link : Qt5 UI File
# - condition : The UI file should be located in the same directory as this file
form_class = uic.loadUiType("./ise_session.ui")[0]
# Class definition : ISE REST API session
class ISE_Session():
def __init__(self, ip, id, pwd):
self.ip = ip
self.id = id
self.pwd = pwd
def getActiveSession(self):
url = "https://%s/admin/API/mnt/Session/ActiveList" % self.ip
ret_state, ret_val = self.request_action("get", url, self.id, self.pwd)
return ret_state, ret_val
def deleteSessionByMAC(self, MAC):
url = "https://%s/admin/API/mnt/Session/Delete/MACAddress/%s" % (self.ip, MAC)
ret_state, ret_val = self.request_action("delete", url, self.id, self.pwd)
return ret_state, ret_val
def request_action(self, request_type, url, id, pwd, ):
print("\t Request URL : %s %s" % (request_type, url))
print("\t Request ID/PWD : [%s][%s]" % (id, pwd))
session = requests.Session()
session.auth = (id, pwd)
if request_type == "get":
response = session.get(url, verify=False)
elif request_type == "delete":
response = session.delete(url, verify=False)
else:
            return 0, "unknown error"
ret_val = None
if response.status_code == 401:
ret_val = "Auth failed"
elif response.status_code != 200:
ret_val = "Error code %s " % (response.status_code)
else:
ret_val = xmltodict.parse(response.text)
return response.status_code, ret_val
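# Illustrative standalone usage of ISE_Session (host and credentials are
# placeholders):
#   ise = ISE_Session('10.0.0.1', 'admin', 'secret')
#   status, sessions = ise.getActiveSession()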
class MyWindow(QMainWindow, form_class) :
def __init__(self) :
super().__init__()
self.setupUi(self)
self.lineEdit_IP.setText("10.200.150.212")
self.lineEdit_ID.setText("admin")
self.lineEdit_PWD.setText("Comtec123")
# Linking functions to buttons
self.pushButton.clicked.connect(self.button1Function)
def button1Function(self):
ISE_IP = self.lineEdit_IP.text()
ISE_ID = self.lineEdit_ID.text()
ISE_PWD = self.lineEdit_PWD.text()
print("[MyWindow] button1Function() - [%s][%s][%s]" % (ISE_IP, ISE_ID, ISE_PWD))
ise_session = ISE_Session(ISE_IP, ISE_ID, ISE_PWD)
ret_state, ret_val = ise_session.getActiveSession()
if ret_state != 200:
            QMessageBox.about(self, "Error", "%s" % (ret_val))
else:
print("[MyWindow] button1Function() - %s" % (ret_state))
session_count = 0
session_list = []
if ret_val is not None and "activeList" in ret_val:
session_count = ret_val['activeList']['@noOfActiveSession']
            if session_count == "1":
                rets = [ret_val['activeList']['activeSession']]
            else:
                rets = ret_val['activeList']['activeSession']
            for ret in rets:
                session = {}
                session['user_name'] = ret['user_name'] if 'user_name' in ret else '!!!'
                session['mac'] = ret['calling_station_id'] if 'calling_station_id' in ret else '!!!'
                session['ip'] = ret['framed_ip_address'] if 'framed_ip_address' in ret else '!!!'
                session['sw_ip'] = ret['nas_ip_address'] if 'nas_ip_address' in ret else '!!!'
                session_list.append(session)
                print("\t%s" % (session))
self.Set_Table(["user_name", "mac", "ip", "sw_ip"], session_list)
def click_btn(self, btnClass, MAC):
msgBox = QMessageBox()
msgBox.setIcon(QMessageBox.Warning)
        msgBox.setText("Are you sure you want to delete the session on MAC(%s)" % (MAC))
        msgBox.setWindowTitle("Warning")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
returnValue = msgBox.exec()
if returnValue == QMessageBox.Ok:
ISE_IP = self.lineEdit_IP.text()
ISE_ID = self.lineEdit_ID.text()
ISE_PWD = self.lineEdit_PWD.text()
ise_session = ISE_Session(ISE_IP, ISE_ID, ISE_PWD)
ret_state, ret_val = ise_session.deleteSessionByMAC(MAC)
if ret_state == 200:
if ret_val is not None and "mnt-rest-result" in ret_val:
if "status" in ret_val["mnt-rest-result"]:
btnClass.setEnabled(False)
return
QMessageBox.about(self, "Error[%s]" % ret_state, "%s" % (ret_val) )
pass
def Set_Table(self, head_list, data_list):
self.tableWidget.setRowCount(len(data_list))
self.tableWidget.setColumnCount(len(head_list)+1)
self.tableWidget.setHorizontalHeaderLabels([" "]+head_list)
self.tableWidget.setColumnWidth(0, 50)
self.tableWidget.setColumnWidth(1, 130)
self.tableWidget.setColumnWidth(2, 150)
self.tableWidget.setColumnWidth(3, 130)
self.tableWidget.setColumnWidth(4, 130)
col_count = 0
row_count = 0
for table_data in data_list:
col_count = 0
btnDelete = QPushButton("Delete")
btnDelete.MAC = table_data['mac']
btnDelete.clicked.connect(partial(self.click_btn, btnDelete, table_data['mac']))
#btnDelete.clicked.connect(self.click_btn)
self.tableWidget.setCellWidget(row_count, col_count, btnDelete)
col_count = 1
for column_name in head_list:
column_val = table_data[column_name] if column_name in table_data else '!!!'
tableitem = QTableWidgetItem(column_val)
tableitem.setFlags(Qt.ItemIsEnabled)
self.tableWidget.setItem(row_count, col_count, tableitem)
col_count = col_count + 1
row_count = row_count + 1
if __name__ == "__main__" :
    #QApplication : run the service
app = QApplication(sys.argv)
    #create an instance of the window class
myWindow = MyWindow()
#show UI
myWindow.show()
#Run Program
app.exec_() | 41.058824 | 109 | 0.570487 | 807 | 6,980 | 4.72119 | 0.22057 | 0.033071 | 0.018898 | 0.033596 | 0.298163 | 0.298163 | 0.275591 | 0.234646 | 0.234646 | 0.234646 | 0 | 0.013834 | 0.316476 | 6,980 | 170 | 110 | 41.058824 | 0.784741 | 0.039398 | 0 | 0.227273 | 0 | 0 | 0.13174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.007576 | 0.05303 | 0 | 0.166667 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16fa4d94e2d23cb6b84df2ab54d37fba71a56141 | 1,597 | py | Python | examples/zegar.py | BrownEmmett/8digit | aa9ab5e6673ec0fa3764510bd845a94ac37c4c1e | [
"MIT"
] | null | null | null | examples/zegar.py | BrownEmmett/8digit | aa9ab5e6673ec0fa3764510bd845a94ac37c4c1e | [
"MIT"
] | null | null | null | examples/zegar.py | BrownEmmett/8digit | aa9ab5e6673ec0fa3764510bd845a94ac37c4c1e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
Example for seven segment displays.
"""
import time
from datetime import datetime
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.virtual import viewport, sevensegment
def date(seg):
"""
Display current date on device.
"""
now = datetime.now()
seg.text = now.strftime("%d-%m-%y")
def clock(seg, seconds):
"""
Display current time on device.
"""
interval = 0.5
for i in range(int(seconds / interval)):
now = datetime.now()
seg.text = now.strftime("%H-%M-%S")
# calculate blinking dot
if i % 2 == 0:
seg.text = now.strftime("%H-%M-%S")
else:
seg.text = now.strftime("%H %M %S")
time.sleep(interval)
def show_message_vp(device, msg, delay=0.1):
# Implemented with virtual viewport
width = device.width
padding = " " * width
msg = padding + msg + padding
n = len(msg)
virtual = viewport(device, width=n, height=8)
sevensegment(virtual).text = msg
for i in reversed(list(range(n - width))):
virtual.set_position((i, 0))
time.sleep(delay)
def show_message_alt(seg, msg, delay=0.1):
# Does same as above but does string slicing itself
width = seg.device.width
padding = " " * width
msg = padding + msg + padding
for i in range(len(msg)):
seg.text = msg[i:i + width]
time.sleep(delay)
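# Minimal wiring sketch (assumption: one cascaded MAX7219 8-digit module on
# SPI port 0, device 0; adjust to your hardware):
if __name__ == "__main__":
    serial = spi(port=0, device=0, gpio=noop())
    device = max7219(serial, cascaded=1)
    seg = sevensegment(device)
    date(seg)
    time.sleep(2)
    clock(seg, seconds=10)
    show_message_vp(device, "Do not adjust your set")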
| 22.492958 | 55 | 0.612398 | 222 | 1,597 | 4.378378 | 0.441441 | 0.036008 | 0.041152 | 0.074074 | 0.200617 | 0.200617 | 0.200617 | 0.088477 | 0 | 0 | 0 | 0.017677 | 0.256105 | 1,597 | 70 | 56 | 22.814286 | 0.800505 | 0.14402 | 0 | 0.324324 | 0 | 0 | 0.0278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.027027 | 0.135135 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
16fa7014b2509e362e1a19500f13adaa6c41db09 | 1,109 | py | Python | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | 1 | 2021-05-23T13:48:59.000Z | 2021-05-23T13:48:59.000Z | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | null | null | null | caption_generation/sub_json.py | Collapsar-G/clevr-dataset-gen | a09b0559b53891bf4f4771190e4ad361406c67fe | [
"BSD-3-Clause"
] | null | null | null | import argparse
import json
import os
import ijson
parser = argparse.ArgumentParser()
# /questions/CLEVR_test_questions.json
# Inputs
parser.add_argument('--all_scene_paths', default='../data/CLEVR_v1.0/scenes',
                    help="Directory containing the per-image scene JSON files")
parser.add_argument('--output_scene_file', default='../data/CLEVR_v1.0/CLEVR_train_scenes.json',
                    help="Path for the single merged scene JSON file")
if __name__ == "__main__":
all_scenes = []
args = parser.parse_args()
paths = os.listdir(args.all_scene_paths)
for scene_path in paths:
# print(scene_path)
with open(args.all_scene_paths + "/" + scene_path, 'r') as f:
all_scenes.append(json.load(f))
output = {
'info':
{"split": "train", "license": "Creative Commons Attribution (CC BY 4.0)", "version": "1.0",
"date": "2/14/2017"},
'scenes': all_scenes
}
with open(args.output_scene_file, 'w') as f:
json.dump(output, f)
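# Example invocation (using the defaults declared above):
#   python sub_json.py --all_scene_paths ../data/CLEVR_v1.0/scenes \
#       --output_scene_file ../data/CLEVR_v1.0/CLEVR_train_scenes.json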
| 34.65625 | 103 | 0.627592 | 140 | 1,109 | 4.728571 | 0.485714 | 0.036254 | 0.058912 | 0.054381 | 0.057402 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017794 | 0.239856 | 1,109 | 31 | 104 | 35.774194 | 0.767497 | 0.055005 | 0 | 0 | 0 | 0 | 0.321839 | 0.084291 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
16fd36971459752eacaa3008f88c6855b286e881 | 1,439 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/tests/factories.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/tests/factories.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/tests/factories.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """ Factories for generating fake commerce-related data. """
import factory
from factory.fuzzy import FuzzyText
class OrderFactory(factory.Factory):
""" Factory for stubbing orders resources from Ecommerce (v2). """
class Meta:
model = dict
number = factory.Sequence(lambda n: 'edx-%d' % n)
date_placed = '2016-01-01T10:00:00Z'
status = 'Complete'
currency = 'USD'
total_excl_tax = '100.00'
lines = []
class OrderLineFactory(factory.Factory):
""" Factory for stubbing order lines resources from Ecommerce (v2). """
class Meta:
model = dict
title = FuzzyText(prefix='Seat in ')
quantity = 1
description = FuzzyText()
status = 'Complete'
line_price_excl_tax = '100.00'
unit_price_excl_tax = '100.00'
product = {}
class ProductFactory(factory.Factory):
""" Factory for stubbing Product resources from Ecommerce (v2). """
class Meta:
model = dict
id = factory.Sequence(lambda n: n) # pylint: disable=invalid-name
    # str(id) at class-definition time would stringify the Sequence
    # declaration object rather than a generated value; resolve the
    # per-instance value lazily instead
    url = factory.LazyAttribute(lambda o: 'http://test/api/v2/products/' + str(o.id))
product_class = 'Seat'
title = FuzzyText(prefix='Seat in ')
price = '100.00'
attribute_values = []
class ProductAttributeFactory(factory.Factory):
""" Factory for stubbing product attribute resources from
Ecommerce (v2).
"""
class Meta:
model = dict
name = FuzzyText()
code = FuzzyText()
value = FuzzyText()
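# Illustrative usage sketch (assumption: standard factory_boy semantics; not
# part of the original module):
#   product = ProductFactory()   # e.g. {'id': 0, 'url': 'http://test/api/v2/products/0', ...}
#   order = OrderFactory(lines=[OrderLineFactory(product=product)])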
| 24.810345 | 75 | 0.645587 | 166 | 1,439 | 5.53012 | 0.445783 | 0.122004 | 0.091503 | 0.104575 | 0.431373 | 0.267974 | 0.183007 | 0.183007 | 0 | 0 | 0 | 0.036397 | 0.236275 | 1,439 | 57 | 76 | 25.245614 | 0.798908 | 0.23558 | 0 | 0.333333 | 0 | 0 | 0.110377 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.055556 | 0 | 0.888889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
16fee0714125b907c565a7460bda1a63c75c9808 | 3,682 | py | Python | boundlexx/boundless/migrations/0002_create_item_timeseries.py | AngellusMortis/boundlexx | 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a | [
"MIT"
] | 1 | 2021-04-23T11:49:50.000Z | 2021-04-23T11:49:50.000Z | boundlexx/boundless/migrations/0002_create_item_timeseries.py | AngellusMortis/boundlexx | 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a | [
"MIT"
] | 1 | 2021-04-17T18:17:12.000Z | 2021-04-17T18:17:12.000Z | boundlexx/boundless/migrations/0002_create_item_timeseries.py | AngellusMortis/boundlexx | 407f5e38e8e0f067cbcb358787fc9af6a9be9b2a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-07-21 17:44
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('boundless', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ItemShopStandPrice',
fields=[
('time', models.DateTimeField(auto_now=True, primary_key=True, serialize=False)),
('location_x', models.IntegerField()),
('location_y', models.IntegerField()),
('location_z', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('item_count', models.IntegerField()),
('beacon_name', models.CharField(db_index=True, max_length=64)),
('guild_tag', models.CharField(max_length=8)),
('shop_activity', models.IntegerField()),
('active', models.BooleanField(db_index=True, default=True)),
('world', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.World')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.Item')),
],
options={
'abstract': False,
'unique_together': {('time', 'world', 'location_x', 'location_y', 'item', 'price', 'item_count')},
},
),
migrations.CreateModel(
name='ItemRequestBasketPrice',
fields=[
('time', models.DateTimeField(auto_now=True, primary_key=True, serialize=False)),
('location_x', models.IntegerField()),
('location_y', models.IntegerField()),
('location_z', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=10)),
('item_count', models.IntegerField()),
('beacon_name', models.CharField(db_index=True, max_length=64)),
('guild_tag', models.CharField(max_length=8)),
('shop_activity', models.IntegerField()),
('active', models.BooleanField(db_index=True, default=True)),
('world', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.World')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='boundless.Item')),
],
options={
'abstract': False,
'unique_together': {('time', 'world', 'location_x', 'location_y', 'item', 'price', 'item_count')},
},
),
migrations.RunSQL(
"CREATE EXTENSION IF NOT EXISTS timescaledb CASCADE", reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
'ALTER TABLE "boundless_itemshopstandprice" DROP CONSTRAINT "boundless_itemshopstandprice_pkey"', reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
"SELECT create_hypertable('boundless_itemshopstandprice', 'time', chunk_time_interval => 86400000000, migrate_data => true, create_default_indexes => false)", reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
'ALTER TABLE "boundless_itemrequestbasketprice" DROP CONSTRAINT "boundless_itemrequestbasketprice_pkey"', reverse_sql=migrations.RunSQL.noop
),
migrations.RunSQL(
"SELECT create_hypertable('boundless_itemrequestbasketprice', 'time', chunk_time_interval => 86400000000, migrate_data => true, create_default_indexes => false)", reverse_sql=migrations.RunSQL.noop
),
]
| 51.138889 | 209 | 0.605921 | 348 | 3,682 | 6.215517 | 0.281609 | 0.083218 | 0.032362 | 0.050855 | 0.784096 | 0.784096 | 0.784096 | 0.784096 | 0.784096 | 0.784096 | 0 | 0.0194 | 0.258012 | 3,682 | 71 | 210 | 51.859155 | 0.772328 | 0.012222 | 0 | 0.738462 | 1 | 0 | 0.27923 | 0.084182 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.030769 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e50054bcfcc58e68ad2fe236a5c12539ae5190f0 | 39,700 | py | Python | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 47 | 2022-03-21T02:41:39.000Z | 2022-03-30T17:25:29.000Z | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 1 | 2022-03-28T15:11:26.000Z | 2022-03-28T16:27:40.000Z | det3d/models/bbox_heads/clear_mg_ohs_head.py | Lelin-HUNUST/VISTA | 7bf34132d719cb0e5e803b92cd15451df58a9a5d | [
"MIT"
] | 2 | 2022-03-23T12:56:14.000Z | 2022-03-27T14:25:50.000Z | # Copyright (c) Gorilla-Lab. All rights reserved.
import logging
from functools import partial
from collections import defaultdict
from typing import Dict, List, Optional, Sequence
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..losses.ohs_loss_clear import OHSLossClear
from ..losses.attention_constrain_loss import AttentionConstrainedLoss
from ..registry import HEADS
from ..builder import build_loss
from ...core.bbox import box_torch_ops
from ...core.bbox.geometry import points_in_convex_polygon_torch
from ...core.bbox.box_coders import BoxCoder, GroundBox3dCoderAF
from ipdb import set_trace
def multi_apply(func, *args, **kwargs):
pfunc = partial(func, **kwargs) if kwargs else func
map_results = map(pfunc, *args)
return tuple(map(list, zip(*map_results)))
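# e.g. multi_apply(lambda a, b: (a + b, a * b), [1, 2], [3, 4]) -> ([4, 6], [3, 8])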
def _get_pos_neg_loss(cls_loss, labels, label_weights):
# cls_loss: [N, num_anchors, num_class]
# labels: [N, num_anchors]
batch_size = cls_loss.shape[0]
if cls_loss.shape[-1] == 1 or len(cls_loss.shape) == 2:
cls_pos_loss = (labels > 0).type_as(cls_loss) * cls_loss.view(batch_size, -1)
cls_neg_loss = ((labels == 0) & (label_weights > 0)).type_as(
cls_loss) * cls_loss.view(batch_size, -1)
cls_pos_loss = cls_pos_loss.sum() / batch_size
cls_neg_loss = cls_neg_loss.sum() / batch_size
else:
cls_pos_loss = cls_loss[..., 1:].sum() / batch_size
cls_neg_loss = cls_loss[..., 0].sum() / batch_size
return cls_pos_loss, cls_neg_loss
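# Shape note (illustrative, not from the original source): for single-channel
# scores the split follows the label map (labels > 0 -> positive cells,
# labels == 0 with nonzero weight -> negative); for multi-class scores,
# channel 0 is treated as the background/negative channel.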
@HEADS.register_module
class OHSHeadClear(nn.Module):
def __init__(self,
box_coder: GroundBox3dCoderAF,
num_input: int,
num_pred: int,
num_cls: int,
header: bool = True,
name: str = "",
**kwargs,):
super().__init__()
self.box_coder = box_coder
self.conv_cls = nn.Conv2d(num_input, num_cls, 1)
self.mode = kwargs.get("mode", "bev")
if self.box_coder.center == "direct":
self.conv_xy = nn.Conv2d(num_input, 2, 1)
elif self.box_coder.center == "soft_argmin":
self.conv_xy = nn.Conv2d(num_input, 2 * self.box_coder.kwargs["xy_bin_num"], 1)
self.loc_bins_x = torch.linspace(self.box_coder.kwargs["x_range"][0], self.box_coder.kwargs["x_range"][1],
self.box_coder.kwargs["xy_bin_num"]).reshape(1, 1, -1, 1, 1)
self.loc_bins_y = torch.linspace(self.box_coder.kwargs["y_range"][0], self.box_coder.kwargs["y_range"][1],
self.box_coder.kwargs["xy_bin_num"]).reshape(1, 1, -1, 1, 1)
self.loc_bins = torch.cat([self.loc_bins_x, self.loc_bins_y], 1)
else:
raise NotImplementedError
if "direct" in self.box_coder.height:
self.conv_z = nn.Conv2d(num_input, 1, 1)
elif "soft_argmin" in self.box_coder.height:
self.conv_z = nn.Conv2d(num_input, self.box_coder.kwargs["z_bin_num"], 1)
self.z_loc_bins = torch.linspace(self.box_coder.kwargs["z_range"][0], self.box_coder.kwargs["z_range"][1],
self.box_coder.kwargs["z_bin_num"]).reshape(1, self.box_coder.kwargs["z_bin_num"], 1, 1)
else:
raise NotImplementedError
if "soft_argmin" in self.box_coder.dim:
self.conv_dim = nn.Conv2d(num_input, 3 * self.box_coder.kwargs["dim_bin_num"], 1)
self.dim_loc_bins = torch.linspace(self.box_coder.kwargs["dim_range"][0], self.box_coder.kwargs["dim_range"][1],
self.box_coder.kwargs["dim_bin_num"]).reshape(1, self.box_coder.kwargs[
"dim_bin_num"], 1, 1)
self.dim_bins = torch.cat([self.dim_loc_bins, self.dim_loc_bins, self.dim_loc_bins], 1)
else:
self.conv_dim = nn.Conv2d(num_input, 3, 1)
if self.box_coder.velocity:
self.conv_velo = nn.Conv2d(num_input, 2, 1)
if self.box_coder.rotation == "vector":
self.conv_r = nn.Conv2d(num_input, 2, 1)
elif self.box_coder.rotation == "soft_argmin":
self.conv_r = nn.Conv2d(num_input, self.box_coder.kwargs["r_bin_num"], 1)
self.r_loc_bins = torch.linspace(-np.pi, np.pi, self.box_coder.kwargs["r_bin_num"]).reshape(
1, self.box_coder.kwargs["r_bin_num"], 1, 1)
else:
self.conv_r = nn.Conv2d(num_input, 1, 1)
def forward(self, x, return_loss):
x_bev = x
ret_dict = {}
cls_preds = self.conv_cls(x_bev).permute(0, 2, 3, 1).contiguous()
# predict bounding box
xy = self.conv_xy(x_bev)
z = self.conv_z(x_bev)
dim = self.conv_dim(x_bev)
# encode as bounding box
if self.box_coder.center == "soft_argmin":
xy = xy.view(
(xy.shape[0], 2, self.box_coder.kwargs["xy_bin_num"], xy.shape[2], xy.shape[3]))
xy = F.softmax(xy, dim=2)
xy = xy * self.loc_bins.to(xy.device)
xy = torch.sum(xy, dim=2, keepdim=False)
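# Soft-argmin above: a differentiable surrogate for bin selection that
# computes the expectation E[bin] = sum_k softmax(logits)_k * bin_center_k
# over the discretised coordinate range; the same pattern is reused below
# for z, dim and rotation.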
if "soft_argmin" in self.box_coder.height:
z = F.softmax(z, dim=1)
z = z * self.z_loc_bins.to(z.device)
z = torch.sum(z, dim=1, keepdim=True)
if "soft_argmin" in self.box_coder.dim:
dim = dim.view(
(dim.shape[0], 3, self.box_coder.kwargs["dim_bin_num"], dim.shape[2], dim.shape[3]))
dim = F.softmax(dim, dim=2)
dim = dim * self.dim_loc_bins.to(dim.device)
dim = torch.sum(dim, dim=2, keepdim=False)
xy = xy.permute(0, 2, 3, 1).contiguous()
z = z.permute(0, 2, 3, 1).contiguous()
dim = dim.permute(0, 2, 3, 1).contiguous()
if self.box_coder.dim == "direct":
dim = F.relu(dim)
if self.box_coder.velocity:
velo = self.conv_velo(x_bev).permute(0, 2, 3, 1).contiguous()
r_preds = self.conv_r(x_bev)
if self.box_coder.rotation == "vector":
#import pdb; pdb.set_trace()
r_preds = F.normalize(r_preds, p=2, dim=1)
elif self.box_coder.rotation == "soft_argmin":
r_preds = F.softmax(r_preds, dim=1)
r_preds = r_preds * self.r_loc_bins.to(r_preds.device)
r_preds = torch.sum(r_preds, dim=1, keepdim=True)
r_preds = r_preds.permute(0, 2, 3, 1).contiguous()
if self.box_coder.velocity:
box_preds = torch.cat([xy, z, dim, velo, r_preds], -1)
else:
box_preds = torch.cat([xy, z, dim, r_preds], -1)
ret_dict.update({"box_preds": box_preds, "cls_preds": cls_preds})
return ret_dict
@HEADS.register_module
class MultiGroupOHSHeadClear(nn.Module):
def __init__(self,
mode: str = "3d",
in_channels: List[int] = [128, ],
norm_cfg=None,
tasks: List[Dict] = [],
weights=[],
box_coder: BoxCoder = None,
with_cls: bool = True,
with_reg: bool = True,
encode_background_as_zeros: bool = True,
use_sigmoid_score: bool = True,
loss_norm: Dict = dict(type="NormByNumPositives",
pos_class_weight=1.0,
neg_class_weight=1.0,),
loss_cls: Dict = dict(type="CrossEntropyLoss",
use_sigmoid=False,
loss_weight=1.0,),
loss_bbox: Dict = dict(type="SmoothL1Loss",
beta=1.0,
loss_weight=1.0,),
atten_res: Sequence[int] = None,
assign_cfg: Optional[dict] = dict(),
name="rpn",):
super().__init__()
assert with_cls or with_reg
# read tasks and analyse the class names for each task
num_classes = [len(t["class_names"]) for t in tasks]
self.class_names = [t["class_names"] for t in tasks]
self.num_anchor_per_locs = [1] * len(num_classes)
self.targets = tasks
# define the essential parameters
self.box_coder = box_coder
self.with_cls = with_cls
self.with_reg = with_reg
self.in_channels = in_channels
self.num_classes = num_classes
self.encode_background_as_zeros = encode_background_as_zeros
self.use_sigmoid_score = use_sigmoid_score
self.box_n_dim = self.box_coder.n_dim
self.mode = mode
self.assign_cfg = assign_cfg
self.pc_range = np.asarray(self.box_coder.pc_range) # [6]
self.dims = self.pc_range[3:] - self.pc_range[:3] # [3]
# initialize loss
self.loss_norm = loss_norm
self.loss_cls = build_loss(loss_cls)
self.loss_reg = build_loss(loss_bbox)
self.atten_res = atten_res
# initialize logger
logger = logging.getLogger("MultiGroupHead")
self.logger = logger
# check box_coder
assert isinstance(
box_coder, GroundBox3dCoderAF), "OHSLoss must come with an anchor-free box coder"
assert box_coder.code_size == len(
loss_bbox.code_weights), "code weights do not match code size"
# set multi-tasks heads
# split each head
num_clss = []
num_preds = []
box_code_sizes = [self.box_coder.n_dim] * len(self.num_classes)
for num_c, num_a, box_cs in zip(
self.num_classes, self.num_anchor_per_locs, box_code_sizes
):
if self.encode_background_as_zeros:
num_cls = num_a * num_c
else:
num_cls = num_a * (num_c + 1)
num_clss.append(num_cls)
num_pred = num_a * box_cs
num_preds.append(num_pred)
self.logger.info(
f"num_classes: {self.num_classes}, num_preds: {num_preds}"
)
# construct each task head
self.tasks = nn.ModuleList()
for task_id, (num_pred, num_cls) in enumerate(zip(num_preds, num_clss)):
self.tasks.append(
OHSHeadClear(
self.box_coder,
self.in_channels,
num_pred,
num_cls,
header=False,
mode=self.mode,
)
)
def set_train_cfg(self, cfg):
self.ohs_loss = []
self.atten_loss = []
for task_id, target in enumerate(self.targets):
self.ohs_loss.append(
OHSLossClear(self.box_coder,
target.num_class,
self.loss_cls,
self.loss_reg,
self.encode_background_as_zeros,
cfg,
self.loss_norm,
task_id,
self.mode))
self.atten_loss.append(
AttentionConstrainedLoss(
self.box_coder, target.num_class, task_id, self.atten_res)
)
self.logger.info("Finish Attention Constrained Loss Initialization")
self.logger.info("Finish MultiGroupOHSHeadClear Initialization")
def forward(self, x, return_loss=False):
ret_dicts = []
for task in self.tasks:
ret_dicts.append(task(x, return_loss))
return ret_dicts
def loss(self, example, preds_dicts, **kwargs):
annos = example["annos"]
batch_size_device = example["num_voxels"].shape[0]
batch_labels = [anno["gt_classes"] for anno in annos]
batch_boxes = [anno["gt_boxes"] for anno in annos]
batch_atten_map = kwargs.get('atten_map', None)
rets = []
for task_id, preds_dict in enumerate(preds_dicts):
box_preds = preds_dict["box_preds"]
cls_preds = preds_dict["cls_preds"]
bs_per_gpu = len(cls_preds)
batch_task_boxes = [batch_box[task_id] for batch_box in batch_boxes]
batch_task_labels = [batch_label[task_id] for batch_label in batch_labels]
attention_loss = defaultdict(list)
for index, bam in enumerate(batch_atten_map):
temp_attention_loss = self.atten_loss[task_id](
bam, batch_task_boxes, batch_task_labels)
for ke, va in temp_attention_loss.items():
attention_loss[ke].append(va)
targets = self.assign_hotspots(cls_preds,
batch_task_boxes,
batch_task_labels)
labels, label_weights, bbox_targets, bbox_locs, num_total_pos, num_total_neg = targets
# process assign targets
labels = torch.stack(labels, 0).view(bs_per_gpu, -1) # [B, H*W]
label_weights = torch.stack(label_weights, 0).view(bs_per_gpu, -1) # [B, H*W]
kwargs = {}
# calculate ohs loss for each task
loc_loss, cls_loss = self.ohs_loss[task_id](
box_preds,
cls_preds,
labels,
label_weights,
bbox_targets,
bbox_locs,
**kwargs
)
if self.loss_norm["type"] == "NormByNumExamples":
normalizer = num_total_pos + num_total_neg
elif self.loss_norm["type"] == "NormByNumPositives":
normalizer = max(num_total_pos, 1.0)
elif self.loss_norm["type"] == "NormByNumPosNeg":
normalizer = self.loss_norm["pos_cls_weight"] * num_total_pos + \
self.loss_norm["neg_cls_weight"] * num_total_neg
elif self.loss_norm["type"] == "dont_norm": # support ghm loss
normalizer = batch_size_device
else:
raise ValueError(f"unknown loss norm type")
loc_loss_reduced = loc_loss.sum() / normalizer
loc_loss_reduced *= self.loss_reg._loss_weight
cls_pos_loss, cls_neg_loss = _get_pos_neg_loss(cls_loss, labels, label_weights)
cls_pos_loss /= self.loss_norm["pos_cls_weight"]
cls_neg_loss /= self.loss_norm["neg_cls_weight"]
cls_loss_reduced = cls_loss.sum() / normalizer
cls_loss_reduced *= self.loss_cls._loss_weight
loss = loc_loss_reduced + cls_loss_reduced
atten_loss = 0.0
for value in attention_loss.values():
if type(value) == list:
temp_loss = 0.0
norm_fac = len(value)
for temp_atten_loss in value:
temp_loss = temp_loss + temp_atten_loss
value = temp_loss * 1.0 / norm_fac
atten_loss = atten_loss + value
loss = loss + atten_loss
loc_loss_elem = [
loc_loss[:, :, i].sum() / num_total_pos
for i in range(loc_loss.shape[-1])
]
ret = {
"loss": loss,
"cls_pos_loss": cls_pos_loss.detach().cpu(),
"cls_neg_loss": cls_neg_loss.detach().cpu(),
"cls_loss_reduced": cls_loss_reduced.detach().cpu().mean(),
"loc_loss_reduced": loc_loss_reduced.detach().cpu().mean(),
"loc_loss_elem": [elem.detach().cpu() for elem in loc_loss_elem],
"num_pos": torch.tensor([num_total_pos]),
"num_neg": torch.tensor([num_total_neg]),
}
for key, value in attention_loss.items():
if type(value) == list:
temp_loss = 0.0
norm_fac = len(value)
for temp_atten_loss in value:
temp_loss = temp_loss + temp_atten_loss
value = temp_loss * 1.0 / norm_fac
ret.update({key: value.detach().cpu()})
rets.append(ret)
rets_merged = defaultdict(list)
for ret in rets:
for k, v in ret.items():
rets_merged[k].append(v)
return rets_merged
def assign_hotspots(self,
cls_scores: torch.Tensor,
gt_bboxes: List[np.ndarray],
gt_labels: List[np.ndarray]):
"""
assign hotspots(generate targets)
Args:
cls_scores (torch.Tensor, [B, H, W, C]): classification prediction score map
gt_bboxes (List[np.ndarray], [[M, ndim], [K, ndim], ...]): ground truth bounding box for each batch
gt_labels (List[np.ndarray], [[M], [K], ...]): ground truth bounding box id for each batch
cls_scores (torch.Tensor, [B, H, D, C], optional): classification prediction score map for RV.
Default to None.
"""
bs_per_gpu = len(gt_bboxes) # Get the batch size
device = cls_scores.device # Get the current device
gt_bboxes = [torch.tensor(box, device=device).float()
for box in gt_bboxes] # [M, 9], all gt_boxes
# [M] all gt_classes,start from 1,( 0 means background)
gt_labels = [torch.tensor(label, device=device).long() for label in gt_labels]
labels_list, label_weights_list, bbox_targets_list, bbox_locs_list, num_pos_list, num_neg_list = \
multi_apply(self.assign_hotspots_bev_single, cls_scores, gt_bboxes, gt_labels)
for i in range(bs_per_gpu):
bbox_locs_list[i][:, 0] = i
num_total_pos = sum([max(num, 1) for num in num_pos_list])
num_total_neg = sum([max(num, 1) for num in num_neg_list])
targets = (labels_list, label_weights_list, bbox_targets_list,
bbox_locs_list, num_total_pos, num_total_neg)
return targets
def assign_hotspots_bev_single(self,
cls_scores: torch.Tensor,
gt_bboxes: torch.Tensor,
gt_labels: torch.Tensor):
r"""
assign hotspots(generate targets) of BEV for a single batch.
Args:
cls_scores (torch.Tensor, [H, W, C]): classification prediction score map
gt_bboxes (torch.Tensor, [M, ndim]): ground truth bounding box
gt_labels_list (torch.Tensor, [M]): ground truth bounding box id
"""
h, w = cls_scores.size()[:2] # Get the size of the feature map of bev view (262,64)
# initialize relate labels
labels = torch.zeros_like(cls_scores[:, :, 0], dtype=torch.long) # Set up the bev labels
label_weights = torch.ones_like(
cls_scores[:, :, 0], dtype=torch.float) * self.loss_norm["neg_cls_weight"] # Initialize all weights to neg weights
# initialized to record the positive bbx's location in grid map
bbox_locs = cls_scores.new_zeros((0, 3), dtype=torch.long)
# initialized to record the positive bbx's regression targets
bbox_targets = cls_scores.new_zeros((0, self.box_coder.code_size), dtype=torch.float)
# scan gt_bboxes
self.effective_ratio = self.assign_cfg.get("effective_ratio", [1.0, 6.0])
if len(gt_bboxes) > 0:
effective_boxes = gt_bboxes[:, [0, 1, 3, 4]].clone().detach() # [M, 4]
effective_ratio_l = (self.dims[0] / w) / effective_boxes[:, 2] # [M]
effective_ratio_w = (self.dims[1] / h) / effective_boxes[:, 3] # [M]
effective_ratio_l = effective_ratio_l.clamp(min=self.effective_ratio[0], # [M]
max=self.effective_ratio[1]) # [M]
effective_ratio_w = effective_ratio_w.clamp(min=self.effective_ratio[0], # [M]
max=self.effective_ratio[1]) # [M]
# expand the box's area if the box is too small, so that its label can
# still match the centre of the corresponding grid cell;
# the expanded boxes are called `effective_boxes`
effective_boxes[:, 2] *= effective_ratio_l
effective_boxes[:, 3] *= effective_ratio_w
# get the corners
angles = gt_bboxes[:, -1] # [num_box]
effective_boxes = box_torch_ops.center_to_corner_box2d(
effective_boxes[:, :2], effective_boxes[:, 2:4], angles)
ignore_boxes_out = effective_boxes
# transfer the hybrid coordinate system to Cartesian coordinate system
self.box_coder.layout(w, h)
# read necessary parameters from box_coder
# center cartesian coordinate, grid coordinate index in hybrid coordinate
# grid_real_centers - [W * H, 2]
# w_indices - [W * H]
# h_indices - [W * H]
grid_real_centers = self.box_coder.grids_sensor
w_indices = self.box_coder.ww_l
h_indices = self.box_coder.hh_l
# scan bounding boxes
for i in range(len(gt_bboxes)):
# get the points(hotspots) cover by the bounding box
pos_mask = points_in_convex_polygon_torch(
grid_real_centers, effective_boxes[i].unsqueeze(0)) # [num_points, 8]
# get the raw hotspots
pos_ind = pos_mask.nonzero()[:, 0]
# NOTE: fixes a bug in BEV target assignment: with hybrid coordinates,
# the `effective_boxes` may not expand enough to cover any grid center,
# so in that case we take the nearest grid center as the hotspot
gt_center = gt_bboxes[i: i + 1, :2] # [1, 2]
dist_to_grid_center = torch.norm(grid_real_centers - gt_center, dim=1) # [W * H]
min_ind = torch.argmin(dist_to_grid_center)
if min_ind not in pos_ind:
pos_ind = torch.cat([pos_ind.reshape(-1, 1), min_ind.reshape(-1, 1)],
dim=0).reshape(-1)
num_hotspots = self.assign_cfg.get("num_hotspots", 28)
if self.assign_cfg.get("select_hotspots", True):
# filter out the verbose hotspots
if len(pos_ind) > num_hotspots:
# if the hotspots are too many for the instance
# select the num_hotspots-th nearest as valid hotspots
points = grid_real_centers[pos_ind, :]
diff = gt_bboxes[i, :2] - points
diff = torch.norm(diff, dim=1)
sorted_ind = torch.argsort(diff)[:num_hotspots]
pos_ind = pos_ind[sorted_ind]
# get the indices of hotspots
pos_h_indices = h_indices[pos_ind] # [num_pos]
pos_w_indices = w_indices[pos_ind] # [num_pos]
# scan the positive hotspots
if len(pos_h_indices):
if not (labels[pos_h_indices, pos_w_indices] == 0).all():
unique_pos_h_indices = pos_h_indices.new_zeros((0,))
unique_pos_w_indices = pos_w_indices.new_zeros((0,))
unique_pos_ind = pos_ind.new_zeros((0,))
# NOTE: assert that each grid's gradient just be affected by one label
# if a grid was covered by other label, eliminate its effects
for ph, pw, pi in zip(pos_h_indices, pos_w_indices, pos_ind):
if labels[ph, pw] == 0:
unique_pos_h_indices = torch.cat(
(unique_pos_h_indices, ph.view((1))))
unique_pos_w_indices = torch.cat(
(unique_pos_w_indices, pw.view((1))))
unique_pos_ind = torch.cat((unique_pos_ind, pi.view((1))))
else:
label_weights[ph, pw] = 0
pos_h_indices = unique_pos_h_indices
pos_w_indices = unique_pos_w_indices
pos_ind = unique_pos_ind
# fill in `labels` and `label_weights`
labels[pos_h_indices, pos_w_indices] = gt_labels[i]
label_weights[pos_h_indices, pos_w_indices] = self.loss_norm["pos_cls_weight"]
# get the overlap hotspots and set the `label_weights` as 0
ig_mask = points_in_convex_polygon_torch(
grid_real_centers, ignore_boxes_out[i].unsqueeze(0))
ig_mask = (ig_mask & (~pos_mask)).reshape(-1) # Get the overlapped grid
ig_h = h_indices[ig_mask]
ig_w = w_indices[ig_mask]
# 1 for hspots in gtbbx, 0 for non-hspots in gtbbx
label_weights[ig_h, ig_w] = 0
centers = grid_real_centers[pos_ind]
shifts = torch.zeros((len(centers), self.box_coder.code_size),
device=cls_scores.device,
dtype=torch.float)
# Got the encode bbx target for each positive grid
shifts = self.box_coder._encode(centers, shifts, gt_bboxes[i])
zeros = torch.zeros_like(pos_w_indices)
locs = torch.stack((zeros, pos_h_indices, pos_w_indices), dim=-1)
# get the corresponding bounding boxes
bbox_locs = torch.cat((bbox_locs, locs), dim=0)
bbox_targets = torch.cat((bbox_targets, shifts), dim=0)
# get the counts of positive and negative examples
num_pos = bbox_targets.size(0)
num_neg = label_weights.nonzero().size(0) - num_pos
return (labels, label_weights, bbox_targets, bbox_locs, num_pos, num_neg)
def predict(self, example, preds_dicts, test_cfg, **kwargs):
rets = []
double_flip = test_cfg.get('double_flip', False)
for task_id, preds_dict in enumerate(preds_dicts):
batch_size_device = example['num_voxels'].shape[0]
if "metadata" not in example or len(example["metadata"]) == 0:
meta_list = [None] * batch_size_device
else:
meta_list = example["metadata"]
if double_flip:
assert batch_size_device % 4 == 0, f"batch_size_device: {batch_size_device}"
batch_size_device = int(batch_size_device / 4)
meta_list = meta_list[:4 * int(batch_size_device):4]
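# double_flip test-time augmentation: each sample occurs four times in the
# batch (identity, y-flip, x-flip, xy-flip); the flipped score/box maps are
# un-flipped below and averaged over the four views.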
batch_box_preds_all = preds_dict["box_preds"]
batch_cls_preds_all = preds_dict["cls_preds"]
_, H, W, C = batch_box_preds_all.shape
batch_box_preds_all = batch_box_preds_all.reshape(
int(batch_size_device), 4, H, W, C)
batch_box_preds_sincos_all = batch_box_preds_all[:, :, :, :, 8:10].clone()
_, H, W, C = batch_cls_preds_all.shape
batch_cls_preds_all = batch_cls_preds_all.reshape(
int(batch_size_device), 4, H, W, C)
batch_cls_preds_all[:, 1] = torch.flip(batch_cls_preds_all[:, 1], dims=[1])
batch_cls_preds_all[:, 2] = torch.flip(batch_cls_preds_all[:, 2], dims=[2])
batch_cls_preds_all[:, 3] = torch.flip(batch_cls_preds_all[:, 3], dims=[1, 2])
batch_cls_preds_all = batch_cls_preds_all.sigmoid()
batch_cls_preds = batch_cls_preds_all.mean(dim=1)
batch_box_preds_sincos_all[:, 1] = torch.flip(
batch_box_preds_sincos_all[:, 1], dims=[1])
batch_box_preds_sincos_all[:, 2] = torch.flip(
batch_box_preds_sincos_all[:, 2], dims=[2])
batch_box_preds_sincos_all[:, 3] = torch.flip(
batch_box_preds_sincos_all[:, 3], dims=[1, 2])
num_class_with_bg = self.num_classes[task_id]
if not self.encode_background_as_zeros:
num_class_with_bg = self.num_classes[task_id] + 1
batch_cls_preds = batch_cls_preds.contiguous()
batch_cls_preds = batch_cls_preds.view(
batch_size_device, -1, num_class_with_bg)
batch_reg_preds = torch.zeros(
(int(batch_size_device), 4, H * W, 9), dtype=batch_box_preds_all.dtype, device=batch_box_preds_all.device)
for i in range(4):
batch_box_preds = batch_box_preds_all[:, i, :, :, :]
box_ndim = self.box_n_dim
h, w = batch_box_preds.size()[1:3]
batch_box_preds = batch_box_preds.contiguous()
batch_box_preds = batch_box_preds.view(batch_size_device, -1, box_ndim)
if i == 1: # theta = pi-theta
batch_box_preds[:, :, -2] = -batch_box_preds[:, :, -2]
batch_box_preds_sincos_all[:, i, :, :, 0] = - \
batch_box_preds_sincos_all[:, i, :, :, 0]
elif i == 2: # x=-x, theta = 2pi-theta, vx = -vx
batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1]
batch_box_preds_sincos_all[:, i, :, :, 1] = - \
batch_box_preds_sincos_all[:, i, :, :, 1]
elif i == 3: # x=-x,y=-y, theta=theta-pi, vx=-vx, vy=-vy
batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1]
batch_box_preds[:, :, -2] = -batch_box_preds[:, :, -2]
batch_box_preds_sincos_all[:, i, :, :, 0] = - \
batch_box_preds_sincos_all[:, i, :, :, 0]
batch_box_preds_sincos_all[:, i, :, :, 1] = - \
batch_box_preds_sincos_all[:, i, :, :, 1]
#import pdb; pdb.set_trace()
# -pi/2
#batch_box_preds[:, :, -2], batch_box_preds[:, :, -1] = batch_box_preds[:, :, -1], -batch_box_preds[:, :, -2]
# # +pi/2
#batch_box_preds[:, :, -2], batch_box_preds[:, :, -1] = -batch_box_preds[:, :, -1], batch_box_preds[:, :, -2]
batch_reg_preds_temp = self.box_coder._decode(
batch_box_preds[:, :, :self.box_coder.code_size], w, h
)
if i == 1: # y=-y, vy = -vy
batch_reg_preds_temp[:, :, 1] = -batch_reg_preds_temp[:, :, 1]
batch_reg_preds_temp[:, :, 7] = -batch_reg_preds_temp[:, :, 7]
elif i == 2: # x=-x, vx = -vx
batch_reg_preds_temp[:, :, 0] = -batch_reg_preds_temp[:, :, 0]
batch_reg_preds_temp[:, :, 6] = -batch_reg_preds_temp[:, :, 6]
elif i == 3: # x=-x,y=-y, vx=-vx, vy=-vy
batch_reg_preds_temp[:, :, 1] = -batch_reg_preds_temp[:, :, 1]
batch_reg_preds_temp[:, :, 0] = -batch_reg_preds_temp[:, :, 0]
batch_reg_preds_temp[:, :, 7] = -batch_reg_preds_temp[:, :, 7]
batch_reg_preds_temp[:, :, 6] = -batch_reg_preds_temp[:, :, 6]
batch_reg_preds[:, i, :, :] = batch_reg_preds_temp
batch_box_preds_sincos_all = batch_box_preds_sincos_all.mean(dim=1)
batch_box_preds_sincos_all = batch_box_preds_sincos_all.reshape(
batch_size_device, -1, 2)
batch_box_preds_rads = torch.atan2(
batch_box_preds_sincos_all[:, :, 1], batch_box_preds_sincos_all[:, :, 0])
batch_reg_preds = batch_reg_preds.reshape(batch_size_device, 4, H, W, 9)
batch_reg_preds[:, 1] = torch.flip(batch_reg_preds[:, 1], dims=[1])
batch_reg_preds[:, 2] = torch.flip(batch_reg_preds[:, 2], dims=[2])
batch_reg_preds[:, 3] = torch.flip(batch_reg_preds[:, 3], dims=[1, 2])
batch_reg_preds = batch_reg_preds.mean(dim=1)
batch_reg_preds = batch_reg_preds.reshape(batch_size_device, -1, 9)
batch_reg_preds[:, :, -1] = batch_box_preds_rads
else:
batch_box_preds = preds_dict["box_preds"]
batch_cls_preds = preds_dict["cls_preds"].sigmoid()
box_ndim = self.box_n_dim
h, w = batch_box_preds.size()[1:3]
batch_box_preds = batch_box_preds.view(batch_size_device, -1, box_ndim)
num_class_with_bg = self.num_classes[task_id]
if not self.encode_background_as_zeros:
num_class_with_bg = self.num_classes[task_id] + 1
batch_cls_preds = batch_cls_preds.contiguous()
batch_cls_preds = batch_cls_preds.view(batch_size_device, -1, num_class_with_bg)
batch_reg_preds = self.box_coder._decode(
batch_box_preds[:, :, :self.box_coder.code_size], w, h
)
batch_dir_preds = [None] * batch_size_device
rets.append(
self.get_task_detections(
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds,
meta_list,
)
)
num_samples = len(rets[0])
ret_list = []
for i in range(num_samples):
ret = {}
for k in rets[0][i].keys():
if k in ["box3d_lidar", "scores"]:
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k in ["label_preds"]:
flag = 0
for j, num_class in enumerate(self.num_classes):
rets[j][i][k] += flag
flag += num_class
ret[k] = torch.cat([ret[i][k] for ret in rets])
elif k == "metadata":
# metadata
ret[k] = rets[0][i][k]
ret_list.append(ret)
return ret_list
def get_task_detections(
self,
task_id,
num_class_with_bg,
test_cfg,
batch_cls_preds,
batch_reg_preds,
batch_dir_preds=None,
meta_list=None,
):
predictions_dicts = []
post_center_range = test_cfg.post_center_limit_range
if len(post_center_range) > 0:
post_center_range = torch.tensor(
post_center_range,
dtype=batch_reg_preds.dtype,
device=batch_reg_preds.device,
)
for box_preds, cls_preds, dir_preds, meta in zip(
batch_reg_preds,
batch_cls_preds,
batch_dir_preds,
meta_list,
):
box_preds = box_preds.float()
cls_preds = cls_preds.float()
if self.encode_background_as_zeros:
# this doesn't support softmax
assert self.use_sigmoid_score is True
total_scores = cls_preds
else:
# encode background as first element in one-hot vector
if self.use_sigmoid_score:
total_scores = cls_preds[..., 1:]
else:
total_scores = F.softmax(cls_preds, dim=-1)[..., 1:]
feature_map_size_prod = (
batch_reg_preds.shape[1] // self.num_anchor_per_locs[task_id]
)
# get the highest score per prediction, then apply NMS
# to remove overlapping boxes.
if num_class_with_bg == 1:
top_scores = total_scores.squeeze(-1)
top_labels = torch.zeros(
total_scores.shape[0],
device=total_scores.device,
dtype=torch.long,
)
else:
top_scores, top_labels = torch.max(total_scores, dim=-1)
if test_cfg.score_threshold > 0.0:
thresh = torch.tensor(
[test_cfg.score_threshold], device=total_scores.device
).type_as(total_scores)
top_scores_keep = top_scores >= thresh
top_scores = top_scores.masked_select(top_scores_keep)
if top_scores.shape[0] != 0:
if test_cfg.score_threshold > 0.0:
box_preds = box_preds[top_scores_keep]
assert (box_preds[:, 3:6] > 0).cpu().numpy().any()
top_labels = top_labels[top_scores_keep]
boxes_for_nms = box_torch_ops.boxes3d_to_bevboxes_lidar_torch(box_preds)
selected = box_torch_ops.rotate_nms_pcdet(boxes_for_nms, top_scores,
thresh=test_cfg.nms.nms_iou_threshold,
pre_maxsize=test_cfg.nms.nms_pre_max_size,
post_max_size=test_cfg.nms.nms_post_max_size)
else:
selected = []
# if selected is not None:
selected_boxes = box_preds[selected]
selected_labels = top_labels[selected]
selected_scores = top_scores[selected]
# finally generate predictions.
if selected_boxes.shape[0] != 0:
box_preds = selected_boxes
scores = selected_scores
label_preds = selected_labels
final_box_preds = box_preds
final_scores = scores
final_labels = label_preds
if post_center_range is not None:
mask = (final_box_preds[:, :3] >= post_center_range[:3]).all(1)
mask &= (final_box_preds[:, :3] <= post_center_range[3:]).all(1)
predictions_dict = {
"box3d_lidar": final_box_preds[mask],
"scores": final_scores[mask],
"label_preds": label_preds[mask],
"metadata": meta,
}
else:
predictions_dict = {
"box3d_lidar": final_box_preds,
"scores": final_scores,
"label_preds": final_labels,
"metadata": meta,
}
else:
dtype = batch_reg_preds.dtype
device = batch_reg_preds.device
predictions_dict = {
"box3d_lidar": torch.zeros([0, box_preds.shape[1]], dtype=dtype, device=device),
"scores": torch.zeros([0], dtype=dtype, device=device),
"label_preds": torch.zeros(
[0], dtype=top_labels.dtype, device=device
),
"metadata": meta,
}
predictions_dicts.append(predictions_dict)
return predictions_dicts
| 46.541618 | 133 | 0.540227 | 4,925 | 39,700 | 4.033503 | 0.096041 | 0.033828 | 0.038611 | 0.019935 | 0.420992 | 0.349711 | 0.26821 | 0.225925 | 0.169444 | 0.152882 | 0 | 0.015431 | 0.358463 | 39,700 | 852 | 134 | 46.596244 | 0.764537 | 0.093325 | 0 | 0.204511 | 0 | 0 | 0.036144 | 0.000615 | 0 | 0 | 0 | 0 | 0.009023 | 1 | 0.018045 | false | 0 | 0.02406 | 0 | 0.058647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e501d8d323b13656dcc8aa99310855c82f2554fb | 4,267 | py | Python | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | 4 | 2021-09-23T17:44:01.000Z | 2022-01-10T07:14:25.000Z | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | 1 | 2021-10-18T07:41:31.000Z | 2021-10-18T07:41:31.000Z | run_full_dataset.py | TimoK93/ApLift | 732070175ab6bf76db5b0c793cdb4a1fb5d235d7 | [
"MIT"
] | null | null | null | """
A script to run the main script with all sequences of a dataset.
To use the script a config.yaml needs to be specified.
Example usage:
python3 main.py config/example_config.yaml
if "pretrained_model_path" is passed as an argument in the config, training is skipped and pretrained models are used
for the inference.
"""
import os
import shutil
from copy import copy
os.environ["CUDA_VISIBLE_DEVICES"] = "" # GPUs are not necessary!
from main import run_pipeline
from src.utilities.config_reader import main_function
def copyanything(src, dst):
for root, dirs, files in os.walk(src):
for name in files:
dir = root.replace(src, dst)
dst_file = os.path.join(dir, name)
if os.path.exists(dst_file):
print("Model", dst_file, "is already existing!")
os.makedirs(dir, exist_ok=True)
shutil.copy(os.path.join(root, name), os.path.join(dir, name))
@main_function
def main(working_dir, dataset: str, pretrained_models_path=None, **kwargs):
""" Runs the main pipeline on all sequences of a dataset """
''' Create directory and copy pretrained models '''
os.makedirs(working_dir, exist_ok=True)
if pretrained_models_path is not None:
copyanything(os.path.join(pretrained_models_path), working_dir)
''' Creates a list of jobs to be executed'''
jobs = list()
if dataset == "MOT17":
detectors = ["FRCNN", "DPM", "SDP"]
train_sequences = [2, 4, 5, 9, 10, 11, 13]
test_sequences = [1, 3, 6, 7, 8, 12, 14]
for d in detectors:
for t in train_sequences:
train = ["MOT17-%s-%s" % (str(_).rjust(2, "0"), d) for _ in train_sequences if _ != t]
val = ["MOT17-%s-%s" % (str(t).rjust(2, "0"), d)]
jobs.append(dict(train=train, val=val, detector=d))
for t in test_sequences:
train = ["MOT17-%s-%s" % (str(_).rjust(2, "0"), d) for _ in train_sequences]
val = ["MOT17-%s-%s" % (str(t).rjust(2, "0"), d)]
jobs.append(dict(train=train, val=val, detector=d))
elif dataset == "MOT20":
test_sequences = [4, 6, 7, 8]
train_sequences = [1, 2, 3, 5]
for t in train_sequences:
train = ["MOT20-%s" % str(_).rjust(2, "0") for _ in train_sequences if _ != t]
val = ["MOT20-%s" % str(t).rjust(2, "0")]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
for t in test_sequences:
train = ["MOT20-%s" % str(_).rjust(2, "0") for _ in train_sequences]
val = ["MOT20-%s" % str(t).rjust(2, "0")]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
elif dataset == "MOT15":
test_sequences = [
'Venice-1', 'TUD-Crossing', 'PETS09-S2L2', 'KITTI-19', 'KITTI-16', 'ETH-Jelmoli', 'ETH-Linthescher',
'ETH-Crossing', 'AVG-TownCentre', 'ADL-Rundle-3', 'ADL-Rundle-1'
]
train_sequences = [
'Venice-2', 'KITTI-17', 'KITTI-13', 'ETH-Sunnyday', 'ETH-Pedcross2', 'ETH-Bahnhof', 'ADL-Rundle-8',
'TUD-Stadtmitte', 'TUD-Campus', 'ADL-Rundle-6', 'PETS09-S2L1'
]
for t in train_sequences:
train = [_ for _ in train_sequences if _ != t]
val = [t]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
for t in test_sequences:
train = [_ for _ in train_sequences if _ != t]
val = [t]
jobs.append(dict(train=train, val=val, detector="FRCNN"))
''' Runs the jobs sequentially '''
features = copy(kwargs["data_config"]["edge_features"])
for job in jobs:
print("Run Job:", job)
if os.path.exists(os.path.join(working_dir, job["val"][0], job["val"][0] + ".txt")):
print("... Result file already existing!")
continue
kwargs["data_config"]["edge_features"] = copy(features)
kwargs["data_config"]["dataset"]["detector"] = job["detector"]
kwargs["training_config"]["sequences_for_training"] = job["train"]
kwargs["training_config"]["sequences_for_inference"] = job["val"]
run_pipeline(working_dir=working_dir, **kwargs)
if __name__ == "__main__":
main()
| 40.638095 | 117 | 0.588704 | 578 | 4,267 | 4.207612 | 0.261246 | 0.069079 | 0.059211 | 0.046875 | 0.362253 | 0.280839 | 0.263158 | 0.260691 | 0.260691 | 0.260691 | 0 | 0.028779 | 0.258964 | 4,267 | 104 | 118 | 41.028846 | 0.740354 | 0.093743 | 0 | 0.266667 | 0 | 0 | 0.172127 | 0.012084 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026667 | false | 0 | 0.066667 | 0 | 0.093333 | 0.04 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5033a61e857baa0aed9b97b41edbcf668557962 | 298 | py | Python | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | _Training_/RegEx - HackerRank/2. Character Class/Excluding Specific Characters.py | JUD210/Study-Note | 2add9db3f11d99370f49878f0c19e9caa60d2d02 | [
"MIT"
] | null | null | null | # https://www.hackerrank.com/challenges/excluding-specific-characters/problem
import re
# Inputs
standard_input = """think?"""
Regex_Pattern = (
r"^[^\d][^aeiou][^bcDF][^\r\n\t\f\s][^AEIOU][^.,]$"
) # Do not delete 'r'.
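# The pattern matches exactly six characters, in order: not a digit, not a
# lowercase vowel, not one of b/c/D/F, not a whitespace character, not an
# uppercase vowel, and not '.' or ','. The input "think?" satisfies all six,
# so the script prints "true".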
print(str(bool(re.search(Regex_Pattern, input()))).lower())
# true
| 18.625 | 77 | 0.64094 | 41 | 298 | 4.585366 | 0.829268 | 0.12766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114094 | 298 | 15 | 78 | 19.866667 | 0.712121 | 0.355705 | 0 | 0 | 0 | 0 | 0.28877 | 0.256684 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e503d96de70079ddac429727f7983b8a0fcdef59 | 354 | py | Python | toroid/toroid/pairs.py | LeoTindall/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | null | null | null | toroid/toroid/pairs.py | LeoTindall/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | 1 | 2016-08-06T23:20:56.000Z | 2016-08-06T23:20:56.000Z | toroid/toroid/pairs.py | SilverWingedSeraph/corewar32 | c29891ca67c01dd65d01d120364a010eb12eb255 | [
"Apache-2.0"
] | null | null | null | def make_pairings(warriors):
if len(warriors) == 0:
return False, False
pairings = []
for (warrior1, warrior2) in zip(warriors[0::2], warriors[1::2]):
pairings.append((warrior1, warrior2))
if len(warriors) % 2 == 0:
odd_one_out = False
else:
odd_one_out = warriors[-1]
return pairings, odd_one_out
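# Illustrative usage (derived from the logic above, not in the original file):
# make_pairings(["a", "b", "c"]) -> ([("a", "b")], "c")
# make_pairings([]) -> (False, False)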
| 29.5 | 68 | 0.610169 | 47 | 354 | 4.446809 | 0.446809 | 0.086124 | 0.129187 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045802 | 0.259887 | 354 | 11 | 69 | 32.181818 | 0.751908 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e503f5404abfe29fbefb2c59950b6c8386b55b14 | 5,814 | py | Python | Nets/TRNetShared.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | 3 | 2021-06-11T02:46:06.000Z | 2021-08-17T02:59:30.000Z | Nets/TRNetShared.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | null | null | null | Nets/TRNetShared.py | AndresOtero/TensorDecompositionMachineLearning | 455f16b405ec9d031999b0ebf9c5a68d3c20b233 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torch.nn.functional as F
from Nets.TRNetSerialized import TRNetSerialized
from Nets.TTNetParallel import FirstKernelTensorTrain, FeatureMap, TTKernel
from Nets.TTNetShared import KernelSharedTensorTrain
from Utils import Constant
from Utils.RanksFactory import RanksFactory
from Utils.TensorTools import group_divisions
from torch.nn import Parameter
class LastKernelSharedTensorRing(nn.Module):
def __init__(self, categories, first_rank, m, second_rank, init_value):
super(LastKernelSharedTensorRing, self).__init__()
self.categories = categories
self.first_rank = first_rank
self.m = m
self.second_rank = second_rank
self.weight = Parameter(torch.randn(categories, first_rank, m, second_rank))
self.init_value = init_value
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight, gain=self.init_value)
def forward(self, input, state):
x = torch.einsum('bj,rbi->rbji', [input, state]) # outer product
x = torch.einsum('cijk,rbji->cbrk', [self.weight, x]) # multiply by core
return x
class KernelSharedTensorRing(nn.Module):
def __init__(self, first_rank, m, second_rank, init_value):
super(KernelSharedTensorRing, self).__init__()
self.first_rank = first_rank
self.m = m
self.second_rank = second_rank
self.weight = Parameter(torch.randn(first_rank, m, second_rank))
self.init_value = init_value
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight, gain=self.init_value)
def forward(self, input, state):
x = torch.einsum('bj,rbi->rbji', [input, state]) # outer product
x = torch.einsum('ijk,rbji->rbk', [self.weight, x]) # multiply by core
return x
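# Shape sketch (illustrative, assuming input [B, m] and state [R, B, r1]):
# the outer product yields [R, B, m, r1]; contracting it with the [r1, m, r2]
# core produces the next state [R, B, r2], where R is the tensor-ring trace
# dimension carried through the chain.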
class TRNetShared(nn.Module):
def __init__(self, net_params):
super(TRNetShared, self).__init__()
self.ranks = RanksFactory.create_tensor_ring_shared_ranks(net_params)
self.kernels = []
self.m = net_params.get_m()
self.n = net_params.get_n()
self.amount_of_divisions = net_params.get_amount_of_divisions()
self.batch_size = net_params.get_batch_size()
self.feature_map = FeatureMap(self.n, self.m, self.amount_of_divisions, self.batch_size)
self.amount_of_divisions = net_params.amount_of_divisions
self.categories = net_params.categories
self.last_kernel = LastKernelSharedTensorRing(self.categories, self.ranks[Constant.SECOND], self.m,
self.ranks[Constant.THIRD], net_params.init_value)
self.shared_kernel = KernelSharedTensorRing(self.ranks[Constant.SECOND], self.m, self.ranks[Constant.THIRD],
net_params.init_value)
def forward(self, tensor):
featured_tensor = self.feature_map(tensor)
division_divided_tensors = group_divisions(featured_tensor, self.amount_of_divisions)
batch_size = tensor.size()[Constant.FIRST]
state = torch.ones(self.ranks[Constant.FIRST], batch_size, self.ranks[Constant.SECOND])
times = division_divided_tensors.size()[Constant.FIRST]
for t in range(0, times):
division_divided_input = division_divided_tensors[t]
state = self.shared_kernel(division_divided_input, state)
pad_input = torch.ones(batch_size, self.m)
state = self.last_kernel(pad_input, state)
state = TRNetSerialized.calculate_traces_serialized(state)
return F.log_softmax(state, dim=1)
def extra_repr(self):
return 'ranks={}'.format(
self.ranks
)
def get_number_of_parameters(self):
for p in self.parameters():
print(p.numel())
self.number = sum(p.numel() for p in self.parameters())
return self.number
class TRNetSharedWithoutFeatureMap(nn.Module):
def __init__(self, net_params):
super(TRNetSharedWithoutFeatureMap, self).__init__()
self.ranks = RanksFactory.create_tensor_ring_shared_ranks(net_params)
self.kernels = []
self.m = net_params.get_m()
self.n = net_params.get_n()
self.amount_of_divisions = net_params.get_amount_of_divisions()
self.batch_size = net_params.get_batch_size()
self.amount_of_divisions = net_params.amount_of_divisions
self.categories = net_params.categories
self.last_kernel = LastKernelSharedTensorRing(self.categories, self.ranks[Constant.SECOND], self.m,
self.ranks[Constant.LAST], net_params.init_value)
self.shared_kernel = KernelSharedTensorRing(self.ranks[Constant.SECOND], self.n, self.ranks[Constant.THIRD],
net_params.init_value)
def forward(self, tensor):
division_divided_tensors = tensor.transpose(0,1)
batch_size = tensor.size()[Constant.FIRST]
state = torch.ones(self.ranks[Constant.FIRST], batch_size, self.ranks[Constant.SECOND])
times = division_divided_tensors.size()[Constant.FIRST]
for t in range(0, times):
division_divided_input = division_divided_tensors[t]
state = self.shared_kernel(division_divided_input, state)
pad_input = torch.ones(batch_size, self.m)
state = self.last_kernel(pad_input, state)
state = TRNetSerialized.calculate_traces_serialized(state)
return state
def extra_repr(self):
return 'ranks={}'.format(
self.ranks
)
def get_number_of_parameters(self):
self.number = sum(p.numel() for p in self.parameters())
return self.number | 43.066667 | 116 | 0.673891 | 707 | 5,814 | 5.278642 | 0.154173 | 0.048232 | 0.054662 | 0.033762 | 0.78403 | 0.768489 | 0.757503 | 0.757503 | 0.721597 | 0.702304 | 0 | 0.001117 | 0.230134 | 5,814 | 135 | 117 | 43.066667 | 0.832663 | 0.009804 | 0 | 0.654867 | 0 | 0 | 0.01182 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.123894 | false | 0 | 0.088496 | 0.017699 | 0.318584 | 0.00885 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 |
e504a4528ab13dc7037b0bc87eb63a2bf6c3d4cb | 2,550 | py | Python | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 4 | 2017-04-24T10:17:28.000Z | 2020-05-28T06:25:03.000Z | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 9 | 2015-02-23T14:27:37.000Z | 2021-02-24T13:23:41.000Z | tests/test_deny_mime_type_validator.py | fastmonkeys/pontus | 6542190aae896cd79c55f7f43e98a6bf3cbc613b | [
"MIT"
] | 1 | 2017-08-14T16:40:44.000Z | 2017-08-14T16:40:44.000Z | # -*- coding: utf-8 -*-
import os
import pytest
import boto3
from pontus.exceptions import ValidationError
from pontus.validators import DenyMimeType
class TestDenyMimeTypeValidator(object):
@pytest.fixture
def jpeg_key(self, bucket):
with open(os.path.join(
os.path.dirname(__file__),
'data',
'example.jpg'
), 'rb') as image:
key_name = 'example.jpg'
obj = boto3.resource('s3').Object(bucket.name, key_name)
obj.put(
Body=image
)
return obj
def test_raises_validation_error_if_invalid_mime_type(
self,
jpeg_key
):
validator = DenyMimeType(mime_type='image/jpeg')
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg is in denied list "
"image/jpeg."
)
def test_does_not_raise_validation_error_if_valid_mime_type(
self,
jpeg_key
):
validator = DenyMimeType(mime_type='image/png')
validator(jpeg_key)
def test_repr(self):
assert repr(DenyMimeType(mime_type='image/png')) == (
u"<DenyMimeType mime_types='image/png'>"
)
def test_raises_validation_error_if_mime_type_not_in_valid_mime_types(
self,
jpeg_key
):
validator = DenyMimeType(mime_types=['image/jpeg', 'application/csv'])
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg is in denied list "
"['image/jpeg', 'application/csv']."
)
def test_doesnt_raise_validation_error_if_mime_type_in_valid_mime_types(
self,
jpeg_key
):
validator = DenyMimeType(mime_types=['image/png', 'application/csv'])
validator(jpeg_key)
def test_raises_validation_error_if_mime_type_doesnt_match_regex(
self,
jpeg_key
):
validator = DenyMimeType(regex=r'image\/.*')
with pytest.raises(ValidationError) as e:
validator(jpeg_key)
assert str(e.value) == (
"Invalid file: File MIME type image/jpeg matches denied regex "
"r'image\/.*'."
)
def test_doesnt_raise_validation_error_if_mime_type_matches_regex(
self,
jpeg_key
):
validator = DenyMimeType(regex=r'application\/.*')
validator(jpeg_key)
| 29.310345 | 78 | 0.608627 | 292 | 2,550 | 5.034247 | 0.25 | 0.061905 | 0.069388 | 0.081633 | 0.644218 | 0.57483 | 0.554422 | 0.554422 | 0.444218 | 0.385714 | 0 | 0.002221 | 0.293725 | 2,550 | 86 | 79 | 29.651163 | 0.813992 | 0.008235 | 0 | 0.432432 | 0 | 0 | 0.159478 | 0.009102 | 0 | 0 | 0 | 0 | 0.054054 | 1 | 0.108108 | false | 0 | 0.067568 | 0 | 0.202703 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e5052a97fcc4c82072c34cd78471734f05a4fd9c | 1,339 | py | Python | proyecto utilizando pewee migrations/models.py | Raul-Flores/ORM-example | ff289f74f858514cebefe7070c3688ad773a0e2a | [
"MIT"
] | null | null | null | proyecto utilizando pewee migrations/models.py | Raul-Flores/ORM-example | ff289f74f858514cebefe7070c3688ad773a0e2a | [
"MIT"
] | null | null | null | proyecto utilizando pewee migrations/models.py | Raul-Flores/ORM-example | ff289f74f858514cebefe7070c3688ad773a0e2a | [
"MIT"
] | null | null | null |
import pymysql
import sqlite3
import psycopg2
from peewee import *
db = "postgres"
class BaseModel(Model):
class Meta:
global db
secretvar= "Secret"
while True:
try:
#db = input("BD Options: [mariadb|postgres|sqlite] :")
db_migration = db
if db == "mariadb":
db = MySQLDatabase("TEST", host="localhost", port=3306, user="root", password=secretvar)
break
elif db == "postgres":
db = PostgresqlDatabase("TEST", host="localhost", port=5432, user="postgres", password=secretvar)
break
elif db == "sqlite":
db = SqliteDatabase('TEST.db')
break
except:
print ("Los valores introducidos no son correctos")
database = db
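# peewee convention: setting Meta.database binds the backend chosen above
# to every model that subclasses BaseModel.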
class interface(BaseModel):
device_ip = CharField(max_length=40)
intf_name = CharField(max_length=40)
description = CharField(max_length=90)
is_enabled = BooleanField()
mac_address = CharField(max_length=30)
mtu = IntegerField()
speed = IntegerField()
status_date = DateTimeField()
validation1 = BooleanField(default=True)
detalles = CharField(max_length=50)
#class Meta:
# db_table = 'interface' | 33.475 | 117 | 0.570575 | 131 | 1,339 | 5.740458 | 0.580153 | 0.079787 | 0.119681 | 0.055851 | 0.074468 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023438 | 0.330844 | 1,339 | 40 | 118 | 33.475 | 0.815848 | 0.067214 | 0 | 0.085714 | 0 | 0 | 0.097111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.057143 | 0.114286 | 0 | 0.485714 | 0.028571 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
e50744ae0a91afd75f5121562b3f88c9fadcfea8 | 1,966 | py | Python | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | MyModel/signLanguageTranslator.py | rahulmishra11/Sign-Language-Translator | 83b6907f722324d01142ab25e9e9cf806c51b0d3 | [
"Apache-2.0"
] | null | null | null | import tensorflow as tf
import pandas as pd
import numpy as np
import tensorflow.keras as keras
train = pd.read_csv("./sign_mnist_train/sign_mnist_train.csv")
test = pd.read_csv("./sign_mnist_test/sign_mnist_test.csv")
# put labels into y_train variable
Y_train = train["label"]
# Drop 'label' column
X_train = train.drop(labels = ["label"],axis = 1)
# put labels into y_test variable
Y_test = test["label"]
# Drop 'label' column
X_test = test.drop(labels = ["label"],axis = 1)
# Normalize the data
X_train = X_train / 255.0
X_test = X_test / 255.0
print("x_train shape: ",X_train.shape)
print("x_test shape: ",X_test.shape)
#Reshape
X_train = X_train.values.reshape(-1,28,28,1)
X_test = X_test.values.reshape(-1,28,28,1)
print("x_train shape: ",X_train.shape)
print("x_test shape: ",X_test.shape)
model = keras.models.Sequential([
keras.layers.Conv2D(filters=64, kernel_size=3, input_shape=[28, 28, 1]),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.Conv2D(filters=128, kernel_size=3, activation='relu', padding='same'),
keras.layers.MaxPooling2D(pool_size=2),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=64, activation='relu'),
keras.layers.Dropout(0.5),
keras.layers.Dense(units=25, activation='softmax'),
])
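# Note (assumption about the dataset): Sign Language MNIST uses labels 0-24
# (J and Z are omitted because they require motion), hence the 25-unit
# softmax output layer above.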
model.summary()
model.compile(
loss="sparse_categorical_crossentropy",
optimizer = 'adam',
metrics=['accuracy']
)
history = model.fit(X_train,Y_train,
epochs=10,)
pd.DataFrame(history.history).plot()
model.save("sign_mnist_train.h5")
print(model.evaluate(X_test,Y_test))
| 29.787879 | 87 | 0.715158 | 301 | 1,966 | 4.508306 | 0.269103 | 0.113486 | 0.062638 | 0.08843 | 0.537214 | 0.450258 | 0.422255 | 0.422255 | 0.422255 | 0.422255 | 0 | 0.041351 | 0.126653 | 1,966 | 65 | 88 | 30.246154 | 0.748981 | 0.066124 | 0 | 0.288889 | 0 | 0 | 0.143794 | 0.058502 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.088889 | 0 | 0.088889 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e507533d51e8ae8cde7f283f5c299ebd345bec98 | 5,817 | py | Python | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | 3 | 2019-01-28T12:21:14.000Z | 2019-02-10T19:58:07.000Z | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | gammapy/detect/tests/test_kernel.py | grburgess/gammapy | 609e460698caca7223afeef5e71826c7b32728d1 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
from numpy.testing import assert_allclose
from astropy.io import fits
from astropy.units import Quantity
from astropy.coordinates.angles import Angle
from ...utils.testing import requires_dependency, requires_data
from ...image import SkyImage
from ...stats import significance
from ...datasets import FermiGalacticCenter
from ..kernel import KernelBackgroundEstimatorData, KernelBackgroundEstimator
@requires_dependency('scipy')
def test_KernelBackgroundEstimatorData():
"""Tests compute correlated maps in KernelBackgroundEstimatorData.
This is the only method in KernelBackgroundEstimatorData that actually calculates anything.
"""
# Set up test counts and background
counts_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1, fill=42).to_image_hdu()
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
background_data = 42 * np.ones_like(counts, dtype=float)
# Single unit pixel kernel so should actually be no change.
background_kernel = np.ones((1, 1))
images = KernelBackgroundEstimatorData(counts, background_data)
images.compute_correlated_maps(background_kernel)
# Test significance image against Li & Ma significance value
expected = significance(counts, background_data)
actual = images.significance
assert_allclose(actual, expected)
@requires_dependency('scipy')
@requires_data('gammapy-extra')
class TestKernelBackgroundEstimator(object):
def setup_class(self):
"""Prepares appropriate input and defines inputs for test cases.
"""
from scipy.ndimage import convolve
# Load/create example model images
counts_hdu = SkyImage.empty(nxpix=10, nypix=10, binsz=1, fill=42).to_image_hdu()
counts_hdu.data[4][4] = 1000
counts = counts_hdu.data
# Initial counts required by one of the tests.
self.counts = counts
psf = FermiGalacticCenter.psf()
psf = psf.table_psf_in_energy_band(Quantity([10, 500], 'GeV'))
kernel_array = psf.kernel(pixel_size=Angle(1, 'deg'),
offset_max=Angle(3, 'deg'), normalize=True)
counts_blob = convolve(counts, kernel_array, mode='constant')
self.counts_blob = counts_blob
# Start with flat background estimate
# Background must be provided as an ImageHDU
images = KernelBackgroundEstimatorData(counts=counts, header=counts_hdu.header)
images_blob = KernelBackgroundEstimatorData(counts=counts_blob, header=counts_hdu.header)
source_kernel = np.ones((1, 3))
background_kernel = np.ones((5, 3))
significance_threshold = 4
mask_dilation_radius = 1
# Loads prepared inputs into estimator
self.kbe = KernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.kbe2 = KernelBackgroundEstimator(
images,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
self.kbe_blob = KernelBackgroundEstimator(
images_blob,
source_kernel,
background_kernel,
significance_threshold,
mask_dilation_radius
)
def test_run_iteration_point(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.kbe.run_iteration()
# Should be run twice to update the mask
self.kbe.run_iteration()
mask = self.kbe.mask_image_hdu.data
background = self.kbe.background_image_hdu.data
# Check mask matches expectations
expected_mask = np.ones_like(self.counts)
expected_mask[4][3] = 0
expected_mask[4][4] = 0
expected_mask[4][5] = 0
assert_allclose(mask.astype(int), expected_mask)
# Check background, should be 42 uniformly
assert_allclose(background.astype(float), 42 * np.ones((10, 10)))
def test_run_iteration_blob(self):
"""Asserts that mask and background are as expected according to input."""
# Call the run_iteration code as this is what is explicitly being tested
self.kbe_blob.run_iteration()
# Should be run twice to update the mask
self.kbe_blob.run_iteration()
background = self.kbe_blob.background_image_hdu.data
# Check background, should be 42 uniformly within 10%
assert_allclose(background, 42 * np.ones((10, 10)), rtol=0.15)
def test_run(self):
"""Tests run script."""
mask, background = self.kbe2.run()
assert_allclose(mask.sum(), 97)
assert_allclose(background, 42 * np.ones((10, 10)))
def test_save_files(self, tmpdir):
"""Tests that files are saves, and checks values within them."""
# Create temporary file to write output into
self.kbe.run_iteration(1)
self.kbe.save_files(base_dir=str(tmpdir), index=0)
filename = tmpdir / '00_mask.fits'
mask = fits.open(str(filename))[1].data
filename = tmpdir / '00_significance.fits'
significance = fits.open(str(filename))[1].data
filename = tmpdir / '00_background.fits'
background = fits.open(str(filename))[1].data
# Checks values in files against known results for one iteration.
assert_allclose(mask.sum(), 97)
assert_allclose(significance.sum(), 157.316195729298)
assert_allclose(background.sum(), 4200)
| 36.130435 | 97 | 0.677153 | 698 | 5,817 | 5.47851 | 0.302292 | 0.020136 | 0.013598 | 0.021967 | 0.317469 | 0.299425 | 0.275366 | 0.252877 | 0.234048 | 0.195607 | 0 | 0.026203 | 0.238955 | 5,817 | 160 | 98 | 36.35625 | 0.837588 | 0.223655 | 0 | 0.28866 | 0 | 0 | 0.020184 | 0 | 0 | 0 | 0 | 0 | 0.103093 | 1 | 0.061856 | false | 0 | 0.123711 | 0 | 0.195876 | 0.010309 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e508b0cb043508fe01e3e1d06e6baa67a2130ba3 | 4,772 | py | Python | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | morsesmale.py | scotthellman/discrete-topology | 6182fe607868d88c462c185be8629a35ad2d7c37 | [
"MIT"
] | null | null | null | import networkx as nx
import numpy as np
import scipy
import graph
import itertools
from collections import defaultdict
def calculate_persistence(crystal, other, minimum_value, G, function_vals):
minimums = []
min_vertices = []
other = set(other)
for vertex in crystal:
neighbors = set(G.neighbors(vertex)) & other
if len(neighbors) == 0:
continue
value = function_vals[vertex]
worst_case = minimum_value - value
minimum_dist = None
minimum_node = None
for n in neighbors:
diff = minimum_value - function_vals[n]
if minimum_dist is None or diff > worst_case and diff < minimum_dist:
minimum_dist = diff
minimum_node = n
if minimum_dist < worst_case:
minimum_dist = worst_case
minimum_node = vertex
minimums.append(minimum_dist)
min_vertices.append(minimum_node)
try:
chosen_index = np.argmin(minimums)
return minimums[chosen_index], min_vertices[chosen_index]
except ValueError:
return float("inf"), None
def find_filtrations(G, function_vals, msc):
#TODO: throw exception when 2 values are the same
# Merge score (reconstructed from a garbled source comment): for each pair of
# crystals (p_a, p_k), persistence is the minimum over shared-boundary
# vertices x_i of (f(min of p_a) - f(x_i)); see calculate_persistence above.
crystals = defaultdict(list)
for i,label in enumerate(msc):
crystals[label].append(i)
filtration = [crystals]
while len(crystals) > 1:
        # find the pair of crystals with the smallest persistence
best_pair = None
best_persistence = None
for crystal in crystals:
minimum_val = function_vals[crystal[0]]
for other in crystals:
if other != crystal:
persistence = calculate_persistence(crystals[crystal], crystals[other],
minimum_val, G, function_vals)[0]
if best_persistence is None or persistence < best_persistence:
best_pair = (crystal, other)
best_persistence = persistence
new_crystals = defaultdict(list)
for crystal,values in crystals.items():
if crystal != best_pair[0]:
new_crystals[crystal].extend(values)
else:
new_crystals[best_pair[1]].extend(values)
filtration.append(new_crystals)
crystals = new_crystals
return filtration
def generate_morse_smale(G, pdist, function_vals):
maxima, minima, ascent, descent = find_extrema(G, pdist, function_vals)
max_labels = assign_extrema(G, maxima, ascent)
min_labels = assign_extrema(G, minima, descent)
return list(zip(min_labels, max_labels))
def assign_extrema(G, extrema, path):
assignments = [0] * len(G.nodes())
for node in G:
traverser = node
while traverser not in extrema:
traverser = path[traverser]
assignments[node] = traverser
return assignments
def find_extrema(G, pdist, function_vals):
ascent = {}
descent = {}
maxima = []
minima = []
for i,value in enumerate(function_vals):
        neighbors = np.array(list(G.neighbors(i)))  # G.neighbors returns an iterator in networkx >= 2
distances = np.array([d for n,d in enumerate(pdist[i]) if n in neighbors])
differences = np.array([function_vals[n] - value for n in neighbors])
normalized = differences / distances
ordered = np.argsort(normalized)
if np.all(differences < 0):
maxima.append(i)
ascent[i] = i
descent[i] = neighbors[ordered[0]]
elif np.all(differences > 0):
minima.append(i)
ascent[i] = neighbors[ordered[-1]]
descent[i] = i
else:
ascent[i] = neighbors[ordered[-1]]
descent[i] = neighbors[ordered[0]]
return maxima, minima, ascent, descent
def get_filtrations(pdist, function_vals, k=2):
if k is None:
G = graph.generate_gabriel_graph(pdist)
else:
G = graph.generate_knn_graph(pdist, k)
msc = generate_morse_smale(G, pdist, function_vals)
filtrations = find_filtrations(G, function_vals, msc)
return filtrations
if __name__ == "__main__":
import scipy.spatial
values = np.array(range(20)).reshape(20,1)
pairs = scipy.spatial.distance.pdist(values)
pdist = scipy.spatial.distance.squareform(pairs)
G = graph.generate_knn_graph(pdist, 2)
    func_vals = (values % 5).ravel()  # flatten so each vertex maps to a scalar value
maxs, mins, ascent, descent = find_extrema(G, pdist, func_vals)
msc = generate_morse_smale(G, pdist, func_vals)
print(msc)
filtration = find_filtrations(G, func_vals, msc)
print("-"*20)
for f in filtration:
print(f)
get_filtrations(pdist, func_vals)
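    # A small interpretation sketch (an addition, not in the original file):
    # each level of the filtration maps a crystal label, i.e. a (minimum,
    # maximum) extremum pair, to its member vertex indices, and each step
    # merges one crystal into a neighbor.
    for level, crystals in enumerate(filtration):
        print("level %d: %d crystals" % (level, len(crystals)))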
| 34.832117 | 91 | 0.615884 | 567 | 4,772 | 5.017637 | 0.241623 | 0.059051 | 0.029877 | 0.025308 | 0.165905 | 0.134622 | 0.047803 | 0 | 0 | 0 | 0 | 0.007136 | 0.295264 | 4,772 | 137 | 92 | 34.832117 | 0.838537 | 0.028709 | 0 | 0.059322 | 0 | 0 | 0.00259 | 0 | 0 | 0 | 0 | 0.007299 | 0 | 1 | 0.050847 | false | 0 | 0.059322 | 0 | 0.169492 | 0.025424 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e508ffc9689183a1879c3556805bdcd71560a700 | 4,390 | py | Python | Packs/ZeroTrustAnalyticsPlatform/Integrations/ZeroTrustAnalyticsPlatform/test_data/xsoar_data.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 2 | 2021-12-06T21:38:24.000Z | 2022-01-13T08:23:36.000Z | Packs/ZeroTrustAnalyticsPlatform/Integrations/ZeroTrustAnalyticsPlatform/test_data/xsoar_data.py | mazmat-panw/content | 024a65c1dea2548e2637a9cbbe54966e9e34a722 | [
"MIT"
] | 87 | 2022-02-23T12:10:53.000Z | 2022-03-31T11:29:05.000Z | Packs/ZeroTrustAnalyticsPlatform/Integrations/ZeroTrustAnalyticsPlatform/test_data/xsoar_data.py | henry-sue-pa/content | 043c6badfb4f9c80673cad9242fdea72efe301f7 | [
"MIT"
] | 2 | 2022-01-05T15:27:01.000Z | 2022-02-01T19:27:43.000Z | def event_response():
return [
{
"ata_event_count": 1,
"datetime_created": "2021-05-11T20:11:30Z",
"fields": [
{
"key": "auto_run",
"label": "Auto Run",
"value": "False",
"order": 0,
},
{
"key": "event_name",
"label": "Event Name",
"value": "threat_quarantined",
"order": 1,
},
{
"key": "event_timestamp",
"label": "Event Timestamp",
"value": "2021-05-11T20:11:30.728667",
"order": 2,
},
],
"trigger": True,
},
]
def alert_response():
return [
{
"datetime_created": "2021-05-11T20:11:31Z",
"datetime_closed": None,
"datetime_firstevent": "2021-05-11T20:11:30Z",
"datetime_events_added": "2021-05-11T20:11:31Z",
"datetime_org_assigned": "2021-05-11T20:11:31Z",
"id": 1,
"status": "assigned",
"description": "Test Alert 1",
"url": "http://some_mock_url/#/incidents/1",
"xsoar_trigger_events": event_response(),
"xsoar_trigger_kv": trigger_event_kv(),
"xsoar_mirror_direction": "Both",
"xsoar_mirror_instance": "dummy_instance",
"xsoar_mirror_id": "1",
"xsoar_mirror_tags": ["comment_tag", "escalate_tag"],
},
{
"datetime_created": "2021-05-11T20:09:50Z",
"datetime_closed": None,
"datetime_firstevent": "2021-05-11T20:09:48Z",
"datetime_events_added": "2021-05-11T20:09:50Z",
"datetime_org_assigned": "2021-05-11T20:09:50Z",
"id": 2,
"status": "assigned",
"description": "Test Alert 2",
"url": "http://some_mock_url/#/incidents/2",
"xsoar_trigger_events": event_response(),
"xsoar_trigger_kv": trigger_event_kv(),
"xsoar_mirror_direction": "Both",
"xsoar_mirror_instance": "dummy_instance",
"xsoar_mirror_id": "2",
"xsoar_mirror_tags": ["comment_tag", "escalate_tag"],
},
]
def alert_response_remote():
return {
"datetime_created": "2021-05-11T20:11:31Z",
"datetime_closed": None,
"datetime_firstevent": "2021-05-11T20:11:30Z",
"datetime_events_added": "2021-05-11T20:11:31Z",
"datetime_org_assigned": "2021-05-11T20:11:31Z",
"id": 1,
"status": "assigned",
"description": "Test Alert 1",
"url": "http://some_mock_url/#/incidents/1",
"xsoar_trigger_events": event_response(),
"in_mirror_error": "",
}
def comment_response():
return [
{
"comment": "Test comment",
"datetime_created": "2021-05-10T19:36:48Z",
"id": 1,
"user": user_response(),
},
{
"comment": "Closing alert due to duplicate.",
"datetime_created": "2021-05-10T19:50:18Z",
"id": 2,
"user": user_response(),
},
]
def user_response():
return {
"id": 1,
"name": "Active User",
"email": "test@test",
"organization": {"id": 1, "name": "dummy_org", "psa_id": "dummy_id"},
}
def trigger_event_kv():
return {
"auto_run": "False",
"event_name": "threat_quarantined",
"event_timestamp": "2021-05-11T20:11:30.728667",
}
def comment_entries():
return [
{
"Type": 1,
"ContentsFormat": "json",
"Contents": comment_response()[0],
"HumanReadable": "Test comment\n\nSent by Active User (test@test) via ZTAP",
"ReadableContentsFormat": "text",
"Note": True,
"Tags": [],
},
{
"Type": 1,
"ContentsFormat": "json",
"Contents": comment_response()[1],
"HumanReadable": "Closing alert due to duplicate.\n\nSent by Active User (test@test) via ZTAP",
"ReadableContentsFormat": "text",
"Note": True,
"Tags": [],
},
]
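# A minimal sketch (an addition, not part of the original fixtures) of how
# these factories might be consumed in a test; the assertions below rely
# only on fields defined above.
def test_alert_fixture_shape():
    alerts = alert_response()
    assert len(alerts) == 2
    assert alerts[0]["xsoar_mirror_id"] == "1"
    assert all(a["status"] == "assigned" for a in alerts)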
| 32.043796 | 107 | 0.479499 | 413 | 4,390 | 4.861985 | 0.227603 | 0.050797 | 0.082171 | 0.071215 | 0.711653 | 0.631474 | 0.542829 | 0.461155 | 0.437749 | 0.437749 | 0 | 0.098096 | 0.366059 | 4,390 | 136 | 108 | 32.279412 | 0.623428 | 0 | 0 | 0.421875 | 0 | 0 | 0.432346 | 0.070159 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054688 | true | 0 | 0 | 0.054688 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
e5090e4da04384e2fbcd5b4f114deb2087c6f5f4 | 288 | py | Python | postgres/scripts/test-db-connection.py | tcalmant/ldbc_snb_interactive | baf4a8150ffd0b193ba2c6d1dc7cdc3a99edeedf | [
"Apache-2.0"
] | null | null | null | postgres/scripts/test-db-connection.py | tcalmant/ldbc_snb_interactive | baf4a8150ffd0b193ba2c6d1dc7cdc3a99edeedf | [
"Apache-2.0"
] | null | null | null | postgres/scripts/test-db-connection.py | tcalmant/ldbc_snb_interactive | baf4a8150ffd0b193ba2c6d1dc7cdc3a99edeedf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import os
import psycopg2
con = psycopg2.connect(
host="localhost",
user=os.environ.get("POSTGRES_USER", "postgres"),
password=os.environ.get("POSTGRES_PASSWORD", "mysecretpassword"),
port=int(os.environ.get("POSTGRES_PORT", 5432)),
)
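# A minimal extension sketch (an assumption, not in the original script):
# run a trivial query so the check verifies the server answers queries,
# not just that it accepts connections.
with con.cursor() as cur:
    cur.execute("SELECT 1")
    assert cur.fetchone() == (1,)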
con.close()
| 22.153846 | 69 | 0.704861 | 37 | 288 | 5.405405 | 0.567568 | 0.135 | 0.18 | 0.3 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027778 | 0.125 | 288 | 12 | 70 | 24 | 0.765873 | 0.072917 | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.111111 | 0.222222 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 2 |
e50969a938fad964949586d116b12c2990a0ae87 | 2,054 | py | Python | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | 1 | 2020-12-28T10:40:38.000Z | 2020-12-28T10:40:38.000Z | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | null | null | null | pyxll_jupyter/widget.py | TanKingsley/pyxll-jupyter | 4f7b3eb361079b74683d89340dfff9576fb2ff41 | [
"MIT"
] | null | null | null | """
JupyterQtWidget is the widget that gets embedded in Excel and hosts
a tabbed browser widget containing the Jupyter notebook.
"""
from .kernel import start_kernel, launch_jupyter
from .browser import Browser
from .qtimports import QWidget, QVBoxLayout
import subprocess
import ctypes
class JupyterQtWidget(QWidget):
def __init__(self, parent=None, scale=None, initial_path=None):
super().__init__(parent)
        # proc gets set to the subprocess when Jupyter is started
self.proc = None
# Get the scale from the window DPI
if scale is None:
LOGPIXELSX = 88
hwnd = self.winId()
if isinstance(hwnd, str):
hwnd = int(hwnd, 16 if hwnd.startswith("0x") else 10)
hwnd = ctypes.c_size_t(hwnd)
screen = ctypes.windll.user32.GetDC(hwnd)
try:
scale = ctypes.windll.gdi32.GetDeviceCaps(screen, LOGPIXELSX) / 96.0
finally:
ctypes.windll.user32.ReleaseDC(hwnd, screen)
# Create the browser widget
self.browser = Browser(self, scale=scale)
self.browser.closed.connect(self.close)
# Add the browser to the widgets layout
layout = QVBoxLayout()
layout.addWidget(self.browser)
self.setLayout(layout)
# Start the kernel and open Jupyter in a new tab
app = start_kernel()
self.proc, url = launch_jupyter(app.connection_file, cwd=initial_path)
self.browser.create_tab(url)
def closeEvent(self, event):
        # Kill the Jupyter subprocess using taskkill (just killing the process
        # with Popen.kill doesn't terminate any child processes)
if self.proc is not None:
while self.proc.poll() is None:
si = subprocess.STARTUPINFO(wShowWindow=subprocess.SW_HIDE)
subprocess.check_call(['taskkill', '/F', '/T', '/PID', str(self.proc.pid)],
startupinfo=si,
shell=True)
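# A minimal standalone usage sketch (an assumption, not part of this module):
# embed the widget in its own Qt application instead of Excel. The PyQt5
# import below is hypothetical; the actual binding comes from .qtimports.
#
#   from PyQt5.QtWidgets import QApplication
#   app = QApplication([])
#   widget = JupyterQtWidget()
#   widget.show()
#   app.exec_()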
| 36.678571 | 95 | 0.619279 | 246 | 2,054 | 5.089431 | 0.47561 | 0.031949 | 0.028754 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011096 | 0.297955 | 2,054 | 55 | 96 | 37.345455 | 0.857143 | 0.221519 | 0 | 0 | 0 | 0 | 0.011356 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.228571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50c65e44676b2b7cbe06fd4c5deb5f102a8415d | 621 | py | Python | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | 3 | 2018-03-16T14:52:40.000Z | 2020-12-04T10:12:07.000Z | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | null | null | null | py/A Rule Of Divisibility By 13.py | aadithpm/code-a-day | 18d7c1847e14d32d33d09d29f8847b6252c6e9e6 | [
"Unlicense"
] | 5 | 2017-06-30T05:35:00.000Z | 2019-07-13T08:05:30.000Z | """
https://www.codewars.com/kata/564057bc348c7200bd0000ff/train/python
"""
from itertools import cycle, islice


def thirt(n):
    """Apply the remainder-weighted digit sum until it becomes stationary."""
    seq = [1, 10, 9, 12, 3, 4]  # 10**k % 13 repeats with period 6
    while True:
        digits = [int(d) for d in reversed(str(n))]
        # Cycle the weights so they cover every digit, however long n is
        weights = islice(cycle(seq), len(digits))
        total = sum(d * w for d, w in zip(digits, weights))
        if total == n:
            return total
        n = total
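# Quick sanity checks (an addition), using the example values given in the
# kata description:
if __name__ == "__main__":
    assert thirt(8529) == 79
    assert thirt(85299258) == 31
    assert thirt(5634) == 57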
| 34.5 | 67 | 0.602254 | 106 | 621 | 3.528302 | 0.339623 | 0.074866 | 0.053476 | 0.074866 | 0.42246 | 0.42246 | 0.42246 | 0.42246 | 0.315508 | 0.139037 | 0 | 0.082278 | 0.236715 | 621 | 17 | 68 | 36.529412 | 0.706751 | 0.107891 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0 | 0 | 0.214286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50ca21b180f2193d02c55bbba259bcd1c32234b | 5,292 | py | Python | lonely/system.py | LonelyPale/lonely | 328d0ef12b98a6d208ef8cf75e69f0cc421a0b2b | [
"Apache-2.0"
] | null | null | null | lonely/system.py | LonelyPale/lonely | 328d0ef12b98a6d208ef8cf75e69f0cc421a0b2b | [
"Apache-2.0"
] | null | null | null | lonely/system.py | LonelyPale/lonely | 328d0ef12b98a6d208ef8cf75e69f0cc421a0b2b | [
"Apache-2.0"
] | null | null | null | import os as _os
import platform
from lonely.cmd import command
def lsb_release():
ret = command("cat /etc/openEuler-release", capture_output=True, print_out=False, print_err=False)
if ret.returncode == 0:
#print("success:", ret)
if ret.stdout.lower().find("openeuler") > -1:
return "openeuler"
else:
return "unknown"
else:
#print("failure:", ret)
return "unknown"
os = platform.system().lower()
arch = platform.machine().lower()
release = lsb_release()
shell = _os.getenv('SHELL')
home = _os.getenv('HOME')
base = "/usr/local"
temp = "/tmp"
env_file = {
"linux": "%s/.bashrc" % home,
"darwin": "%s/.bash_profile" % home,
}
source_file = {
"/bin/bash": env_file.get(os),
"/bin/zsh": "%s/.zshrc" % home,
}
# todo: ineffective; `source` runs in a child process and does not affect the parent.
def source(file=None):
if file is not None and isinstance(file, str) and len(file) > 0:
return command("source %s" % file)
else:
sf = source_file.get(shell)
if sf is not None:
return command("source %s" % sf)
else:
return None
def env_add(conf):
if conf is None or type(conf) not in (type(()), type([])) or len(conf) < 2:
return False
if os not in env_file:
print("Unsupported os: %s" % os)
return False
env_file_path = env_file[os]
temp_file = env_file_path + ".tmp"
start = conf[0]
end = conf[len(conf)-1]
flag = 0
with open(env_file_path, "r", encoding="utf-8") as f1, open(temp_file, "w", encoding="utf-8") as f2:
lines = f1.readlines()
start_idx = -1
end_idx = -1
for i, line in enumerate(lines):
if flag == 0 and start in line:
flag = 1
start_idx = i
            elif flag == 1 and end in line:  # already present; modify
flag = 2
end_idx = i
break
        if flag == 0:  # not present; append
new_lines = []
lines.reverse()
first_none = True
for line in lines:
if first_none:
if line != '\n':
first_none = False
new_lines.append(line)
else:
new_lines.append(line)
new_lines.reverse()
f2.writelines(new_lines + ['\n'] + conf)
elif flag == 2:
new_lines1 = []
lines1 = lines[:start_idx]
lines1.reverse()
first_none = True
for line in lines1:
if first_none:
if line != '\n':
first_none = False
new_lines1.append(line)
else:
new_lines1.append(line)
new_lines1.reverse()
new_lines2 = []
lines2 = lines[end_idx+1:]
first_none = True
for line in lines2:
if first_none:
if line != '\n':
first_none = False
new_lines2.append(line)
else:
new_lines2.append(line)
f2.writelines(new_lines1 + ['\n'] + conf + ['\n'] + new_lines2)
elif flag == 1:
raise("env file syntax error, missing end. %s" % env_file_path)
_os.remove(env_file_path)
_os.rename(temp_file, env_file_path)
# source()
return True
def env_del(conf):
if conf is None or type(conf) not in (type(()), type([])) or len(conf) < 2:
return False
if os not in env_file:
print("Unsupported os: %s" % os)
return False
env_file_path = env_file[os]
temp_file = env_file_path + ".tmp"
start = conf[0]
end = conf[len(conf)-1]
flag = 0
with open(env_file_path, "r", encoding="utf-8") as f1, open(temp_file, "w", encoding="utf-8") as f2:
lines = f1.readlines()
start_idx = -1
end_idx = -1
for i, line in enumerate(lines):
if flag == 0 and start in line:
flag = 1
start_idx = i
            elif flag == 1 and end in line:  # already present; delete
flag = 2
end_idx = i
break
        if flag == 1:
            raise Exception("env file syntax error, missing end. %s" % env_file_path)
        elif flag == 0:  # not present; nothing to delete
            return True
new_lines1 = []
lines1 = lines[:start_idx]
lines1.reverse()
first_none = True
for line in lines1:
if first_none:
if line != '\n':
first_none = False
new_lines1.append(line)
else:
new_lines1.append(line)
new_lines1.reverse()
new_lines2 = []
lines2 = lines[end_idx+1:]
first_none = True
for line in lines2:
if first_none:
if line != '\n':
first_none = False
new_lines2.append(line)
else:
new_lines2.append(line)
f2.writelines(new_lines1 + ['\n'] + new_lines2)
_os.remove(env_file_path)
_os.rename(temp_file, env_file_path)
# source()
return True
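# A minimal usage sketch (an addition, not part of this module): wrap the
# managed lines between start/end marker lines so env_add/env_del can locate
# the block in the shell rc file. The marker text below is arbitrary.
#
#   conf = [
#       "# >>> example managed block >>>\n",
#       'export PATH="$HOME/.local/bin:$PATH"\n',
#       "# <<< example managed block <<<\n",
#   ]
#   env_add(conf)  # insert the block, or refresh it if already present
#   env_del(conf)  # remove the block again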
| 28.76087 | 104 | 0.493386 | 647 | 5,292 | 3.876352 | 0.185471 | 0.055821 | 0.052632 | 0.031898 | 0.665869 | 0.665869 | 0.665869 | 0.654306 | 0.63756 | 0.63756 | 0 | 0.022194 | 0.395503 | 5,292 | 183 | 105 | 28.918033 | 0.761801 | 0.022298 | 0 | 0.714286 | 0 | 0 | 0.06273 | 0.004259 | 0 | 0 | 0 | 0.005464 | 0 | 1 | 0.025974 | false | 0 | 0.019481 | 0 | 0.12987 | 0.019481 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e50e8e3032d7d7837365ea7b6780cf4d9b0c82b7 | 5,722 | py | Python | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | 1 | 2019-08-13T07:44:32.000Z | 2019-08-13T07:44:32.000Z | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | null | null | null | entityfactssheetsharvester/entityfactssheetsharvester.py | zazi/entityfactssheetsharvester | 150e702a763d73356adba112c0e1c1141df4884c | [
"Apache-2.0"
] | 1 | 2019-08-13T07:44:32.000Z | 2019-08-13T07:44:32.000Z | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
import json
import os
import socket
import sys
import requests
from threading import current_thread
from rx import create, of
from rx import operators as op
from rx.scheduler import ThreadPoolScheduler
USER_AGENT_HTTP_HEADER_KEY = 'user-agent'
USER_AGENT_PATTERN = "entityfactssheetsharvester-bot-from-{0}/0.0.1 (https://github.com/slub/entityfactssheetsharvester; zazi@smiy.org) entityfactssheetsharvester/0.0.1"
HOSTNAME = socket.getfqdn()
USER_AGENT = USER_AGENT_PATTERN.format(HOSTNAME)
HTTP_HEADERS = {USER_AGENT_HTTP_HEADER_KEY: USER_AGENT}
ENTITYFACTS_BASE_URI = "http://hub.culturegraph.org/entityfacts/"
UTF8_CHARSET_ID = 'utf-8'
LINEBREAK = "\n"
THREAD_POOL_SCHEDULER = ThreadPoolScheduler(10)
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def get_gnd_identifier(line):
    # remove the trailing line break, if any (os.linesep is two characters on
    # Windows, so rstrip is safer than comparing the last character)
    gnd_identifier = line.rstrip("\r\n")
eprint("GND identifier '{0}' (thread = '{1}')".format(gnd_identifier, current_thread().name))
return gnd_identifier
def entityfacts_request(request_uri, gnd_identifier):
eprint("try to retrieve EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
response = requests.get(request_uri, headers=HTTP_HEADERS, timeout=60)
if response.status_code != 200:
eprint("couldn't fetch EntityFacts sheet for GND identifier '{0}', got a '{1}' (thread = '{2}')".format(
gnd_identifier, response.status_code, current_thread().name))
return None
response_body = response.content.decode(UTF8_CHARSET_ID)
eprint("retrieved EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
return response_body
def retrieve_entityfacts_sheet_obs(gnd_identifier):
    return of(gnd_identifier).pipe(op.map(lambda gndid: retrieve_entityfacts_sheet(gndid)),
op.filter(lambda value: value is not None))
def retrieve_entityfacts_sheet(gnd_identifier):
entityfacts_sheets_uri = ENTITYFACTS_BASE_URI + gnd_identifier
response_tuple = entityfacts_request(entityfacts_sheets_uri, gnd_identifier)
if response_tuple is None:
return None
entityfacts_sheet_tuple = (response_tuple, gnd_identifier)
return entityfacts_sheet_tuple
def format_entityfacts_sheet_obs(entityfacts_sheet_tuple_obs):
return entityfacts_sheet_tuple_obs.pipe(op.map(lambda ef_sheet_tuple: format_entityfacts_sheet(ef_sheet_tuple)))
def format_entityfacts_sheet(entityfacts_sheet_tuple):
gnd_identifier = entityfacts_sheet_tuple[1]
eprint("format EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
entityfacts_sheet_json = json.loads(entityfacts_sheet_tuple[0])
flat_entityfacts_sheet_json = json.dumps(entityfacts_sheet_json, indent=None)
return flat_entityfacts_sheet_json, gnd_identifier
def write_entityfacts_sheet_obs(flat_entityfacts_sheet_json_tuple_obs):
return flat_entityfacts_sheet_json_tuple_obs.pipe(op.map(lambda flat_ef_sheet_json_tuple: write_entityfacts_sheet(
flat_ef_sheet_json_tuple)))
def write_entityfacts_sheet(flat_entityfacts_sheet_json_tuple):
gnd_identifier = flat_entityfacts_sheet_json_tuple[1]
eprint("write EntityFacts sheet for GND identifier '{0}' (thread = '{1}')".format(gnd_identifier,
current_thread().name))
sys.stdout.write(flat_entityfacts_sheet_json_tuple[0] + LINEBREAK)
return gnd_identifier
def push_input(observer, scheduler):
for line in sys.stdin:
observer.on_next(line)
return observer.on_completed()
def run():
parser = argparse.ArgumentParser(prog='entityfactssheetsharvester',
description='Retrieves EntityFacts sheets from a given CSV with GND identifiers and returns them as line-delimited JSON records.',
epilog='example: entityfactssheetsharvester < [INPUT CSV FILE WITH GND IDENTIFIERS] > [PATH TO THE OUTPUT LINE-DELIMITED JSON RECORDS FILE]',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
args = parser.parse_args()
if hasattr(args, 'help') and args.help:
parser.print_usage(sys.stderr)
exit(-1)
source = create(push_input)
all_in_one = source.pipe(op.map(lambda line: get_gnd_identifier(line)),
op.map(lambda gnd_identifier: retrieve_entityfacts_sheet_obs(gnd_identifier)),
op.map(lambda ef_sheet_tuple_obs: format_entityfacts_sheet_obs(ef_sheet_tuple_obs)),
op.map(lambda flat_ef_sheet_json_tuple_obs: write_entityfacts_sheet_obs(
flat_ef_sheet_json_tuple_obs)),
op.flat_map(lambda x: x))
all_in_one.subscribe(
on_next=lambda gnd_identifier: eprint(
"PROCESSED GND identifier '{0}': {1}".format(gnd_identifier, current_thread().name)),
on_error=lambda e: eprint(e),
on_completed=lambda: eprint("PROCESS done!"),
scheduler=THREAD_POOL_SCHEDULER)
if __name__ == "__main__":
run()
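# A minimal, self-contained sketch (an addition, not part of this module) of
# the composition pattern run() builds: map each item to an inner observable,
# then flatten one level with flat_map.
#
#   from rx import of
#   from rx import operators as op
#   of("a", "b").pipe(
#       op.map(lambda x: of(x.upper())),
#       op.flat_map(lambda inner: inner),
#   ).subscribe(print)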
| 41.463768 | 178 | 0.67791 | 677 | 5,722 | 5.420975 | 0.243722 | 0.120436 | 0.049046 | 0.045777 | 0.320163 | 0.236785 | 0.145504 | 0.118529 | 0.101635 | 0.101635 | 0 | 0.009574 | 0.23331 | 5,722 | 137 | 179 | 41.766423 | 0.826989 | 0.009962 | 0 | 0.082474 | 0 | 0.030928 | 0.164959 | 0.022783 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113402 | false | 0 | 0.103093 | 0.030928 | 0.329897 | 0.123711 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e50f03b5ad643f99fc6faba88c3fc2cee5a3768e | 473 | py | Python | mayan/apps/quotas/icons.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/quotas/icons.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/quotas/icons.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | from mayan.apps.appearance.classes import Icon
icon_quota_create = Icon(
driver_name='fontawesome-dual', primary_symbol='tachometer-alt',
secondary_symbol='plus'
)
icon_quota_delete = Icon(driver_name='fontawesome', symbol='times')
icon_quota_edit = Icon(driver_name='fontawesome', symbol='pencil-alt')
icon_quota_list = Icon(driver_name='fontawesome', symbol='tachometer-alt')
icon_quota_setup = Icon(driver_name='fontawesome', symbol='tachometer-alt')
| 43 | 76 | 0.77167 | 61 | 473 | 5.704918 | 0.409836 | 0.12931 | 0.201149 | 0.359195 | 0.431034 | 0.252874 | 0.252874 | 0 | 0 | 0 | 0 | 0 | 0.097252 | 473 | 10 | 77 | 47.3 | 0.814988 | 0 | 0 | 0 | 0 | 0 | 0.261339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2 |
e51032e8f05343cce31308455d21b22aca3ea53e | 5,086 | py | Python | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | 1 | 2019-06-16T00:52:26.000Z | 2019-06-16T00:52:26.000Z | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | null | null | null | pyner/util/optimizer.py | chantera/pyner | 6de19713871e923c997495c07e2ec249bded8671 | [
"MIT"
] | null | null | null | from chainer import optimizer_hooks
from chainer import optimizers
from chainer import training
import numpy
import logging
logger = logging.getLogger(__name__)
def create_optimizer(configs):
"""
:param optimizer_config: dict, 学習のパラメータを含む辞書
"""
if 'optimizer' not in configs:
raise Exception('Optimizer configurations are not found')
optimizer_configs = configs['optimizer']
optimizer_ = optimizer_configs['name']
optimizer_ = optimizer_.lower()
if optimizer_ == 'sgd':
optimizer = optimizers.SGD(lr=optimizer_configs['learning_rate'])
elif optimizer_ == 'momentumsgd':
optimizer = optimizers.MomentumSGD(
lr=optimizer_configs['learning_rate'])
elif optimizer_ == 'adadelta':
optimizer = optimizers.AdaDelta()
elif optimizer_ == 'adam':
optimizer = optimizers.Adam(alpha=optimizer_configs['alpha'],
beta1=optimizer_configs['beta1'],
beta2=optimizer_configs['beta2'])
elif optimizer_ == 'adabound':
optimizer = optimizers.Adam(alpha=optimizer_configs['alpha'],
beta1=optimizer_configs['beta1'],
beta2=optimizer_configs['beta2'],
adabound=True,
final_lr=optimizer_configs['final_lr']) # NOQA
else:
        raise Exception("Unsupported optimizer: %s" % optimizer_)
return optimizer
def add_hooks(optimizer, configs):
"""
:param optimizer: chainer.Optimizer, chainerのオプティマイザ
:param configs: pyner.util.config.ConfigParser
"""
if 'optimizer' not in configs:
raise Exception('Optimizer configurations are not found')
optimizer_configs = configs['optimizer']
if optimizer_configs.get('weight_decay'):
logger.debug('\x1b[31mSet weight decay\x1b[0m')
optimizer.add_hook(optimizer_hooks.WeightDecay(
optimizer_configs['weight_decay']))
if 'gradient_clipping' in optimizer_configs:
clipping_threshold = optimizer_configs['gradient_clipping']
msg = 'Enable gradient clipping:'
msg += f' threshold \x1b[31m{clipping_threshold}\x1b[0m'
logger.debug(msg)
optimizer.add_hook(
optimizer_hooks.GradientClipping(clipping_threshold)
)
return optimizer
class LearningRateDecay(training.extension.Extension):
    """Extension to decay the learning rate as in Ma+
    (http://www.aclweb.org/anthology/P16-1101)

    The learning rate is updated to

    ``rate / (1 + decay * iteration)``

    This extension is also called before the training loop starts by default.

    Args:
        attr (str): Name of the attribute to shift.
        rate (float): Initial value of the attribute.
        decay (float): Decay coefficient applied per invocation.
        target (float): Target value of the attribute. If the attribute reaches
            this value, the shift stops.
        optimizer (~chainer.Optimizer): Target optimizer to adjust the
            attribute. If it is ``None``, the main optimizer of the updater is
            used.
    """
invoke_before_training = True
def __init__(self, attr, rate, decay, target=None,
optimizer=None):
self._attr = attr
self._rate = rate
self._decay = decay
self._target = target
self._optimizer = optimizer
self._t = 0
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._rate)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
value = self._rate / (1 + (self._decay * self._t))
if self._target is not None:
if self._rate > 0:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if self._target / value > 1:
value = self._target
else:
# ditto
if self._target / value < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
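# A minimal, self-contained sketch (an addition, not part of this module)
# showing the schedule LearningRateDecay applies: lr_t = rate / (1 + decay * t).
# The rate and decay values below are arbitrary examples.
if __name__ == "__main__":
    rate, decay = 0.015, 0.05
    for t in range(5):
        print(t, rate / (1 + decay * t))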
| 33.682119 | 83 | 0.621707 | 557 | 5,086 | 5.468582 | 0.278276 | 0.09455 | 0.038411 | 0.023638 | 0.282994 | 0.200263 | 0.200263 | 0.155614 | 0.133946 | 0.133946 | 0 | 0.008845 | 0.288635 | 5,086 | 150 | 84 | 33.906667 | 0.833057 | 0.220016 | 0 | 0.215909 | 0 | 0 | 0.101453 | 0.009081 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.056818 | 0.011364 | 0.204545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e510c7426f5e3c38449cb80f147daf9524ba1a19 | 4,553 | py | Python | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | 1 | 2021-07-08T12:29:34.000Z | 2021-07-08T12:29:34.000Z | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | null | null | null | 5th_pipeline.py | Jose-Oton/airflow_project | 1b65a83975be63ad15cab95ad2947f6526400368 | [
"Apache-2.0"
] | null | null | null | #1. Documenting a DAG
"""
## PYSPARK DAG
This pipeline takes the Covid data publicly shared by Google and computes a few KPIs.
"""
from airflow import DAG
from datetime import timedelta, datetime
from airflow.utils.dates import days_ago
from airflow.models import Variable
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocCreateClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocDeleteClusterOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitPySparkJobOperator
from airflow.providers.google.cloud.operators.dataproc import DataprocSubmitJobOperator
from airflow.utils import trigger_rule
# DataprocSubmitPySparkJobOperator(
# task_id="store_stock",
# main="gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py",
# cluster_name="spark-cluster-{{ ds_nodash }}",
# dataproc_jars=["gs://spark-lib/bigquery/spark-bigquery-latest.jar"],  # JAR so Spark can read from BigQuery
# region='us-central1',
# gcp_conn_id='google_cloud_default'
# ).generate_job()
#2. Using Variables
PROJECT_ID = Variable.get("project")
STORAGE_BUCKET = Variable.get("storage_bucket")
default_dag_args = {
"start_date": days_ago(1),
"owner": "José Otón"
}
def is_weekend(execution_date=None):
date = datetime.strptime(execution_date, "%Y-%m-%d")
if date.isoweekday() < 6:
return "store_stock"
return "weekend"
# DEFINE THE DAG
with DAG(
dag_id='5th_exercise',
description='Running a PySpark Job on GCP',
schedule_interval='@daily',
default_args=default_dag_args,
max_active_runs=1,
    user_defined_macros={"project": PROJECT_ID},  # 5. Macros in Airflow
) as dag:
    dag.doc_md = __doc__  # attach the module docstring as DAG documentation
create_dataproc = DataprocCreateClusterOperator(
task_id="create_dataproc",
project_id='{{ project }}',
cluster_name="spark-cluster-{{ ds_nodash }}",
num_workers=2,
storage_bucket=STORAGE_BUCKET,
region="us-central1"
)
create_dataproc.doc_md = """## Crear cluster de Dataproc
Crea un cluster de Dataproc en el proyecto de GCP
"""
    # 3. Add branching logic to run one pipeline or the other
do_analytics = BranchPythonOperator(
task_id="do_analytics",
python_callable=is_weekend,
op_kwargs={"execution_date": "{{ ds }}"}, # 4. Jinja Templating
)
do_analytics.doc_md = """## Evalua que dia de la semana es
Crea un cluster de Dataproc en el proyecto de GCP.
"""
store_stock = DataprocSubmitJobOperator(
task_id="store_stock",
project_id=PROJECT_ID,
location='us-central1',
job={
'reference': {'project_id': '{{ project }}',
                          'job_id': '{{task.task_id}}_{{ds_nodash}}_2446afcc_joseOton'},  # job IDs must be unique, so this may change per run
'placement': {'cluster_name': 'spark-cluster-{{ ds_nodash }}'},
'labels': {'airflow-version': 'v2-1-0'},
'pyspark_job': {
'jar_file_uris': ['gs://spark-lib/bigquery/spark-bigquery-latest_2.12.jar'],
'main_python_file_uri': 'gs://your_bucket/datapipelines/pyspark/pyspark_transformation_joseOton.py'
}
},
gcp_conn_id='google_cloud_default'
)
store_stock.doc_md = """## Spark Transformation
    Runs the transformations with Spark.
"""
weekend = BashOperator(
task_id="weekend",
        bash_command='echo "\'$TODAY\' is a weekend day, so the pipeline was not executed."',
env={'TODAY': '2021-06-20'},
)
weekend.doc_md = """## Imprime el día de la semana
Se ejecuta en caso sea fin de semana.
"""
delete_cluster = DataprocDeleteClusterOperator(
task_id="delete_cluster",
project_id=PROJECT_ID,
cluster_name="spark-cluster-{{ ds_nodash }}",
trigger_rule="all_done",
region='us-central1'
#zone='us-central1-a'
)
delete_cluster.doc_md = """## Borrar Cluster de Dataproc
Elimina el cluster de Dataproc.
"""
    # SET THE DAG DEPENDENCIES
(create_dataproc >>
do_analytics >> [
store_stock,
weekend,
] >> delete_cluster)
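    # A minimal sketch (an addition, not in the original DAG) verifying the
    # branch callable without running Airflow; the dates are arbitrary examples.
    assert is_weekend("2021-06-18") == "store_stock"  # a Friday -> run the job
    assert is_weekend("2021-06-20") == "weekend"      # a Sunday -> skip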
| 34.755725 | 123 | 0.656929 | 524 | 4,553 | 5.494275 | 0.385496 | 0.038208 | 0.029524 | 0.036124 | 0.234109 | 0.234109 | 0.172282 | 0.146579 | 0.071553 | 0.027787 | 0 | 0.009461 | 0.233912 | 4,553 | 130 | 124 | 35.023077 | 0.81594 | 0.175928 | 0 | 0.097826 | 0 | 0 | 0.317385 | 0.048679 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01087 | false | 0 | 0.119565 | 0 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e510db97dd6ed101594891f1033e5947097f3261 | 127 | py | Python | muve/sumo_server/__init__.py | muve-traffic/sumo-server | 0a857ba9555569db1c118367668a507600c12cdf | [
"MIT"
] | null | null | null | muve/sumo_server/__init__.py | muve-traffic/sumo-server | 0a857ba9555569db1c118367668a507600c12cdf | [
"MIT"
] | null | null | null | muve/sumo_server/__init__.py | muve-traffic/sumo-server | 0a857ba9555569db1c118367668a507600c12cdf | [
"MIT"
] | null | null | null | """Muve Traffic SUMO server.
Server for simulating traffic and relaying traffic information programmatically through SUMO.
"""
| 25.4 | 92 | 0.80315 | 15 | 127 | 6.8 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.133858 | 127 | 4 | 93 | 31.75 | 0.927273 | 0.937008 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 4 |
e5115a401bd8efaf8cc688760dfac54cb7c36ec4 | 2,613 | py | Python | tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_shuffle_config.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 1,690 | 2017-11-29T20:13:37.000Z | 2022-03-31T12:58:11.000Z | tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_shuffle_config.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 2,762 | 2017-12-04T05:18:03.000Z | 2022-03-31T23:40:11.000Z | tests/unit/sagemaker/cli/compatibility/v2/modifiers/test_shuffle_config.py | LastRemote/sagemaker-python-sdk | fddf29d9e4383cd3f939253eef47ee79a464dd37 | [
"Apache-2.0"
] | 961 | 2017-11-30T16:44:03.000Z | 2022-03-30T23:12:09.000Z | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pasta
import pytest
from sagemaker.cli.compatibility.v2.modifiers import training_input
from tests.unit.sagemaker.cli.compatibility.v2.modifiers.ast_converter import ast_call, ast_import
@pytest.fixture
def constructors():
return (
"sagemaker.session.ShuffleConfig(seed)",
"session.ShuffleConfig(seed)",
)
@pytest.fixture
def modified_constructors(constructors):
return [c.replace("session", "inputs") for c in constructors]
def test_constructor_node_should_be_modified(constructors):
modifier = training_input.ShuffleConfigModuleRenamer()
for constructor in constructors:
node = ast_call(constructor)
assert modifier.node_should_be_modified(node)
def test_constructor_node_should_be_modified_random_call():
modifier = training_input.ShuffleConfigModuleRenamer()
node = ast_call("FileSystemInput()")
assert not modifier.node_should_be_modified(node)
def test_constructor_modify_node(constructors, modified_constructors):
modifier = training_input.ShuffleConfigModuleRenamer()
for before, expected in zip(constructors, modified_constructors):
node = ast_call(before)
modifier.modify_node(node)
assert expected == pasta.dump(node)
def test_import_from_node_should_be_modified_training_input():
modifier = training_input.ShuffleConfigImportFromRenamer()
node = ast_import("from sagemaker.session import ShuffleConfig")
assert modifier.node_should_be_modified(node)
def test_import_from_node_should_be_modified_random_import():
modifier = training_input.ShuffleConfigImportFromRenamer()
node = ast_import("from sagemaker.session import Session")
assert not modifier.node_should_be_modified(node)
def test_import_from_modify_node():
modifier = training_input.ShuffleConfigImportFromRenamer()
node = ast_import("from sagemaker.session import ShuffleConfig")
modifier.modify_node(node)
assert "from sagemaker.inputs import ShuffleConfig" == pasta.dump(node)
| 34.84 | 98 | 0.778416 | 323 | 2,613 | 6.071207 | 0.321981 | 0.053034 | 0.048955 | 0.081591 | 0.466599 | 0.395207 | 0.395207 | 0.298317 | 0.298317 | 0.229985 | 0 | 0.002686 | 0.145044 | 2,613 | 74 | 99 | 35.310811 | 0.875112 | 0.204746 | 0 | 0.380952 | 0 | 0 | 0.125424 | 0.030993 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.190476 | false | 0 | 0.357143 | 0.047619 | 0.595238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 2 |