hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f1989e325bb85e0738bbeae4175fa2a163031d0
| 1,750
|
py
|
Python
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 2
|
2015-02-11T05:47:42.000Z
|
2015-02-11T05:47:51.000Z
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | 1
|
2015-04-13T06:36:21.000Z
|
2015-04-13T06:36:21.000Z
|
Problem 001-150 Python/pb035.py
|
Adamssss/projectEuler
|
25881b1bd82876e81197756f62ab5b0d73e3e6c8
|
[
"MIT"
] | null | null | null |
import math
import time
t1 = time.time()

# Sieve over odd numbers only: slot k of p says whether 2*k + 1 is prime
# (k >= 1); 2 is seeded into the prime list by hand.
N = 1000000
n = (N + 1) // 2
p = [True] * n
prime = [2]
i = 1
while i < n:
    if p[i]:
        t = 2 * i + 1              # odd number this slot represents
        prime.append(t)
        # cross off every odd multiple of t: slot i + m*t holds t*(2m+1)
        for j in range(i, n, t):
            p[j] = False
    i += 1
def isPrime(item):
    """Trial-divide item: first by the sieved primes, then by further odd
    numbers past the end of the list, up to floor(sqrt(item))."""
    root = math.floor(math.sqrt(item))
    for d in prime:
        if d > root:
            return True
        if item % d == 0:
            return False
    # Prime list exhausted below the root: keep testing odd candidates.
    d = prime[-1] + 2
    while d <= root:
        if item % d == 0:
            return False
        d += 2
    return True
# define a binary search
def isInList(item, lst):
    """Binary-search sorted lst for item; return its index, or -1 if absent."""
    lo, hi = 0, len(lst) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if lst[mid] == item:
            return mid
        if item > lst[mid]:
            lo = mid + 1
        else:
            hi = mid - 1
    return -1
# Project Euler 35: count circular primes below N — primes whose every
# digit rotation is also prime (e.g. 197 -> 971 -> 719).
target = prime[:]
count = 0
while len(target) > 0:
    test = target[0]
    dig = math.floor(math.log10(test)) + 1    # number of decimal digits
    target.pop(0)
    if dig == 1:
        # single-digit primes are trivially circular
        count += 1
        continue
    if dig > 1:
        i = 1
        counted = 0          # prime rotations not found in target (duplicates)
        tl = True            # stays True while every rotation is prime
        while i < dig:
            # Rotate: move the last digit to the front. BUG FIX: this used
            # math.pow(10, dig-1), which turned `test` into a float and
            # risked precision loss; integer 10 ** (dig - 1) is exact.
            test = test // 10 + (test % 10) * 10 ** (dig - 1)
            if isPrime(test):
                i += 1
                ind = isInList(test, target)
                if ind >= 0:
                    # remove the rotation so it is not re-counted later
                    target.pop(ind)
                else:
                    counted += 1
            else:
                tl = False
                break
        if tl:
            # the whole rotation cycle is prime; count its distinct members
            count += dig - counted
print (count)
print("time:", time.time() - t1)
| 18.617021
| 58
| 0.430857
| 213
| 1,750
| 3.539906
| 0.248826
| 0.015915
| 0.018568
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051255
| 0.453714
| 1,750
| 93
| 59
| 18.817204
| 0.737448
| 0.028
| 0
| 0.178082
| 0
| 0
| 0.002946
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.027397
| 0
| 0.109589
| 0.027397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f1a5c2760e9a1b86d6eb2f562c21e3dbc87be05
| 2,190
|
py
|
Python
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | 11
|
2021-05-12T14:11:58.000Z
|
2022-01-25T04:23:38.000Z
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | 3
|
2021-05-13T11:37:35.000Z
|
2021-05-13T11:50:15.000Z
|
BAP/adapters.py
|
EleutherAGI/summarisation
|
d432873e1ba171f47371b8b0df7235478b52ca99
|
[
"CC-BY-4.0"
] | null | null | null |
import torch
import torch.nn as nn
from collections import OrderedDict
class AdapterLayer(nn.Module):
    """Bottleneck adapter: down-project, ReLU, up-project, residual add.

    Setting ``skip_adapter = True`` turns the layer into an identity map.
    """

    def __init__(self, input_size, reduction_factor):
        super(AdapterLayer, self).__init__()
        self.skip_adapter = False
        bottleneck = input_size // reduction_factor
        self.adapter = nn.Sequential(
            nn.Linear(input_size, bottleneck),
            nn.ReLU(),
            nn.Linear(bottleneck, input_size),
        )
        self.adapter.apply(self.init_weights)

    def init_weights(self, m, std=1e-2):
        # Small, clamped normal init so a fresh adapter starts near identity.
        if type(m) == nn.Linear:
            torch.nn.init.normal_(m.weight, std=std)
            torch.nn.init.normal_(m.bias, std=std)
            m.weight.data = torch.clamp(m.weight.data, min=-2 * std, max=2 * std)
            m.bias.data = torch.clamp(m.bias.data, min=-2 * std, max=2 * std)

    def forward(self, X):
        # Identity pass-through when the adapter is disabled.
        if self.skip_adapter:
            return X
        return self.adapter(X) + X
### GPT NEO VERSION ######
'''
# couldn't get it to work with class inheritance
def add_adapters(model, reduction_factor):
n_layers = len(model.h)
hidden_size = model.config.hidden_size
for n in range(n_layers):
model.h[n].mlp = nn.Sequential(OrderedDict([('MLP', model.h[n].mlp),
('Adapter', AdapterLayer(hidden_size, reduction_factor))]))
return model
'''
# couldn't get it to work with class inheritance
def add_adapters(model, reduction_factor):
    """Wrap each transformer block's MLP with a trailing AdapterLayer.

    Mutates `model` in place and also returns it for chaining.
    """
    hidden_size = model.config.hidden_size
    for block in model.transformer.h:
        block.mlp = nn.Sequential(OrderedDict([
            ('MLP', block.mlp),
            ('Adapter', AdapterLayer(hidden_size, reduction_factor)),
        ]))
    return model
def add_adapter_skip(model):
    """Attach a bound ``adapter_skip(skip)`` method to `model`.

    Calling it flips the ``skip_adapter`` flag on the adapter inside every
    transformer block, enabling/disabling all adapters at once.
    """
    def adapter_skip(self, skip):
        for block in self.transformer.h:
            block.mlp.Adapter.skip_adapter = skip
    # __get__ binds the plain function to this particular model instance.
    model.adapter_skip = adapter_skip.__get__(model)
    return model
| 39.818182
| 111
| 0.594977
| 281
| 2,190
| 4.459075
| 0.241993
| 0.083799
| 0.075818
| 0.057462
| 0.48763
| 0.422985
| 0.408619
| 0.379888
| 0.322426
| 0.322426
| 0
| 0.003856
| 0.289498
| 2,190
| 55
| 112
| 39.818182
| 0.801414
| 0.028767
| 0
| 0.111111
| 0
| 0
| 0.006031
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f222448d0c305c6158a8a8cb410ef32dcbf5429
| 7,090
|
py
|
Python
|
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
util.py
|
gmshashank/pytorch_yolo
|
9736006639acba9743b4e3ff56285668357097f9
|
[
"MIT"
] | null | null | null |
from __future__ import division
from torch.autograd import Variable
import cv2
import numpy as np
import torch
def bbox_iou(box1, box2):
    """Return the IoU of two sets of corner-format bounding boxes.

    box1, box2: tensors of shape (N, 4+) holding (x1, y1, x2, y2) in
    inclusive pixel coordinates (hence the +1 on widths and heights).
    Returns a tensor of per-row IoU values.
    """
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]

    # Intersection rectangle.
    inter_rect_x1 = torch.max(b1_x1, b2_x1)
    inter_rect_y1 = torch.max(b1_y1, b2_y1)
    inter_rect_x2 = torch.min(b1_x2, b2_x2)
    inter_rect_y2 = torch.min(b1_y2, b2_y2)

    # clamp(min=0) zeroes the area for non-overlapping boxes and replaces
    # the old CPU/CUDA branch, which built torch.zeros(...).cuda() whenever
    # a GPU existed and would mismatch devices for CPU inputs.
    inter_area = torch.clamp(inter_rect_x2 - inter_rect_x1 + 1, min=0) * \
        torch.clamp(inter_rect_y2 - inter_rect_y1 + 1, min=0)

    # BUG FIX: b1_area previously used (b1_y1 - b1_y1 + 1), so the first
    # box's height was always 1 and the IoU was wrong for any taller box.
    b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
    b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)

    iou = inter_area / (b1_area + b2_area - inter_area)
    return iou
def load_classes(namesfile):
    """Read class names, one per line, from `namesfile`.

    The element after the final newline is dropped ([:-1]), matching the
    original behavior for names files that end with a trailing newline.
    BUG FIX: the file handle was previously opened and never closed; the
    `with` block guarantees it is released.
    """
    with open(namesfile, "r") as fp:
        return fp.read().split("\n")[:-1]
def get_test_input_cv(imglist, input_dim, CUDA):
    """Load the first image in `imglist` as a network-ready input tensor."""
    frame = cv2.imread(imglist[0])
    frame = cv2.resize(frame, (input_dim, input_dim))
    # BGR -> RGB and HWC -> CHW for the network's expected layout.
    chw = frame[:, :, ::-1].transpose((2, 0, 1))
    # Add the batch axis and scale pixel values to [0, 1].
    batched = chw[np.newaxis, :, :, :] / 255.0
    tensor = Variable(torch.from_numpy(batched).float())
    if CUDA:
        tensor = tensor.cuda()
    return tensor
def predict_transform(prediction, input_dim, anchors, num_classes, use_gpu=True):
    """Decode a raw YOLO feature map into detection-space predictions.

    prediction: raw network output for one scale; reshaped here to
        (batch, grid*grid*num_anchors, 5 + num_classes).
    input_dim: network input resolution — assumed square (stride is derived
        from dim 2 only).
    anchors: (w, h) anchor sizes in input-image pixels.
    num_classes: number of class scores per box.
    use_gpu: when True (default) the tensors are moved to CUDA — callers on
        CPU-only machines must pass use_gpu=False.
    Returns the transformed prediction tensor, with box coordinates scaled
    back to input-image pixels.
    """
    batch_size = prediction.size(0)
    # Downsampling factor between the input image and this feature map.
    stride = input_dim // prediction.size(2)
    grid_size = input_dim // stride
    bbox_attrs = 5 + num_classes
    num_anchors = len(anchors)
    # Flatten the spatial grid: one row per (cell, anchor) pair.
    prediction = prediction.view(
        batch_size, bbox_attrs * num_anchors, grid_size * grid_size
    )
    prediction = prediction.transpose(1, 2).contiguous()
    prediction = prediction.view(
        batch_size, grid_size * grid_size * num_anchors, bbox_attrs
    )
    # Anchors are given in input pixels; convert to feature-map units.
    anchors = [(anchor[0] / stride, anchor[1] / stride) for anchor in anchors]
    # Sigmoid the centerX, centerY and objectness score.
    prediction[:, :, 0] = torch.sigmoid(prediction[:, :, 0])
    prediction[:, :, 1] = torch.sigmoid(prediction[:, :, 1])
    prediction[:, :, 4] = torch.sigmoid(prediction[:, :, 4])
    # Add per-cell center offsets so (x, y) become absolute grid coordinates.
    grid = np.arange(grid_size)
    a, b = np.meshgrid(grid, grid)
    x_offset = torch.FloatTensor(a).view(-1, 1)
    y_offset = torch.FloatTensor(b).view(-1, 1)
    if use_gpu:
        prediction = prediction.cuda()
        x_offset = x_offset.cuda()
        y_offset = y_offset.cuda()
    # Repeat the (x, y) offset grid once per anchor, matching row order.
    x_y_offset = (
        torch.cat((x_offset, y_offset), 1)
        .repeat(1, num_anchors)
        .view(-1, 2)
        .unsqueeze(0)
    )
    prediction[:, :, :2] += x_y_offset
    # Log-space transform of height and width: w = anchor_w * exp(tw), etc.
    anchors = torch.FloatTensor(anchors)
    if use_gpu:
        anchors = anchors.cuda()
    anchors = anchors.repeat(grid_size * grid_size, 1).unsqueeze(0)
    prediction[:, :, 2:4] = torch.exp(prediction[:, :, 2:4]) * anchors
    # Sigmoid activation on the class scores.
    prediction[:, :, 5 : 5 + num_classes] = torch.sigmoid(
        (prediction[:, :, 5 : 5 + num_classes])
    )
    prediction[
        :, :, :4
    ] *= stride  # resize the detections map to the size of the input image
    return prediction
def unique(tensor):
    """Return a tensor holding the sorted distinct values of `tensor`.

    The result is allocated via `tensor.new(...)` so it keeps the input's
    type; deduplication happens on CPU through NumPy.
    """
    distinct = np.unique(tensor.cpu().numpy())
    as_tensor = torch.from_numpy(distinct)
    out = tensor.new(as_tensor.shape)
    out.copy_(as_tensor)
    return out
def write_results(prediction, confidence, num_classes, nms=True, nms_conf=0.4):
    """Filter decoded predictions by confidence and (optionally) NMS.

    prediction: output of predict_transform — (batch, boxes, 5 + num_classes)
        with center-format coordinates.
    confidence: objectness threshold; boxes at or below it are zeroed out.
    nms: when True, suppress overlapping same-class boxes with IoU >= nms_conf.
    Returns rows of (batch_index, x1, y1, x2, y2, objectness, class_conf,
    class_index). NOTE(review): if no detection survives in any image, the
    uninitialized `prediction.new(1, ...)` tensor is returned as-is — callers
    presumably detect that case themselves; confirm before relying on it.
    """
    # Object confidence thresholding: zero out low-confidence rows.
    conf_mask = (prediction[:, :, 4] > confidence).float().unsqueeze(2)
    prediction = prediction * conf_mask
    # Convert (cx, cy, w, h) to corner coordinates for IoU computation.
    box_corner = prediction.new(prediction.shape)
    box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
    box_corner[:, :, 1] = prediction[:, :, 1] - prediction[:, :, 3] / 2
    box_corner[:, :, 2] = prediction[:, :, 0] + prediction[:, :, 2] / 2
    box_corner[:, :, 3] = prediction[:, :, 1] + prediction[:, :, 3] / 2
    prediction[:, :, :4] = box_corner[:, :, :4]
    batch_size = prediction.size(0)
    output = prediction.new(1, prediction.size(2) + 1)
    write = False
    for ind in range(batch_size):
        # Select the image from the batch.
        img_pred = prediction[ind]  # Image Tensor
        # Keep only the best class per box: its confidence and index.
        max_conf, max_conf_score = torch.max(img_pred[:, 5 : 5 + num_classes], 1)
        max_conf = max_conf.float().unsqueeze(1)
        max_conf_score = max_conf_score.float().unsqueeze(1)
        seq = (img_pred[:, :5], max_conf, max_conf_score)
        img_pred = torch.cat(seq, 1)
        # Drop rows zeroed by the confidence mask above.
        non_zero_ind = torch.nonzero((img_pred[:, 4]))
        img_pred_ = img_pred[non_zero_ind.squeeze(), :].view(-1, 7)
        try:
            img_classes = unique(img_pred_[:, -1])
        except:
            # NOTE(review): bare except — intended to skip images with no
            # detections, but it also hides any other error here.
            continue
        for cls in img_classes:
            # Get detections of one particular class.
            cls_mask = img_pred_ * (img_pred_[:, -1] == cls).float().unsqueeze(1)
            class_mask_ind = torch.nonzero(cls_mask[:, -2]).squeeze()
            img_pred_class = img_pred_[class_mask_ind].view(-1, 7)
            # Sort the detections by descending objectness confidence.
            conf_sort_index = torch.sort(img_pred_class[:, 4], descending=True)[1]
            img_pred_class = img_pred_class[conf_sort_index]
            idx = img_pred_class.size(0)
            if nms:
                # Greedy NMS: for each kept detection, zero out later ones
                # that overlap it with IoU >= nms_conf, then compact.
                for i in range(idx):
                    try:
                        ious = bbox_iou(
                            img_pred_class[i].unsqueeze(0), img_pred_class[i + 1 :]
                        )
                    except ValueError:
                        break
                    except IndexError:
                        # Rows were removed below, so index i can run off
                        # the shrunken tensor; stop for this class.
                        break
                    iou_mask = (ious < nms_conf).float().unsqueeze(1)
                    img_pred_class[i + 1 :] *= iou_mask
                    non_zero_ind = torch.nonzero(img_pred_class[:, 4]).squeeze()
                    img_pred_class = img_pred_class[non_zero_ind].view(-1, 7)
            # Prefix each surviving row with its batch index.
            batch_ind = img_pred_class.new(img_pred_class.size(0), 1).fill_(ind)
            seq = batch_ind, img_pred_class
            if not write:
                output = torch.cat(seq, 1)
                write = True
            else:
                out = torch.cat(seq, 1)
                output = torch.cat((output, out))
    return output
| 34.754902
| 87
| 0.572779
| 923
| 7,090
| 4.141928
| 0.187432
| 0.045776
| 0.047083
| 0.017787
| 0.210829
| 0.120848
| 0.11457
| 0.06644
| 0.064347
| 0.064347
| 0
| 0.039702
| 0.300141
| 7,090
| 203
| 88
| 34.926108
| 0.730754
| 0.065303
| 0
| 0.108108
| 0
| 0
| 0.000468
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040541
| false
| 0
| 0.033784
| 0
| 0.114865
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f2228d6057ad9c4100fbf0aed98528ab280f726
| 743
|
py
|
Python
|
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
922.py
|
BLUECARVIN/LeetCode
|
0d085ed2dbee47c57d22ac368872161076369ff9
|
[
"MIT"
] | null | null | null |
class Solution:
    def sortArrayByParityII(self, A: list[int]) -> list[int]:
        """Reorder A so even indices hold even values and odd indices odd.

        Assumes A contains equally many even and odd numbers (LeetCode 922).
        BUG FIX: annotations use the builtin ``list`` generic — the original
        ``List[int]`` raised NameError at class creation because
        ``typing.List`` was never imported.
        """
        # Stable sort by parity: evens (key False) first, odds (key True) last.
        A.sort(key=lambda x: (x % 2 != 0))
        result = []
        # Pair the i-th even (from the front) with the i-th odd (from the back).
        for i in range(int(len(A) / 2)):
            result.append(A[i])
            result.append(A[-(1 + i)])
        return result
# ---------- 320ms, 15.9MB ---------- #
class Solution:
    def sortArrayByParityII(self, A: list[int]) -> list[int]:
        """Interleave A's even and odd values: result[2k] even, result[2k+1] odd.

        Assumes A contains equally many even and odd numbers (LeetCode 922).
        BUG FIX: annotations use the builtin ``list`` generic — the original
        ``List[int]`` raised NameError at class creation because
        ``typing.List`` was never imported.
        """
        odd = []
        even = []
        ans = []
        # Sort first so each parity bucket comes out in ascending order.
        A.sort()
        for i in range(len(A)):
            if A[i] % 2 == 0:
                even.append(A[i])
            else:
                odd.append(A[i])
        # Emit pairs: ans[2k] = even[k], ans[2k+1] = odd[k].
        for i in range(len(odd)):
            ans.append(even[i])
            ans.append(odd[i])
        return ans
# ---------- 320ms, 16.1MB ---------- #
| 28.576923
| 61
| 0.414536
| 95
| 743
| 3.242105
| 0.347368
| 0.090909
| 0.058442
| 0.107143
| 0.441558
| 0.350649
| 0.350649
| 0.350649
| 0.350649
| 0.350649
| 0
| 0.038793
| 0.375505
| 743
| 26
| 62
| 28.576923
| 0.625
| 0.096904
| 0
| 0.173913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f224c8f917dc2d903a60f297bdfff121e03b7dc
| 1,190
|
py
|
Python
|
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
mainConsumer.py
|
cmoshe390/pythonProj
|
7123255abbb53e4330c9548be16dd9e237f8a51d
|
[
"Unlicense",
"MIT"
] | null | null | null |
from rabbitConsumer import *
from socketConsumer import SocketConsumer
from dlx import *
import threading
import sys
if __name__ == '__main__':
    # Consumer mode: 'rabbit' or 'socket', chosen by the first CLI argument.
    work_with = sys.argv[1]
    # Routing keys, one per consumer queue.
    r_k = ['*.jpg', '*.jpeg', '#']
    threads = []
    dlx = ReconnectingDlx()
    threads.append(threading.Thread(target=dlx.run))
    for j in range(1, 4):
        if work_with == 'rabbit':
            consumer = RabbitReconnectingConsumer(_id_consumer=j, _exchange='exchange1',
                                                  _queue=f'queue{j}', _routing_key=r_k[j - 1],
                                                  _exchange_type='topic',
                                                  _producer_to_dlx=dlx)
        elif work_with == 'socket':
            consumer = SocketConsumer(_id_consumer=j)
        else:
            print("the parameter in args must be 'rabbit' or 'socket'!")
            # BUG FIX: previously this branch fell through and appended a
            # thread for an unbound (or stale) `consumer`, raising NameError
            # on the first iteration; bail out on an unknown mode instead.
            sys.exit(1)
        threads.append(threading.Thread(target=consumer.run))
    for thread in threads:
        thread.start()
| 34
| 118
| 0.561345
| 127
| 1,190
| 4.96063
| 0.425197
| 0.038095
| 0.052381
| 0.088889
| 0.384127
| 0.27619
| 0.27619
| 0.27619
| 0.27619
| 0.27619
| 0
| 0.008728
| 0.32605
| 1,190
| 34
| 119
| 35
| 0.776808
| 0.172269
| 0
| 0
| 0
| 0
| 0.107034
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.217391
| 0
| 0.217391
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f239d716de5c5b3e73637e42e5427fd0197839a
| 1,991
|
py
|
Python
|
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py
|
brendano257/Zugspitze-Schneefernerhaus
|
64bb86ece2eec147f2a7fb412f87ff2313388753
|
[
"MIT"
] | null | null | null |
"""
A set of CC412022, CC416168 were run back to back without blanks on 2019-11-12.
Rough quantification is done by the below.
"""
__package__ = 'Z'
from datetime import datetime
from settings import CORE_DIR, DB_NAME
from IO.db import connect_to_db, GcRun, Integration, Standard, SampleQuant
from processing import blank_subtract
from reporting import compile_quant_report
engine, session = connect_to_db(DB_NAME, CORE_DIR)

# Standard (cc416168) used as the quantifier for these runs.
standard_to_quantify_with = session.query(Standard).filter(Standard.name == 'cc416168').one_or_none()

# Certified values for the sample being quantified (cc412022, NOAA-provided),
# used later when compiling the report.
certified_values_of_sample = (session.query(Standard)
                              .filter(Standard.name == 'cc412022_noaa_provided')
                              .one().quantifications)

# Names of the VOC compounds to process (taken from the 'vocs' standard).
vocs = session.query(Standard).filter(Standard.name == 'vocs').one_or_none()
vocs = [q.name for q in vocs.quantifications]

# All GcRuns from 2019-11-12 whose integration filename matches the sample
# (CC412022) pattern, in chronological order.
samples = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
           .filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
           .filter(Integration.filename.like('%CC412022___.D'))
           .order_by(GcRun.date)
           .all())

# Matching GcRuns for the quantifying standard (CC416168), same day.
standards = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
             .filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
             .filter(Integration.filename.like('%CC416168___.D'))
             .order_by(GcRun.date)
             .all())

quants = []
# Pair sample and standard runs positionally (runs were back-to-back; see
# the module docstring — no blanks were run, hence force_no_blank=True).
for sample, standard in zip(samples, standards):
    blank_subtract(sample, vocs, session, blank=None, force_no_blank=True)
    blank_subtract(standard, vocs, session, blank=None, force_no_blank=True)
    quant = SampleQuant(sample, standard, None, standard_to_quantify_with)
    quant.quantify()
    quants.append(quant)

compile_quant_report(quants, 'CC412022', 'CC416168', certified_values_of_sample, date=datetime(2019, 11, 12))
| 40.632653
| 109
| 0.70668
| 260
| 1,991
| 5.215385
| 0.330769
| 0.026549
| 0.058997
| 0.066372
| 0.427729
| 0.412979
| 0.259587
| 0.259587
| 0.20649
| 0.20649
| 0
| 0.058752
| 0.179307
| 1,991
| 48
| 110
| 41.479167
| 0.771114
| 0.113009
| 0
| 0.193548
| 0
| 0
| 0.044963
| 0.012521
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16129
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f2590662675a6fa11503eafa56e671b78fe7a23
| 10,473
|
py
|
Python
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 17
|
2015-06-26T08:49:07.000Z
|
2021-09-11T09:02:40.000Z
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 5
|
2015-04-27T13:44:58.000Z
|
2022-02-07T19:00:42.000Z
|
srcds/events/csgo.py
|
w4rum/pysrcds
|
a9dbc198c6f087757e40d9af14ca8de9a39cef74
|
[
"MIT"
] | 12
|
2015-02-13T15:34:47.000Z
|
2021-09-11T09:02:30.000Z
|
# Copyright (C) 2013 Peter Rowlands
"""csgo events module
Contains event classes for CS:S and CS:GO events
"""
from __future__ import absolute_import, unicode_literals
from future.utils import python_2_unicode_compatible
from .generic import (BaseEvent, PlayerEvent, PlayerTargetEvent, KillEvent,
AttackEvent)
@python_2_unicode_compatible
class SwitchTeamEvent(PlayerEvent):
    """Player switched team event.

    This log line carries no current-team suffix, so the regex spells out
    its own player group instead of reusing PlayerEvent.regex.
    """

    regex = ''.join([
        BaseEvent.regex,
        r'"(?P<player_name>.*)<(?P<uid>\d*)><(?P<steam_id>[\w:]*)>" ',
        r'switched from team <(?P<orig_team>\w*)> to <(?P<new_team>\w*)>',
    ])

    def __init__(self, timestamp, player_name, uid, steam_id, orig_team,
                 new_team):
        # team=None: the player is between teams for this event.
        super(SwitchTeamEvent, self).__init__(timestamp, player_name, uid,
                                              steam_id, team=None)
        self.orig_team = orig_team
        self.new_team = new_team

    def text(self):
        # Side effect: clears the player's team so str(player) omits it.
        player = self.player
        player.team = None
        msg = ' '.join([
            '"%s"' % player,
            'switched from team <%s> to <%s>' % (self.orig_team,
                                                 self.new_team),
        ])
        # NOTE(review): super(PlayerEvent, self) deliberately skips
        # PlayerEvent.text — presumably to avoid the team-tagged player
        # prefix; confirm against the generic module.
        return ' '.join([super(PlayerEvent, self).text(), msg])

    __str__ = text
@python_2_unicode_compatible
class BuyEvent(PlayerEvent):
    """Player purchased an item."""

    regex = ''.join([
        PlayerEvent.regex,
        r'purchased "(?P<item>\w*)"',
    ])

    def __init__(self, timestamp, player_name, uid, steam_id, team, item):
        super(BuyEvent, self).__init__(timestamp, player_name, uid, steam_id,
                                       team)
        self.item = item

    def text(self):
        # Render as the standard player prefix followed by the purchase.
        return ' '.join([
            super(BuyEvent, self).text(),
            'purchased "%s"' % (self.item),
        ])

    __str__ = text
@python_2_unicode_compatible
class ThrowEvent(PlayerEvent):
    """Player threw grenade event."""

    regex = ''.join([
        PlayerEvent.regex,
        r'threw (?P<nade>\w*) \[(?P<location>-?\d+ -?\d+ -?\d+)\]',
    ])

    def __init__(self, timestamp, player_name, uid, steam_id, team, nade,
                 location):
        # location must already be a 3-tuple of ints; from_re_match parses
        # the raw "x y z" capture into that form.
        if not isinstance(location, tuple) or not len(location) == 3:
            raise TypeError('Expected 3-tuple for location')
        super(ThrowEvent, self).__init__(timestamp, player_name, uid, steam_id,
                                         team)
        self.location = location
        self.nade = nade

    def text(self):
        msg = 'threw %s [%d %d %d]' % (self.nade, self.location[0],
                                       self.location[1], self.location[2])
        return ' '.join([super(ThrowEvent, self).text(), msg])

    __str__ = text

    @classmethod
    def from_re_match(cls, match):
        """Return an event constructed from a self.regex match"""
        kwargs = match.groupdict()
        # Convert the "x y z" capture into the 3-tuple __init__ requires.
        location = kwargs['location'].split()
        kwargs['location'] = (int(location[0]), int(location[1]),
                              int(location[2]))
        return cls(**kwargs)
@python_2_unicode_compatible
class CsgoAssistEvent(PlayerTargetEvent):
    """Player assist event: `player` assisted killing `target`."""

    regex = ''.join([
        BaseEvent.regex,
        PlayerTargetEvent.player_regex,
        r' assisted killing ',
        PlayerTargetEvent.target_regex
    ])

    def __init__(self, timestamp, player_name, player_uid, player_steam_id,
                 player_team, target_name, target_uid, target_steam_id,
                 target_team):
        super(CsgoAssistEvent, self).__init__(timestamp, player_name,
                                              player_uid, player_steam_id,
                                              player_team, target_name, target_uid,
                                              target_steam_id, target_team)

    def text(self):
        # (The format string carries a trailing space; preserved as-is.)
        msg = '"%s" assisted killing "%s" ' % (self.player, self.target)
        return ' '.join([super(CsgoAssistEvent, self).text(), msg])

    __str__ = text
@python_2_unicode_compatible
class CsgoKillEvent(KillEvent):
    """CS:GO specific kill event.

    Extends the generic kill line with killer/victim world coordinates and
    an optional trailing "(headshot)" marker.
    """

    regex = ''.join([
        BaseEvent.regex,
        PlayerTargetEvent.player_regex,
        r'\[(?P<player_location>-?\d+ -?\d+ -?\d+)\]',
        r' killed ',
        PlayerTargetEvent.target_regex,
        r'\[(?P<target_location>-?\d+ -?\d+ -?\d+)\]',
        r' with "(?P<weapon>\w*)"',
        r'( \(headshot\))?',
    ])

    def __init__(self, timestamp, player_name, player_uid, player_steam_id,
                 player_team, player_location, target_name, target_uid,
                 target_steam_id, target_team, target_location, weapon,
                 headshot=False):
        # The location arguments must already be 3-tuples of ints;
        # from_re_match parses them out of the raw "x y z" captures.
        super(CsgoKillEvent, self).__init__(timestamp, player_name, player_uid,
                                            player_steam_id, player_team,
                                            target_name, target_uid,
                                            target_steam_id, target_team,
                                            weapon)
        if (not isinstance(player_location, tuple)
                or not len(player_location) == 3):
            raise TypeError('Expected 3-tuple for player_location')
        if (not isinstance(target_location, tuple)
                or not len(target_location) == 3):
            raise TypeError('Expected 3-tuple for target_location')
        self.player_location = player_location
        self.target_location = target_location
        self.headshot = headshot

    def text(self):
        # Rebuilds the full log line from scratch (BaseEvent.text not reused).
        msg = [
            'L %s:' % (self.timestamp_to_str(self.timestamp)),
            '"%s" [%d %d %d]' % (self.player, self.player_location[0],
                                 self.player_location[1],
                                 self.player_location[2]),
            'killed',
            '"%s" [%d %d %d]' % (self.target, self.target_location[0],
                                 self.target_location[1],
                                 self.target_location[2]),
            'with "%s"' % (self.weapon),
        ]
        if self.headshot:
            msg.append('(headshot)')
        return ' '.join(msg)

    __str__ = text

    @classmethod
    def from_re_match(cls, match):
        """Return an event constructed from a self.regex match"""
        kwargs = match.groupdict()
        # Convert each "x y z" capture into the 3-tuple __init__ requires.
        player_location = kwargs['player_location'].split()
        kwargs['player_location'] = (int(player_location[0]),
                                     int(player_location[1]),
                                     int(player_location[2]))
        target_location = kwargs['target_location'].split()
        kwargs['target_location'] = (int(target_location[0]),
                                     int(target_location[1]),
                                     int(target_location[2]))
        # The optional headshot group is detected on the raw matched string.
        if match.string.endswith('(headshot)'):
            kwargs['headshot'] = True
        return cls(**kwargs)
@python_2_unicode_compatible
class CsgoAttackEvent(AttackEvent):
    """CS:GO specific attack event.

    Extends the generic attack line with attacker/victim world coordinates
    and the damage/armor/health/hitgroup detail fields.
    """

    regex = ''.join([
        BaseEvent.regex,
        PlayerTargetEvent.player_regex,
        r'\[(?P<player_location>-?\d+ -?\d+ -?\d+)\]',
        r' attacked ',
        PlayerTargetEvent.target_regex,
        r'\[(?P<target_location>-?\d+ -?\d+ -?\d+)\]',
        r' with "(?P<weapon>\w*)"',
        r' \(damage "(?P<damage>\d+)"\)',
        r' \(damage_armor "(?P<damage_armor>\d+)"\)',
        r' \(health "(?P<health>\d+)"\)',
        r' \(armor "(?P<armor>\d+)"\)',
        r' \(hitgroup "(?P<hitgroup>[\w ]+)"\)',
    ])

    def __init__(self, timestamp, player_name, player_uid, player_steam_id,
                 player_team, player_location, target_name, target_uid,
                 target_steam_id, target_team, target_location, weapon,
                 damage, damage_armor, health, armor, hitgroup):
        # The location arguments must already be 3-tuples of ints;
        # from_re_match parses them out of the raw "x y z" captures.
        super(CsgoAttackEvent, self).__init__(timestamp, player_name,
                                              player_uid, player_steam_id,
                                              player_team, target_name,
                                              target_uid, target_steam_id,
                                              target_team, weapon, damage)
        if (not isinstance(player_location, tuple)
                or not len(player_location) == 3):
            raise TypeError('Expected 3-tuple for player_location')
        if (not isinstance(target_location, tuple)
                or not len(target_location) == 3):
            raise TypeError('Expected 3-tuple for target_location')
        self.player_location = player_location
        self.target_location = target_location
        # Numeric detail fields arrive as regex-captured strings; store ints.
        self.damage_armor = int(damage_armor)
        self.health = int(health)
        self.armor = int(armor)
        self.hitgroup = hitgroup

    def text(self):
        # Rebuilds the full log line from scratch (BaseEvent.text not reused).
        msg = [
            'L %s:' % (self.timestamp_to_str(self.timestamp)),
            '"%s" [%d %d %d]' % (self.player, self.player_location[0],
                                 self.player_location[1],
                                 self.player_location[2]),
            'attacked',
            '"%s" [%d %d %d]' % (self.target, self.target_location[0],
                                 self.target_location[1],
                                 self.target_location[2]),
            'with "%s"' % (self.weapon),
            '(damage "%d")' % (self.damage),
            '(damage_armor "%d")' % (self.damage_armor),
            '(health "%d")' % (self.health),
            '(armor "%d")' % (self.armor),
            '(hitgroup "%s")' % (self.hitgroup),
        ]
        return ' '.join(msg)

    __str__ = text

    @classmethod
    def from_re_match(cls, match):
        """Return an event constructed from a self.regex match"""
        kwargs = match.groupdict()
        # Convert each "x y z" capture into the 3-tuple __init__ requires.
        player_location = kwargs['player_location'].split()
        kwargs['player_location'] = (int(player_location[0]),
                                     int(player_location[1]),
                                     int(player_location[2]))
        target_location = kwargs['target_location'].split()
        kwargs['target_location'] = (int(target_location[0]),
                                     int(target_location[1]),
                                     int(target_location[2]))
        return cls(**kwargs)
# Every CS:GO event class exported by this module — e.g. for a log parser
# that tries each class's regex in turn.
CSGO_EVENTS = [
    SwitchTeamEvent,
    BuyEvent,
    ThrowEvent,
    CsgoAssistEvent,
    CsgoKillEvent,
    CsgoAttackEvent,
]
| 36.491289
| 83
| 0.531175
| 1,070
| 10,473
| 4.945794
| 0.109346
| 0.084656
| 0.043084
| 0.031746
| 0.664588
| 0.63511
| 0.623394
| 0.623205
| 0.599017
| 0.580499
| 0
| 0.00736
| 0.338394
| 10,473
| 286
| 84
| 36.618881
| 0.756386
| 0.038575
| 0
| 0.554054
| 0
| 0.004505
| 0.123902
| 0.01897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067568
| false
| 0
| 0.013514
| 0
| 0.202703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f27bd70a0bac448a69a312f5b0f06826fe66bdd
| 670
|
py
|
Python
|
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
Listing_19-1.py
|
PrinceChou/Play-Python-with-Alisa
|
808ab2744a99c548de4633b5707af27112bcdccf
|
[
"Apache-2.0"
] | null | null | null |
# Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------

# Trying out sounds in Pygame

import pygame

pygame.init()
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000)  # give the mixer a moment to finish initializing
splat = pygame.mixer.Sound("splat.wav")  # load the sound effect
splat.play()

# Keep the window alive until the user closes it.
done = False
while not done:
    for event in pygame.event.get():
        # Latch the flag as soon as a QUIT event shows up.
        done = done or event.type == pygame.QUIT
pygame.quit()
| 29.130435
| 81
| 0.649254
| 89
| 670
| 4.865169
| 0.674157
| 0.046189
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032015
| 0.207463
| 670
| 22
| 82
| 30.454545
| 0.783428
| 0.444776
| 0
| 0
| 0
| 0
| 0.024793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f2f6a510aa43446af03b23b36744744444b6c67
| 1,532
|
py
|
Python
|
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
docker_emperor/commands/context/set.py
|
workon-io/docker-emperor
|
d827bb2806494dcba97920dd83c5934d0a300089
|
[
"Apache-2.0"
] | null | null | null |
import six
import docker_emperor.logger as logger
from docker_emperor.nodes.context import Context
def run(root, *args, **kwargs):
    """Select the active context for the project.

    With a name argument: activate that context if it exists, otherwise
    log an error and exit.  Without one: create/activate a 'default'
    context when none are defined, or interactively prompt the user to
    pick one of the existing contexts.
    """
    name = args[0].strip() if args else None
    if name:
        if name in root.project['contexts']:
            root.project.config['context'] = name
            logger.success(u'Context <b>%s</b> selected.' % root.context.name)
        else:
            logger.error(u'Context <b>%s</b> unknow.' % name)
            exit(0)
    else:
        contexts = root.project['contexts']
        if not contexts:
            # No contexts exist yet: create and activate a 'default' one.
            contexts['default'] = Context('default')
            root.project.config['context'] = 'default'
            logger.warning(u'No context defines, use <b>%s</b>.' % root.context.name)
        else:
            def select_context_name(contexts):
                # Show a 1-based menu and recurse until the input is valid.
                logger.ask(u'Please select the <b>{}</b> context to work on'.format(root.project.name))
                for i, c in enumerate(contexts):
                    logger.choice(u'<b>%s</b>] %s' % (i+1, c.name))
                ci = six.moves.input(': ')
                try:
                    if ci == '0':
                        raise Exception
                    return contexts[int(ci)-1].name
                except Exception:
                    # BUG FIX: the closing markup tag was malformed
                    # ('<b>%s/b>' instead of '<b>%s</b>').
                    logger.error(u'<b>%s</b> is not a valid choice' % ci)
                    return select_context_name(contexts)
            root.project.config['context'] = select_context_name(contexts)
            logger.success(u'Context <b>%s</b> selected.' % root.context.name)
| 39.282051
| 103
| 0.539817
| 189
| 1,532
| 4.333333
| 0.343915
| 0.094017
| 0.021978
| 0.087912
| 0.274725
| 0.114774
| 0.114774
| 0.114774
| 0.114774
| 0.114774
| 0
| 0.00484
| 0.325718
| 1,532
| 39
| 104
| 39.282051
| 0.787996
| 0
| 0
| 0.151515
| 0
| 0
| 0.171559
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.090909
| 0
| 0.212121
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f2f9ccd72b1ada4944e0fb6d3cba3a6b6b3d3fc
| 759
|
py
|
Python
|
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
bnc/scripts/instance_lock_test.py
|
dotzhou/geodesy-ausgeoid
|
7d4fbcc1d88738de6ab84ccdba362407cbaeb117
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
from instance_lock import InstanceLock
################################################################################
def main():
    """Acquire the test instance lock, hold it for 10 minutes, release it.

    Exits with -1 if the lock cannot be acquired (e.g. another instance
    already holds it).
    """
    print(sys.argv[0])
    instance_lock = InstanceLock("/home/ted/BNC/logs/.__MY_TEST_LOCK__", sys.argv[0], 3)
    try:
        instance_lock.lock()
    except Exception as e:
        # BUG FIX: Exception.message does not exist on Python 3 (this file
        # targets both via __future__ imports); str(e) works on 2 and 3.
        print("Failed to start: " + str(e))
        sys.exit(-1)
    print("sleeping ..")
    time.sleep(60*10)
    print("Exit ..")
    instance_lock.unlock()
################################################################################
if __name__ == '__main__':
main()
| 19.973684
| 88
| 0.524374
| 79
| 759
| 4.632911
| 0.56962
| 0.131148
| 0.131148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012719
| 0.171278
| 759
| 37
| 89
| 20.513514
| 0.569157
| 0.02635
| 0
| 0
| 0
| 0
| 0.137153
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.3
| 0
| 0.35
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f30a5cc06c93cc21cd8f006b81cb7e3a4339ab4
| 1,194
|
py
|
Python
|
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
examples/Sans_Sphere/guiFitSphere.py
|
DomiDre/modelexp
|
1ec25f71e739dac27716f9a8637fa6ab067499b9
|
[
"MIT"
] | null | null | null |
import modelexp

from modelexp.experiments.sas import Sans
from modelexp.models.sas import Sphere
from modelexp.data import XyeData
from modelexp.fit import LevenbergMarquardt
from modelexp.models.sas import InstrumentalResolution

# GUI fit of a sphere SAS model (with instrumental resolution) against two
# datasets tagged 'sa' and 'la' (presumably small-angle / large-angle
# detector settings — confirm against the data files).
app = modelexp.App()
app.setExperiment(Sans)

dataRef = app.setData(XyeData)
dataRef.loadFromFile('./sansSphereData_sa.xye', 'sa')
dataRef.loadFromFile('./sansSphereData_la.xye', 'la')
dataRef.plotData()

modelRef = app.setModel(Sphere, InstrumentalResolution)
# Each parameter: initial value, fit bounds, and whether the fit may vary it.
modelRef.setParam("r", 50.115979438653525, minVal = 0, maxVal = 100, vary = True)
modelRef.setParam("sldSphere", 4.5e-05, minVal = 0, maxVal = 0.00045000000000000004, vary = False)
modelRef.setParam("sldSolvent", 1e-05, minVal = 0, maxVal = 0.0001, vary = False)
modelRef.setParam("sigR", 0.0446, minVal = 0, maxVal = 0.2, vary = True)
modelRef.setParam("i0", 1.0082741570299425, minVal = 0, maxVal = 10, vary = True)
modelRef.setParam("bg", 0.0, minVal = 0, maxVal = 1, vary = False)
modelRef.setParam("dTheta_sa", 0.000174, minVal = 0, maxVal = 0.001, vary = True)
modelRef.setParam("dTheta_la", 0.000765, minVal = 0, maxVal = 0.001, vary = True)

app.setFit(LevenbergMarquardt)
app.show()
| 39.8
| 99
| 0.742881
| 160
| 1,194
| 5.51875
| 0.3625
| 0.14496
| 0.11778
| 0.079275
| 0.15402
| 0.056625
| 0.056625
| 0
| 0
| 0
| 0
| 0.107414
| 0.118928
| 1,194
| 30
| 100
| 39.8
| 0.731939
| 0
| 0
| 0
| 0
| 0
| 0.080335
| 0.038494
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.26087
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f373ae8b308ab8313e26c9ce9ba782726162914
| 2,273
|
py
|
Python
|
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | 4
|
2020-08-04T10:59:10.000Z
|
2021-08-23T13:42:03.000Z
|
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | null | null | null |
almanac/pages/abstract_page.py
|
welchbj/almanac
|
91db5921a27f7d089b4ad8463ffb6e1453c5126a
|
[
"MIT"
] | 2
|
2021-07-20T04:49:22.000Z
|
2021-08-23T13:42:23.000Z
|
from __future__ import annotations
from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional, Set
from .page_path import PagePath, PagePathLike
class AbstractPage(ABC):
    """The base abstract page interface.

    Pages form a tree: each page has a path, an optional parent, and a set
    of children.  Equality and hashing are defined by the path alone, so
    pages can be stored in sets and compared across the tree.
    """

    def __init__(
        self,
        path: PagePathLike,
    ) -> None:
        self._path = PagePath(path)
        self._parent: Optional[AbstractPage] = None
        self._children: Set[AbstractPage] = set()

    # NOTE: abc.abstractproperty has been deprecated since Python 3.3; the
    # documented replacement is stacking @property over @abstractmethod,
    # which behaves identically for subclasses.
    @property
    @abstractmethod
    def help_text(self) -> str:
        """The help text about this page.

        Think of this as a static explanation about the page type's role within the
        greater application, rather than reflecting the current state of this
        particular page.
        """

    @property
    @abstractmethod
    def info_text(self) -> str:
        """The info text about this page.

        Think of this as a more dynamic output (in contrast to :meth:`help_text`),
        which reflect the current state of this page.
        """

    @abstractmethod
    def get_prompt(self) -> str:
        """Return the prompt text for this page.

        This is what is shown on the application's current line, acting as the
        input prompt.
        """

    @property
    def path(self) -> PagePath:
        """This page's path."""
        return self._path

    @property
    def parent(self) -> Optional[AbstractPage]:
        """The parent page of this page (None for the root)."""
        return self._parent

    @parent.setter
    def parent(self, new_parent: AbstractPage) -> None:
        self._parent = new_parent

    @property
    def children(self) -> Set[AbstractPage]:
        """The immediate children of this page."""
        return self._children

    def __hash__(self) -> int:
        # Hash on the path so pages can live in the parent's children set.
        return hash(self._path)

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, AbstractPage):
            return NotImplemented
        return self._path == other._path

    def __str__(self) -> str:
        return str(self.path)

    def __repr__(self) -> str:
        return f'<{self.__class__.__qualname__} [{self.path}]>'
| 21.647619
| 83
| 0.57985
| 252
| 2,273
| 5.035714
| 0.345238
| 0.044129
| 0.023641
| 0.022065
| 0.113475
| 0.048857
| 0.048857
| 0.048857
| 0.048857
| 0
| 0
| 0
| 0.33216
| 2,273
| 104
| 84
| 21.855769
| 0.835968
| 0.259569
| 0
| 0.40625
| 0
| 0
| 0.028828
| 0.019218
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.0625
| 0.046875
| 0.390625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f3740dbe908121e76457672fb1354e03d0a203a
| 3,022
|
py
|
Python
|
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
examples/VTK/PerfTests/scene-export-time.py
|
ajpmaclean/trame
|
48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787
|
[
"BSD-3-Clause"
] | null | null | null |
from trame import state
from trame.html import vuetify, vtk
from trame.layouts import SinglePage

from vtkmodules.vtkImagingCore import vtkRTAnalyticSource
from vtkmodules.vtkFiltersGeometry import vtkGeometryFilter
from vtkmodules.vtkRenderingCore import (
    vtkRenderer,
    vtkRenderWindow,
    vtkRenderWindowInteractor,
    vtkDataSetMapper,
    vtkActor,
)

# VTK factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch  # noqa
import vtkmodules.vtkRenderingOpenGL2  # noqa

# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------

DEFAULT_RESOLUTION = 10

renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)

renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()

# Source -> geometry filter -> mapper -> actor pipeline.
# FIX: renamed the local from `filter` to `geometry_filter` — the original
# name shadowed the Python builtin `filter`.
source = vtkRTAnalyticSource()
geometry_filter = vtkGeometryFilter()
geometry_filter.SetInputConnection(source.GetOutputPort())

mapper = vtkDataSetMapper()
actor = vtkActor()
mapper.SetInputConnection(geometry_filter.GetOutputPort())
actor.SetMapper(mapper)

renderer.AddActor(actor)
renderer.ResetCamera()
renderWindow.Render()

# Color by the dataset's scalar range and render cell edges in white.
geometry_filter.Update()
_min, _max = geometry_filter.GetOutput().GetPointData().GetScalars().GetRange()
mapper.SetScalarRange(_min, _max)
actor.GetProperty().SetEdgeVisibility(1)
actor.GetProperty().SetEdgeColor(1, 1, 1)

# -----------------------------------------------------------------------------


@state.change("resolution")
def update_resolution(resolution=DEFAULT_RESOLUTION, **kwargs):
    """Resize the analytic source whenever the shared 'resolution' state changes."""
    source.SetWholeExtent(
        -resolution, resolution, -resolution, resolution, -resolution, resolution
    )
    html_view.reset_camera()
    html_view.update()


# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------

# html_view = vtk.VtkLocalView(renderWindow)
# html_view = vtk.VtkRemoteView(renderWindow)
html_view = vtk.VtkRemoteLocalView(renderWindow, mode="local")

layout = SinglePage("Geometry export", on_ready=html_view.update)
layout.logo.click = html_view.reset_camera
layout.title.set_text("Geometry export")

with layout.toolbar as tb:
    vuetify.VSpacer()
    tb.add_child("{{ resolution }}")
    vuetify.VSlider(
        v_model=("resolution", DEFAULT_RESOLUTION),
        min=10,
        max=100,
        step=1,
        hide_details=True,
        dense=True,
        style="max-width: 300px",
    )
    vuetify.VBtn("Update", click=html_view.update)

with layout.content:
    vuetify.VContainer(
        fluid=True,
        classes="pa-0 fill-height",
        children=[html_view],
    )

# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------

if __name__ == "__main__":
    layout.start()
| 29.627451
| 81
| 0.617141
| 242
| 3,022
| 7.57438
| 0.475207
| 0.03928
| 0.065466
| 0.065466
| 0.032733
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006381
| 0.118465
| 3,022
| 101
| 82
| 29.920792
| 0.681682
| 0.228657
| 0
| 0
| 0
| 0
| 0.05054
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.117647
| 0
| 0.132353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f382211712726ce3bebece3524ea17b01c0cd4f
| 2,540
|
py
|
Python
|
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/dashboard/store/special_page/views.py
|
Chaoslecion123/Diver
|
8c5c493701422eada49cbf95b0b0add08f1ea561
|
[
"BSD-3-Clause"
] | null | null | null |
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from ....store.models import SpecialPage
from ...views import staff_member_required
from .forms import SpecialPageForm
@staff_member_required
@permission_required('site.manage_settings')
def special_page_add(request, site_settings_pk):
    """Create a new special page attached to the given site settings."""
    special_page = SpecialPage(site_settings_id=site_settings_pk)
    form = SpecialPageForm(request.POST or None, instance=special_page)
    if form.is_valid():
        special_page = form.save()
        msg = pgettext_lazy(
            'Dashboard message', 'Added special page %s') % (special_page,)
        messages.success(request, msg)
        return redirect('dashboard:site-details', pk=site_settings_pk)
    ctx = {
        'form': form,
        'site_settings_pk': site_settings_pk,
        'special_page': special_page,
    }
    return TemplateResponse(
        request, 'dashboard/store/special_pages/form.html', ctx)
@staff_member_required
@permission_required('site.manage_settings')
def special_page_edit(request, site_settings_pk, pk):
    """Edit an existing special page; 404 if it does not exist."""
    special_page = get_object_or_404(SpecialPage, pk=pk)
    form = SpecialPageForm(request.POST or None, instance=special_page)
    if form.is_valid():
        special_page = form.save()
        # BUG FIX: the translation context was 'dashboard message'
        # (lowercase), inconsistent with the 'Dashboard message' context
        # used by the add/delete views — the mismatch creates a separate,
        # likely untranslated entry in the message catalogs.
        msg = pgettext_lazy(
            'Dashboard message', 'Updated special page %s') % (special_page,)
        messages.success(request, msg)
        return redirect('dashboard:site-details', pk=site_settings_pk)
    ctx = {'form': form, 'site_settings_pk': site_settings_pk,
           'special_page': special_page}
    return TemplateResponse(
        request, 'dashboard/store/special_pages/form.html', ctx)
@staff_member_required
@permission_required('site.manage_settings')
def special_page_delete(request, site_settings_pk, pk):
    """Delete a special page after POST confirmation; GET shows the modal."""
    special_page = get_object_or_404(SpecialPage, pk=pk)
    if request.method == 'POST':
        special_page.delete()
        msg = pgettext_lazy(
            'Dashboard message',
            'Removed site special page %s') % (special_page,)
        messages.success(request, msg)
        return redirect(
            'dashboard:site-details', pk=site_settings_pk)
    ctx = {'special_page': special_page, 'site_settings_pk': site_settings_pk}
    return TemplateResponse(
        request, 'dashboard/store/special_pages/modal/confirm_delete.html',
        ctx)
| 40.31746
| 77
| 0.715748
| 306
| 2,540
| 5.663399
| 0.215686
| 0.14599
| 0.10502
| 0.055395
| 0.693595
| 0.665897
| 0.64974
| 0.618003
| 0.618003
| 0.589152
| 0
| 0.004358
| 0.187008
| 2,540
| 62
| 78
| 40.967742
| 0.834867
| 0
| 0
| 0.527273
| 0
| 0
| 0.188189
| 0.078346
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.145455
| 0
| 0.309091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f3aae6740fa544f6fcbafd5b09e5b47c616d5d2
| 2,449
|
py
|
Python
|
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
satstac/landsat/cli.py
|
developmentseed/sat-stac-landsat
|
f2263485043a827b4153aecc12f45a3d1363e9e2
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import sys
from datetime import datetime
import satstac
from satstac import Catalog
import satstac.landsat as landsat
from .version import __version__
# quiet loggers
logging.getLogger('urllib3').propagate = False
logging.getLogger('requests').propagate = False
logger = logging.getLogger(__name__)
def parse_args(args):
    """Parse the CLI argument list and return the result as a plain dict."""
    description = 'sat-stac-landsat (v%s)' % __version__
    formatter = argparse.ArgumentDefaultsHelpFormatter

    root = argparse.ArgumentParser(description=description)

    # Options shared by every subcommand.
    common = argparse.ArgumentParser(add_help=False)
    common.add_argument('--version', help='Print version and exit',
                        action='version', version=__version__)
    common.add_argument('--log', default=2, type=int,
                        help='0:all, 1:debug, 2:info, 3:warning, 4:error, 5:critical')

    subparsers = root.add_subparsers(dest='command')

    def valid_date(d):
        # Only accept YYYY-MM-DD strings.
        return datetime.strptime(d, '%Y-%m-%d').date()

    # 'ingest' subcommand
    ingest = subparsers.add_parser('ingest', parents=[common],
                                   help='Ingest records into catalog',
                                   formatter_class=formatter)
    ingest.add_argument('catalog', help='Catalog that contains the Collection')
    ingest.add_argument('-c', '--collections', default='all',
                        help='Collection to ingest (pre, c1, or all)')
    ingest.add_argument('--realtime', help='Also ingest realtime data',
                        action='store_true', default=False)
    ingest.add_argument('--missing', help='Only ingest missing items',
                        action='store_true', default=False)
    ingest.add_argument('--start', help='Start date of ingestion',
                        default=None, type=valid_date)
    ingest.add_argument('--end', help='End date of ingestion',
                        default=None, type=valid_date)

    # Turn the Namespace into a dictionary for the caller.
    return vars(root.parse_args(args))
def cli():
    """Entry point: configure logging, then dispatch the parsed subcommand."""
    args = parse_args(sys.argv[1:])
    logging.basicConfig(stream=sys.stdout, level=args.pop('log') * 10)
    cmd = args.pop('command')
    if cmd == 'ingest':
        cat = Catalog.open(args['catalog'])
        landsat.add_items(
            cat,
            collections=args['collections'],
            realtime=args['realtime'],
            missing=args['missing'],
            start_date=args['start'],
            end_date=args['end'],
        )
    elif cmd == 'cmd2':
        print(cmd)
if __name__ == "__main__":
cli()
| 37.106061
| 120
| 0.694978
| 309
| 2,449
| 5.339806
| 0.385113
| 0.06
| 0.072121
| 0.030303
| 0.141818
| 0.141818
| 0.100606
| 0.100606
| 0
| 0
| 0
| 0.00978
| 0.164965
| 2,449
| 66
| 121
| 37.106061
| 0.797066
| 0.080441
| 0
| 0
| 0
| 0.02381
| 0.22049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.261905
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f3ae02cd059cdf4b269302e970b02d87301e8cf
| 3,005
|
py
|
Python
|
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | 5
|
2020-12-20T14:50:31.000Z
|
2021-09-20T06:39:18.000Z
|
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | null | null | null |
database.py
|
pratik-choudhari/squ.ez-url-shortener
|
ebd13da15501806d0ef30353fe77a9d3d6d1081a
|
[
"MIT"
] | 3
|
2020-12-20T18:18:09.000Z
|
2021-11-14T09:42:07.000Z
|
import sqlite3
import random
import string
import re
import sys
# domain name
# An optional single CLI argument selects the base domain for generated
# short links: "localhost" -> local dev server; anything else (or no
# argument) -> the Heroku deployment.
args = sys.argv
if len(args)==2:
    if args[1] == 'localhost':
        domain = "localhost:5000/"
    else:
        domain = "https://squez-url-shortener.herokuapp.com/"
else:
    domain = "https://squez-url-shortener.herokuapp.com/"

# URL verification regex (case-insensitive; matches http(s), www-prefixed,
# and bare domain forms)
regex = r"""(?i)\b((?:https?://|www\d{0,3}[.]{1}|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))"""

# check_same_thread=False to disable thread sync
conn = sqlite3.connect("url.db", check_same_thread=False)
def check_if_exists(id: str, flag: bool):
    """
    returns true if record exists

    params:
        id: data to check in db
        flag: True if shortened URL, else False
    returns:
        True if record exists else False
    """
    # SECURITY FIX: use a parameterized query instead of f-string
    # interpolation — the original allowed SQL injection through the
    # user-supplied URL/id value.
    if flag:
        query = 'SELECT COUNT(*) FROM URLS WHERE ID=?;'
    else:
        query = 'SELECT COUNT(*) FROM URLS WHERE ORIGINAL=?;'
    db_res = conn.execute(query, (id,))
    if [i[0] for i in db_res] == [0]:
        return False
    return True
def insert_data(id: str, og: str, value: int):
    """
    Insert data in db

    Params:
        id: short url(primary key)
        og: original url
        value: number of visit
    returns:
        True if successful else False
    """
    # SECURITY FIX: parameterized INSERT — the original f-string query was
    # vulnerable to SQL injection via the original URL.
    query = 'INSERT INTO URLS (ID, ORIGINAL, VISITS) VALUES (?, ?, ?);'
    db_res = conn.execute(query, (str(id), str(og), int(value)))
    conn.commit()
    if not db_res:
        return False
    return True
def get_original_url(id: str, flag: bool):
    """
    returns record data if exists

    params:
        id: shortened or original url
        flag: True for shortened id else False
    returns:
        False if data doesn't exist else return data
    """
    # SECURITY FIX: parameterized SELECT — the original f-string query was
    # vulnerable to SQL injection via the user-supplied URL/id.
    if flag:
        query = 'SELECT ORIGINAL FROM URLS WHERE ID=?;'
    else:
        query = 'SELECT ID FROM URLS WHERE ORIGINAL=?;'
    db_res = conn.execute(query, (str(id),))
    url = [i[0] for i in db_res]
    if url:
        return url[0]
    return False
def get_valid_combination(url: str)-> str:
    """
    finds and returns shortened URL

    params:
        url: original url
    returns:
        False if operation failed else return whole shortened link
    """
    # Validate against the module-level URL regex before shortening.
    res = re.findall(regex, url)
    # Normalize: strip scheme and leading www-like prefixes.  NOTE(review):
    # the dots in (www.|ww.|w.) are unescaped, so they match any character
    # — presumably intended as literal dots; confirm before tightening.
    url = re.sub(r"^(http://|https://){0,1}(www.|ww.|w.){0,1}", "", url)
    data = False
    if res:
        if not check_if_exists(url, False):
            # New URL: generate random 8-letter ids until an unused one
            # is found, then persist it with a zero visit count.
            while 1:
                shrt = ''.join(random.choice(string.ascii_letters) for _ in range(8))
                if not check_if_exists(shrt, True):
                    if not insert_data(shrt, url, 0):
                        return False
                    data = "".join([domain, shrt])
                    break
        else:
            # URL was shortened before: reuse the stored id.
            shrt = get_original_url(url, False)
            data = "".join([domain, shrt])
    return data
| 28.084112
| 200
| 0.547088
| 403
| 3,005
| 4.019851
| 0.290323
| 0.018519
| 0.007407
| 0.007407
| 0.31358
| 0.135802
| 0.107407
| 0.054321
| 0
| 0
| 0
| 0.012071
| 0.283195
| 3,005
| 106
| 201
| 28.349057
| 0.73909
| 0.231281
| 0
| 0.344828
| 0
| 0.034483
| 0.28301
| 0.095106
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.086207
| 0
| 0.293103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f3e4585789dca549a8fbdd15c298b8c2bf0a041
| 1,954
|
py
|
Python
|
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
ball.py
|
b3mery/Python-Pong-Game
|
d0051942412c331a752cbade11815002be8d4d1e
|
[
"MIT"
] | null | null | null |
from turtle import Turtle
from scoreboard import Scoreboard
WIDTH = 800
HEIGHT = 600
START_SPEED = 0.1
class Ball(Turtle):
    """Class for creating and moving the ball. Extends Turtle"""

    def __init__(self) -> None:
        super().__init__()
        self.y_trajectory = 10  # pixels moved per step along y
        self.x_trajectory = 10  # pixels moved per step along x
        self.shape('circle')
        self.penup()
        self.shapesize(stretch_len=1, stretch_wid=1)
        self.color('white')
        self.move_speed = START_SPEED

    def move(self):
        """Move the ball forward by x and y trajectories"""
        new_x = self.xcor() + self.x_trajectory
        new_y = self.ycor() + self.y_trajectory
        self.goto(new_x, new_y)

    def detect_wall_collision(self):
        """Detect a wall collision, reverse y trajectory to "bounce" the ball"""
        if self.ycor() >= HEIGHT/2 - 15 or self.ycor() <= HEIGHT/-2 + 15:
            self.y_trajectory *= -1

    def detect_paddle_collision(self, r_paddle, l_paddle):
        """Detect a collision with the paddles
        If collision, reverse x trajectory"""
        if ((self.distance(r_paddle) < 50 and self.xcor() > WIDTH/2 - 60)
                or (self.distance(l_paddle) < 50 and self.xcor() < WIDTH/-2 + 60)):
            self.x_trajectory *= -1
            self.move_speed *= 0.9

    def _reset_after_goal(self):
        """Re-center the ball, restore the start speed and send it back
        toward the player who just conceded (reverse x trajectory).
        Extracted: this code was duplicated in both detect_goal branches."""
        self.goto(0, 0)
        self.move_speed = START_SPEED
        self.x_trajectory *= -1

    def detect_goal(self, score: Scoreboard):
        """Detect a collision with walls. If collision, then goal.
        Reset ball to starting values, move in opposite of previous x trajectory"""
        if self.xcor() > WIDTH/2 - 20:
            print("Left player scored a goal")
            score.left_point()
            self._reset_after_goal()
        if self.xcor() < WIDTH/-2 + 20:
            print("Right player scored a goal")
            score.right_point()
            self._reset_after_goal()
| 34.892857
| 84
| 0.590583
| 263
| 1,954
| 4.231939
| 0.323194
| 0.069182
| 0.067385
| 0.050314
| 0.277628
| 0.186882
| 0.186882
| 0.145553
| 0.097035
| 0.097035
| 0
| 0.033528
| 0.297851
| 1,954
| 56
| 85
| 34.892857
| 0.777697
| 0.18782
| 0
| 0.2
| 0
| 0
| 0.040103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.05
| 0
| 0.2
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f43d99fa4ec9d66bba52027500997441d643a8e
| 1,216
|
py
|
Python
|
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | 1
|
2018-08-30T20:29:17.000Z
|
2018-08-30T20:29:17.000Z
|
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | null | null | null |
baseq/bed/__init__.py
|
basedata10/baseq
|
0f1786c3392a51a6ec7cb0f32355cd28eaa5df29
|
[
"MIT"
] | null | null | null |
import subprocess, re, os
from baseq.utils.runcommand import run_it, run_generator
import pandas as pd
import random
"""
baseq dev bed ./bed
"""
import click, os, sys

# Enable both -h and --help for every command in this group.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])

@click.group(context_settings=CONTEXT_SETTINGS)
def cli():
    # Root click command group; subcommands are attached elsewhere.
    pass
class BEDFILE:
    """Minimal BED interval reader.

    Loads the first three columns (chr, start, end), reports summary
    stats, and supports random sampling of intervals.
    """

    def __init__(self, path):
        # Only the first three columns matter; '@' marks comment lines and
        # chromosome names are forced to str so '1' and 'chr1' compare alike.
        self.bed = pd.read_table(path, usecols=range(3),
                                 names=['chr', 'start', 'end'],
                                 comment='@', converters={'chr': str})
        self.stats()

    def stats(self):
        """Compute total covered length and interval count.

        Vectorized column arithmetic replaces the original per-row
        iterrows() loop (same results, much faster on large files).
        """
        spans = self.bed['end'] - self.bed['start']
        self.length = int(spans.sum())
        self.counts = int(len(spans))
        print("[info] Intervels {} Length {}.".format(self.counts, self.length))

    def sampling(self, numbers=100):
        """Return `numbers` randomly sampled intervals as a list of rows."""
        df_s = self.bed.sample(n=numbers)
        return df_s.values.tolist()

    def sample_split_files(self, lines=100, files=10):
        """Write `files` files of `lines` random intervals each.

        Files are named sample.<i>.bed in the working directory; returns
        the list of paths written.
        """
        paths = []
        for x in range(files):
            path = "sample.{}.bed".format(x)
            paths.append(path)
            self.bed.sample(n=lines).to_csv(path, index=False, sep="\t", header=False)
        return paths
| 31.179487
| 124
| 0.612664
| 161
| 1,216
| 4.521739
| 0.521739
| 0.038462
| 0.03022
| 0.038462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009667
| 0.234375
| 1,216
| 39
| 125
| 31.179487
| 0.772288
| 0
| 0
| 0
| 0
| 0
| 0.063866
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0.032258
| 0.16129
| 0
| 0.419355
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4660a8cf58761bb602bec1315943879f761718
| 4,264
|
py
|
Python
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 2
|
2015-04-28T00:35:21.000Z
|
2016-02-11T19:31:15.000Z
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | 9
|
2015-02-02T11:24:23.000Z
|
2017-12-29T07:49:07.000Z
|
swtstore/application.py
|
janastu/swtstore
|
7326138bf2fbf2a4ed8c7300c68092f91709dfc2
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
__init__.py
"""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, request, jsonify, render_template, make_response
from classes.database import db
from config import DefaultConfig
from classes import views
#from classes import models
from classes import oauth
# Public API of this module.
__all__ = ['create_app', 'getDBInstance']

DEFAULT_APP_NAME = __name__

# (module, url_prefix) pairs registered on the app by configure_modules().
DEFAULT_MODULES = (
    (views.frontend, ''),
    (views.api, '/api'),
    (views.user, '/users'),
    (views.context, '/contexts'),
    (views.sweet, '/sweets'),
    (views.app, '/apps'),
    (views.Oauth, '/oauth')
)
def create_app(config=None, app_name=None, modules=None):
    """Application factory: build, configure and return the Flask app.

    Falls back to DEFAULT_APP_NAME / DEFAULT_MODULES when the caller does
    not supply a name or module list.
    """
    app_name = DEFAULT_APP_NAME if app_name is None else app_name
    modules = DEFAULT_MODULES if modules is None else modules

    app = Flask(app_name)

    configure_app(app, config)
    configure_logging(app)
    configure_errorhandlers(app)
    configure_extensions(app)
    configure_modules(app, modules)

    return app
def configure_app(app, config):
    """Layer configuration: defaults, then optional object, then env var."""
    app.config.from_object(DefaultConfig())
    if config is not None:
        app.config.from_object(config)
    # The APP_CONFIG environment variable wins last; silent=True tolerates
    # its absence.
    app.config.from_envvar('APP_CONFIG', silent=True)
def configure_modules(app, modules):
    """Register each (module, url_prefix) pair on the app."""
    for module, prefix in modules:
        app.register_module(module, url_prefix=prefix)
def configure_extensions(app):
    # Bind the shared db object (presumably Flask-SQLAlchemy — confirm) and
    # the OAuth extension to this app instance.
    db.init_app(app)
    db.app = app
    oauth.init_app(app)
# return the current db instance
# TODO: is this needed so much?
def getDBInstance():
    """Return the module-level database instance."""
    return db
def configure_errorhandlers(app):
    """Register error handlers for the common HTTP error codes.

    XHR requests receive a JSON body; regular requests receive the
    matching 'errors/<code>.html' template.  Registration is skipped
    entirely in testing mode.
    """
    if app.testing:
        return

    # TODO: with all these request can we send back the respective HTTP status
    # codes instead of 200?

    def _register(status_code, pass_error_to_template=False):
        # One generic handler replaces the five near-identical functions
        # the module previously defined by hand.
        @app.errorhandler(status_code)
        def handler(error):
            response = make_response()
            response.status_code = status_code
            if request.is_xhr:
                response.data = jsonify(error=error)
            elif pass_error_to_template:
                # 400 passed the error object through to the template.
                response.data = render_template(
                    'errors/%d.html' % status_code, error=error)
            else:
                response.data = render_template('errors/%d.html' % status_code)
            return response

    _register(404)
    _register(403)
    _register(401)
    _register(400, pass_error_to_template=True)
    _register(500)
def configure_logging(app):
    """Attach a rotating file handler (20 MB per file, 10 backups) to the app logger."""
    formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
                                  '[in %(pathname)s:%(lineno)d]')
    # Also error can be sent out via email. So we can also have a SMTPHandler?
    # The log file lives one directory above this package.
    log_file = os.path.join(os.path.dirname(__file__), '..',
                            app.config['LOG_FILE'])
    handler = RotatingFileHandler(log_file, maxBytes=20 * 1024 * 1024,
                                  backupCount=10)
    # Fall back to ERROR when LOG_LEVEL is missing or empty.
    handler.setLevel(app.config.get('LOG_LEVEL') or 'ERROR')
    handler.setFormatter(formatter)
    app.logger.addHandler(handler)
| 24.090395
| 78
| 0.633912
| 515
| 4,264
| 5.07767
| 0.281553
| 0.034417
| 0.032505
| 0.047801
| 0.281836
| 0.23327
| 0.23327
| 0.151052
| 0.151052
| 0.151052
| 0
| 0.020128
| 0.265947
| 4,264
| 176
| 79
| 24.227273
| 0.815335
| 0.107645
| 0
| 0.240741
| 0
| 0
| 0.066138
| 0.006349
| 0
| 0
| 0
| 0.005682
| 0
| 1
| 0.111111
| false
| 0
| 0.074074
| 0.009259
| 0.259259
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f46d633a48c16504cc0737a6f08d56b6c8d1caf
| 2,313
|
py
|
Python
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 4
|
2018-12-04T23:33:46.000Z
|
2021-12-07T17:33:27.000Z
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | 17
|
2018-12-12T23:32:09.000Z
|
2020-01-04T15:50:31.000Z
|
2018/12a.py
|
apie/advent-of-code
|
c49abec01b044166a688ade40ebb1e642f0e5ce0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import pytest
import fileinput
import sys
# Advent of Code 2018, day 12 (part a).
DAY=12
class Plants():
    """Plant-pot cellular automaton (Advent of Code 2018, day 12)."""

    def __init__(self, in_lines):
        """Parse the initial state line and the '..#.. => #' rule lines.

        Only live pots are stored (key = pot number); only rules that
        produce a plant are kept, since any unmatched pattern dies.
        """
        self.generation = 0
        lines = iter(in_lines)
        initial_state = next(lines).replace('initial state: ', '')
        self.pots = {i: s for i, s in enumerate(initial_state)
                     if s == '#'
                     }
        self.rules = {
            r.split('=>')[0].strip(): r.split('=>')[1].strip()
            for r in lines if r and r.split('=>')[1].strip() == '#'
        }

    def gen(self):
        """Advance one generation.

        A new plant can appear at most two pots beyond the current extremes,
        so scanning a 4-pot margin on each side is sufficient.
        """
        pots_new = {}
        for p in range(min(self.pots.keys()) - 4, max(self.pots.keys()) + 1 + 4):
            key = '{}{}{}{}{}'.format(
                '#' if p - 2 in self.pots else '.',
                '#' if p - 1 in self.pots else '.',
                '#' if p - 0 in self.pots else '.',
                '#' if p + 1 in self.pots else '.',
                '#' if p + 2 in self.pots else '.',
            )
            if key in self.rules:
                pots_new[p] = '#'
        # The original built an unused debug string here from
        # min/max of pots_new, which raised ValueError whenever no rule
        # matched (empty dict); the dead code is removed.
        self.pots = pots_new
        self.generation += 1

    def print_pots(self):
        """Return the row as a '#'/'.' string covering at least pots -3..35."""
        return ''.join(['#' if i in self.pots else '.'
                        for i in range(min(-3, *self.pots.keys()),
                                       max(35, *self.pots.keys()) + 1)])

    def sum_pots(self):
        """Sum of the pot numbers of all pots containing a plant (the puzzle answer)."""
        return sum(self.pots.keys())
@pytest.fixture
def example_result():
    """Expected generation-by-generation output, one list entry per line."""
    with open('12.testresult', 'r') as fh:
        content = fh.read()
    return content.split('\n')
@pytest.fixture
def example_input():
    """Example puzzle input lines from the test fixture file."""
    with open('12.input.test', 'r') as fh:
        content = fh.read()
    return content.split('\n')
def test_answer(example_input, example_result):
    """Check each of 20 generations against the reference output, then the sum."""
    plants = Plants(example_input)
    print('Rules: ', plants.rules)
    for generation in range(21):
        if generation:
            plants.gen()
        row = plants.print_pots()
        print('Pots after {:2} generations: {}'.format(plants.generation, row))
        # Reference file has two header lines before the generation rows.
        assert '{:2}: {}'.format(generation, row) == example_result[2 + generation]
    assert plants.sum_pots() == 325
if __name__ == '__main__':
    # Read the puzzle input from argv paths, or the default NN.input file.
    source = sys.argv[1:] or '{:02}.input'.format(DAY)
    in_lines = [line.strip() for line in fileinput.input(source)]
    plants = Plants(in_lines)
    for generation in range(21):
        if generation:
            plants.gen()
        print('Pots after {:2} generations: {}'.format(plants.generation, plants.print_pots()))
    print('Answer: {}'.format(plants.sum_pots()))
| 31.684932
| 92
| 0.587981
| 357
| 2,313
| 3.686275
| 0.221289
| 0.097264
| 0.06383
| 0.06383
| 0.342705
| 0.307751
| 0.254559
| 0.254559
| 0.238602
| 0.238602
| 0
| 0.025178
| 0.210117
| 2,313
| 72
| 93
| 32.125
| 0.695129
| 0.036749
| 0
| 0.193548
| 0
| 0
| 0.083596
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 1
| 0.112903
| false
| 0
| 0.048387
| 0.032258
| 0.241935
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f47e0e4afa3b0ef06fd5508f958beec6b26eb72
| 826
|
py
|
Python
|
03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/PySpark-and-AWS-Master-Big-Data-with-PySpark-and-AWS
|
28726ada2a8f03557180b472eecf3efc72cab5a2
|
[
"MIT"
] | 3
|
2021-09-29T04:11:44.000Z
|
2021-12-21T06:28:48.000Z
|
Part 3/Code/03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping
|
8993a8ee10534a29aeee18fa91bdc48e3093bec5
|
[
"MIT"
] | null | null | null |
Part 3/Code/03-Spark DFs/24-Solution (Group By).py
|
PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping
|
8993a8ee10534a29aeee18fa91bdc48e3093bec5
|
[
"MIT"
] | 5
|
2021-11-17T15:47:36.000Z
|
2022-03-09T05:13:09.000Z
|
# Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
# NOTE(review): sum/avg/max/min/count here shadow the Python builtins for the
# rest of this notebook — the usual Spark-notebook convention, but be aware.
from pyspark.sql.functions import sum,avg,max,min,mean,count
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
# Load the sample CSV with a header row and inferred column types.
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
# 1: enrollment per course (two equivalent spellings of the same aggregate).
df.groupBy("course").count().show()
df.groupBy("course").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 2: enrollment per course, broken down by gender.
df.groupBy("course", "gender").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 3: total marks per course and gender.
df.groupBy("course", "gender").agg(sum("marks").alias("total_marks")).show()
# COMMAND ----------
# 4: min / max / average marks per course and age.
df.groupBy("course", "age").agg(min("marks"), max("marks"), avg("marks")).show()
| 25.8125
| 99
| 0.659806
| 104
| 826
| 5.211538
| 0.451923
| 0.083026
| 0.138376
| 0.084871
| 0.333948
| 0.143911
| 0.143911
| 0
| 0
| 0
| 0
| 0.005284
| 0.083535
| 826
| 31
| 100
| 26.645161
| 0.7107
| 0.156174
| 0
| 0
| 0
| 0
| 0.24344
| 0.048105
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4adf626e0639100f39276c7a36ef5fa92541f9
| 1,185
|
py
|
Python
|
parse_xlsx.py
|
UoA-eResearch/OPIMD
|
63d2279eea8de7db53b01c50e8e35b483ab572c4
|
[
"MIT"
] | null | null | null |
parse_xlsx.py
|
UoA-eResearch/OPIMD
|
63d2279eea8de7db53b01c50e8e35b483ab572c4
|
[
"MIT"
] | 2
|
2021-03-03T06:11:30.000Z
|
2021-03-05T02:57:02.000Z
|
parse_xlsx.py
|
UoA-eResearch/OPIMD
|
63d2279eea8de7db53b01c50e8e35b483ab572c4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Extract OPIMD rank lookup tables from the checked workbook into data.json."""
import pandas as pd
import json


def _rank_map(sheet, index_col, rank_col, int_index=False):
    """Return a {index_col value: rank_col value} dict for one worksheet.

    When *int_index* is set, rows with a missing index are dropped and the
    index column is coerced to int first (matches the original per-sheet code).
    """
    if int_index:
        sheet = sheet.dropna(subset=[index_col])
        sheet[index_col] = sheet[index_col].astype(int)
    sheet.index = sheet[index_col]
    return sheet[rank_col].to_dict()


# sheet_name=None loads every worksheet into a dict of DataFrames.
df = pd.read_excel("OPIMD Calc_checked_03Feb21AL.xlsx", sheet_name=None)

# Each entry below replaces a previously duplicated four-line stanza.
obj = {
    "dz": _rank_map(df["OPIMD15ACCESSDATAZONERANK"], "datazone",
                    "OPIMDAccPopRank_AL", int_index=True),
    "hlth": _rank_map(df["HEALTHCALC"], "HlthPattern", "HlthRank"),
    "inc": _rank_map(df["INCOMECALC"], "IncPattern", "IncRank"),
    "house": _rank_map(df["HOUSECALC"], "HouPattern", "HouRank"),
    "con": _rank_map(df["CONNECTCALC"], "ConPattern", "ConRank", int_index=True),
    "assets": _rank_map(df["ASSETSCALC"], "AsPattern", "AsRank", int_index=True),
}

# Decile break table: rows 3..12, first three columns of the rank-decile sheet.
breaks = df["OPIMDRankDecile"].iloc[3:13, 0:3]
breaks.columns = ["min", "max", "decile"]
obj["breaks"] = breaks.to_dict(orient='records')

with open("data.json", "w") as f:
    json.dump(obj, f)
print("Saved")
| 25.212766
| 72
| 0.709705
| 170
| 1,185
| 4.876471
| 0.429412
| 0.050663
| 0.043426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011353
| 0.108017
| 1,185
| 47
| 73
| 25.212766
| 0.772942
| 0.017722
| 0
| 0
| 0
| 0
| 0.18299
| 0.044674
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.057143
| 0
| 0.057143
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4cad023005927c7b37c2c98bbb63ef5319fadc
| 1,336
|
py
|
Python
|
python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py
|
wenwei8268/Alink
|
c00702538c95a32403985ebd344eb6aeb81749a7
|
[
"Apache-2.0"
] | null | null | null |
import re
__all__ = ['register_table_name', 'sql_query']
# Registries mapping table names to operators, one per execution mode.
batch_table_name_map = dict()
stream_table_name_map = dict()
def register_table_name(op, name: str, op_type: str):
    """Register *op* under *name* so :func:`sql_query` can link it later.

    Args:
        op: the batch or stream operator to register.
        name: table name the operator is addressable by in SQL text.
        op_type: either ``"batch"`` or ``"stream"``.

    Raises:
        ValueError: when *op_type* is neither "batch" nor "stream".
    """
    if op_type == "batch":
        batch_table_name_map[name] = op
    elif op_type == "stream":
        stream_table_name_map[name] = op
    else:
        # ValueError is more precise than a bare Exception and remains
        # backward-compatible for callers catching Exception.
        raise ValueError("op_type should be 'batch' or 'stream'.")
def clear_table_names():
    """Forget every registered batch and stream table."""
    for mapping in (batch_table_name_map, stream_table_name_map):
        mapping.clear()
def sql_query(query: str, op_type: str):
    """Build a SQL command op linked to every registered table referenced in *query*.

    Args:
        query: the SQL text to execute.
        op_type: either ``"batch"`` or ``"stream"``; selects the registry and
            the command-op class.

    Returns:
        The constructed SQL command op with all referenced tables linked.

    Raises:
        ValueError: when *op_type* is neither "batch" nor "stream".
    """
    if op_type == "batch":
        from pyalink.alink.batch.common import PySqlCmdBatchOp
        table_name_map = batch_table_name_map
        sql_cmd_op_cls = PySqlCmdBatchOp
    elif op_type == "stream":
        from pyalink.alink.stream.common import PySqlCmdStreamOp
        table_name_map = stream_table_name_map
        sql_cmd_op_cls = PySqlCmdStreamOp
    else:
        raise ValueError("op_type should be 'batch' or 'stream'.")
    ops = []
    for name, op in table_name_map.items():
        # re.escape makes table names containing regex metacharacters match
        # literally; re.search's truthiness replaces the dead findall-is-None
        # check (findall never returns None) and the unused counter.
        pattern = r"\b" + re.escape(name) + r"\b"
        if re.search(pattern, query):
            ops.append(op)
    sql_cmd_op = sql_cmd_op_cls().setCommand(query)
    sql_cmd_op.linkFrom(*ops)
    return sql_cmd_op
| 28.425532
| 65
| 0.654192
| 188
| 1,336
| 4.303191
| 0.281915
| 0.144623
| 0.163164
| 0.111248
| 0.217553
| 0.173053
| 0.173053
| 0.173053
| 0.111248
| 0.111248
| 0
| 0.002962
| 0.241766
| 1,336
| 46
| 66
| 29.043478
| 0.795656
| 0
| 0
| 0.216216
| 0
| 0
| 0.098802
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.081081
| 0
| 0.189189
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4d2891267d928eb5b2260208cbd4b134295605
| 3,790
|
py
|
Python
|
salt/utils/win_chcp.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 9,425
|
2015-01-01T05:59:24.000Z
|
2022-03-31T20:44:05.000Z
|
salt/utils/win_chcp.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 33,507
|
2015-01-01T00:19:56.000Z
|
2022-03-31T23:48:20.000Z
|
salt/utils/win_chcp.py
|
Noah-Huppert/salt
|
998c382f5f2c3b4cbf7d96aa6913ada6993909b3
|
[
"Apache-2.0"
] | 5,810
|
2015-01-01T19:11:45.000Z
|
2022-03-31T02:37:20.000Z
|
"""
Functions for working with the codepage on Windows systems
"""
import logging
from contextlib import contextmanager
from salt.exceptions import CodePageError
log = logging.getLogger(__name__)
try:
import pywintypes
import win32console
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Although utils are often directly imported, it is also possible to use the loader.
def __virtual__():
    """
    Only load if Win32 Libraries are installed
    """
    if HAS_WIN32:
        return "win_chcp"
    return False, "This utility requires pywin32"
@contextmanager
def chcp(page_id, raise_error=False):
    """
    Gets or sets the codepage of the shell.

    Args:
        page_id (str, int):
            A number representing the codepage.

        raise_error (bool):
            ``True`` will raise an error if the codepage fails to change.
            ``False`` will suppress the error

    Returns:
        int: A number representing the codepage

    Raises:
        CodePageError: On unsuccessful codepage change
    """
    # Coerce string page ids (e.g. "65001") to int before comparing.
    if not isinstance(page_id, int):
        try:
            page_id = int(page_id)
        except ValueError:
            error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
            if raise_error:
                raise CodePageError(error)
            log.error(error)
            # NOTE(review): `return -1` inside a @contextmanager generator never
            # reaches the caller; entering the manager after this path raises
            # RuntimeError ("generator didn't yield") — confirm intent.
            return -1
    previous_page_id = get_codepage_id(raise_error=raise_error)
    # Only switch when both lookups succeeded (non-zero/-1-free truthy values)
    # and the requested page actually differs from the current one.
    if page_id and previous_page_id and page_id != previous_page_id:
        set_code_page = True
    else:
        set_code_page = False
    try:
        if set_code_page:
            set_codepage_id(page_id, raise_error=raise_error)
        # Subprocesses started from now will use the set code page id
        yield
    finally:
        if set_code_page:
            # Reset to the old code page
            set_codepage_id(previous_page_id, raise_error=raise_error)
def get_codepage_id(raise_error=False):
    """
    Get the currently set code page on windows

    Args:
        raise_error (bool):
            ``True`` will raise an error if the codepage fails to change.
            ``False`` will suppress the error

    Returns:
        int: A number representing the codepage

    Raises:
        CodePageError: On unsuccessful codepage change
    """
    try:
        return win32console.GetConsoleCP()
    except pywintypes.error as exc:
        msg = exc.args[2]
        error = "Failed to get the windows code page: {}".format(msg)
        if not raise_error:
            log.error(error)
            return -1
        raise CodePageError(error)
def set_codepage_id(page_id, raise_error=False):
    """
    Set the code page on windows

    Args:
        page_id (str, int):
            A number representing the codepage.

        raise_error (bool):
            ``True`` will raise an error if the codepage fails to change.
            ``False`` will suppress the error

    Returns:
        int: A number representing the codepage

    Raises:
        CodePageError: On unsuccessful codepage change
    """
    # Coerce string page ids to int first, mirroring chcp().
    if not isinstance(page_id, int):
        try:
            page_id = int(page_id)
        except ValueError:
            error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
            if raise_error:
                raise CodePageError(error)
            log.error(error)
            return -1
    try:
        win32console.SetConsoleCP(page_id)
        return get_codepage_id(raise_error=raise_error)
    except pywintypes.error as exc:
        error = "Failed to set the windows code page: {}".format(exc.args[2])
        if not raise_error:
            log.error(error)
            return -1
        raise CodePageError(error)
| 25.608108
| 88
| 0.61715
| 470
| 3,790
| 4.806383
| 0.231915
| 0.061089
| 0.053121
| 0.048694
| 0.656043
| 0.604692
| 0.588756
| 0.533865
| 0.533865
| 0.533865
| 0
| 0.007675
| 0.312401
| 3,790
| 147
| 89
| 25.782313
| 0.859171
| 0.341953
| 0
| 0.565217
| 0
| 0
| 0.087803
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.086957
| 0
| 0.26087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4d57d728b00fc588f9af5da19650e009e95339
| 827
|
py
|
Python
|
application/server.py
|
comov/fucked-up_schedule
|
3e6a2972f46686829b655798cd641cd82559db24
|
[
"MIT"
] | null | null | null |
application/server.py
|
comov/fucked-up_schedule
|
3e6a2972f46686829b655798cd641cd82559db24
|
[
"MIT"
] | null | null | null |
application/server.py
|
comov/fucked-up_schedule
|
3e6a2972f46686829b655798cd641cd82559db24
|
[
"MIT"
] | null | null | null |
from flask import Flask, render_template
from application.settings import STATIC
from application.storage import storage
# Single application instance; static files are served from the configured path.
app = Flask(__name__, static_url_path=STATIC)
@app.route('/')
def hello_world():
    """Render the index page with chart labels and series built from storage.

    Each storage key becomes an x-axis label; each metric name becomes one
    series whose 'data' list collects that metric's value per label.
    """
    storage_dataset = storage.load_data()
    labels = []
    datasets = {}
    for label, dataset in storage_dataset.items():
        labels.append(label)
        for name, data in dataset.items():
            # The original used `datasets.get(name) or None` (redundant) and
            # re-assigned datasets[name] on every iteration; simplified here.
            series = datasets.get(name)
            if series is None:
                # First sighting of this metric: seed the series from the
                # entry itself and start its data list.
                series = data
                series['data'] = [series['value']]
                series['name'] = name
                datasets[name] = series
            else:
                series['data'].append(data['value'])
    return render_template('index.html', **{
        'country': 'Kyrgyzstan',
        'labels': labels,
        'dataset': list(datasets.values()),
    })
| 25.84375
| 50
| 0.562273
| 92
| 827
| 4.923913
| 0.456522
| 0.033113
| 0.02649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308343
| 827
| 31
| 51
| 26.677419
| 0.791958
| 0
| 0
| 0
| 0
| 0
| 0.076179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4e64d9de5293438f0fe185689a4d11efc8c4c9
| 1,857
|
py
|
Python
|
cli_fun/commands/fun.py
|
e4r7hbug/cli-fun
|
43f9a1bf788745783a24f315d80ceb969ff853e4
|
[
"MIT"
] | null | null | null |
cli_fun/commands/fun.py
|
e4r7hbug/cli-fun
|
43f9a1bf788745783a24f315d80ceb969ff853e4
|
[
"MIT"
] | null | null | null |
cli_fun/commands/fun.py
|
e4r7hbug/cli-fun
|
43f9a1bf788745783a24f315d80ceb969ff853e4
|
[
"MIT"
] | null | null | null |
"""Fun section of CLI command."""
import json
import logging
import time
from pprint import pformat, pprint
import click
from fabric.colors import red
@click.group()
def cli():
    """My fun program!"""
    # Group entry point only; subcommands attach themselves via @cli.command().
    pass
@cli.command()
def progress():
    """Sample progress bar."""
    items_range = range(0, 200)
    logging.debug('%s -> %s', items_range[0], items_range[-1])
    # width=0 lets click size the bar to the terminal; each tick sleeps 10 ms.
    with click.progressbar(items_range, width=0, fill_char=red('#')) as bar:
        for _ in bar:
            time.sleep(.01)
@cli.command('open')
def fun_open():
    """Trying out click.launch."""
    sites = {
        'Google': 'https://google.com',
        'The Verge': 'https://theverge.com',
        'Liliputing': 'https://liliputing.com'
    }
    # Bug fix: dict.keys() is not subscriptable on Python 3, so
    # sites_keys[choice] raised TypeError; materialize a list first.
    sites_keys = list(sites.keys())
    for index, site in enumerate(sites_keys):
        click.echo('%i %s' % (index, site))
    choice = click.prompt('Which site to open?', default=0, type=int)
    click.launch(sites[sites_keys[choice]])
@cli.command()
def party():
    """Get this party started!"""
    for beat in range(10):
        click.echo('Wub wub wub')
        logging.debug(beat)
@cli.command('to')
@click.option('-d', '--destination', prompt=True)
def fun_to(destination):
    """Connecting fun to stuffs!"""
    message = 'Apparently you are going to ' + destination
    click.echo(message)
@cli.command('max')
def fun_max():
    """Maximum levels achieved."""
    message = 'You found the highest peak!'
    click.echo(message)
@cli.command()
def hop():
    """The hopping function."""
    click.echo("Hop hop hop, 'til you just can stop!")
@cli.command()
def j():
    """Example JSON."""
    # Same object rendered six ways: json, pprint, pformat (with/without
    # depth), and the raw dict views.
    sample = {'this': 'that', 'up': 'down', 'sub': {'can': 'do'}}
    print(json.dumps(sample, indent=2))
    pprint(sample, indent=2)
    print(pformat(sample, indent=2))
    print(pformat(sample, indent=2, depth=1))
    print(sample.items())
    print(sample.values())
| 22.925926
| 70
| 0.611739
| 253
| 1,857
| 4.43083
| 0.438735
| 0.071365
| 0.046387
| 0.06066
| 0.066905
| 0.066905
| 0.066905
| 0.066905
| 0.066905
| 0.066905
| 0
| 0.01151
| 0.204631
| 1,857
| 80
| 71
| 23.2125
| 0.747461
| 0.107701
| 0
| 0.076923
| 0
| 0
| 0.151459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.019231
| 0.115385
| 0
| 0.269231
| 0.134615
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f4ee2585931ea1270d6eb83cfe79d8eaf1f4d33
| 1,851
|
py
|
Python
|
tests/algorithms/descriptor_generator/test_colordescriptor.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 82
|
2015-01-07T15:33:29.000Z
|
2021-08-11T18:34:05.000Z
|
tests/algorithms/descriptor_generator/test_colordescriptor.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 230
|
2015-04-08T14:36:51.000Z
|
2022-03-14T17:55:30.000Z
|
tests/algorithms/descriptor_generator/test_colordescriptor.py
|
joshanderson-kw/SMQTK
|
594e7c733fe7f4e514a1a08a7343293a883a41fc
|
[
"BSD-3-Clause"
] | 65
|
2015-01-04T15:00:16.000Z
|
2021-11-19T18:09:11.000Z
|
import unittest
import unittest.mock as mock
import pytest
from smqtk.algorithms.descriptor_generator import DescriptorGenerator
from smqtk.algorithms.descriptor_generator.colordescriptor.colordescriptor \
import ColorDescriptor_Image_csift # arbitrary leaf class
from smqtk.utils.configuration import configuration_test_helper
@pytest.mark.skipif(not ColorDescriptor_Image_csift.is_usable(),
                    reason="ColorDescriptor generator is not currently usable")
class TestColorDescriptor (unittest.TestCase):
    """Plugin discoverability and configuration round-trip tests for the
    ColorDescriptor descriptor generator (skipped when the backend binary
    is unavailable)."""

    def test_impl_findable(self):
        # The leaf class must be discoverable among DescriptorGenerator impls.
        self.assertIn(ColorDescriptor_Image_csift.__name__,
                      DescriptorGenerator.get_impls())

    @mock.patch('smqtk.algorithms.descriptor_generator'
                '.colordescriptor.colordescriptor.safe_create_dir')
    def test_configuration(self, _mock_scd):
        # safe_create_dir is mocked so constructing the instance below does
        # not touch the filesystem.
        i = ColorDescriptor_Image_csift(
            model_directory='test model dir',
            work_directory='test work dir',
            model_gen_descriptor_limit=123764,
            kmeans_k=42, flann_distance_metric='hik',
            flann_target_precision=0.92, flann_sample_fraction=0.71,
            flann_autotune=True, random_seed=7, use_spatial_pyramid=True,
            parallel=3,
        )
        # Every instance produced by the config round-trip helper must carry
        # the same internal state as the original.
        for inst in configuration_test_helper(i):
            assert inst._model_dir == 'test model dir'
            assert inst._work_dir == 'test work dir'
            assert inst._model_gen_descriptor_limit == 123764
            assert inst._kmeans_k == 42
            assert inst._flann_distance_metric == 'hik'
            assert inst._flann_target_precision == 0.92
            assert inst._flann_sample_fraction == 0.71
            assert inst._flann_autotune is True
            assert inst._rand_seed == 7
            assert inst._use_sp is True
            assert inst.parallel == 3
| 42.068182
| 79
| 0.690438
| 211
| 1,851
| 5.729858
| 0.379147
| 0.090984
| 0.082713
| 0.084367
| 0.263027
| 0.105873
| 0
| 0
| 0
| 0
| 0
| 0.02276
| 0.240411
| 1,851
| 43
| 80
| 43.046512
| 0.837127
| 0.010805
| 0
| 0
| 0
| 0
| 0.106069
| 0.046473
| 0
| 0
| 0
| 0
| 0.324324
| 1
| 0.054054
| false
| 0
| 0.162162
| 0
| 0.243243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f59a50ee0f4047fe095b3e0f94aa7691fc20820
| 2,139
|
py
|
Python
|
tests/server/datasets/test_dao.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | 1
|
2022-01-06T09:05:06.000Z
|
2022-01-06T09:05:06.000Z
|
tests/server/datasets/test_dao.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | null | null | null |
tests/server/datasets/test_dao.py
|
davidkartchner/rubrix
|
33faa006d7498a806a9fd594036d4a42c7d70da2
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rubrix.server.commons.errors import ClosedDatasetError
from rubrix.server.commons.es_wrapper import create_es_wrapper
from rubrix.server.datasets.dao import DatasetsDAO
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons import TaskType
from rubrix.server.tasks.commons.dao.dao import dataset_records_dao
from rubrix.server.tasks.text_classification.dao.es_config import (
text_classification_mappings,
)
# Module-level wiring shared by every test in this file: one Elasticsearch
# wrapper, one records DAO (with text-classification mappings registered),
# and the singleton datasets DAO built on top of them.
es_wrapper = create_es_wrapper()
records = dataset_records_dao(es_wrapper)
records.register_task_mappings(
    TaskType.text_classification, text_classification_mappings()
)
dao = DatasetsDAO.get_instance(es_wrapper, records)
def test_retrieve_ownered_dataset_for_no_owner_user():
    """An owned dataset is found by its owner or by an owner-less lookup,
    but not by a different user."""
    dataset = "test_retrieve_ownered_dataset_for_no_owner_user"
    created = dao.create_dataset(
        DatasetDB(name=dataset, owner="other", task=TaskType.text_classification)
    )
    # Matching owner and owner=None both resolve the dataset.
    assert dao.find_by_name(created.name, owner=created.owner) == created
    assert dao.find_by_name(created.name, owner=None) == created
    # A non-matching owner must not see it.
    assert dao.find_by_name(created.name, owner="me") is None
def test_close_dataset():
    """Searching a closed dataset raises ClosedDatasetError; reopening it
    makes searches work again."""
    dataset = "test_close_dataset"
    created = dao.create_dataset(
        DatasetDB(name=dataset, owner="other", task=TaskType.text_classification)
    )
    dao.close(created)
    with pytest.raises(ClosedDatasetError, match=dataset):
        records.search_records(dataset=created)
    # Reopen and verify the search no longer raises.
    dao.open(created)
    records.search_records(dataset=created)
| 36.254237
| 81
| 0.777466
| 295
| 2,139
| 5.461017
| 0.39322
| 0.043451
| 0.069522
| 0.039106
| 0.303538
| 0.226567
| 0.226567
| 0.226567
| 0.155183
| 0.103042
| 0
| 0.004894
| 0.140252
| 2,139
| 58
| 82
| 36.87931
| 0.871126
| 0.276765
| 0
| 0.176471
| 0
| 0
| 0.050294
| 0.030699
| 0
| 0
| 0
| 0
| 0.088235
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f5cb793e2e748f1c572ea256bcf2c1a860ee543
| 2,344
|
py
|
Python
|
blinpy/tests/test_models.py
|
solbes/blinpy
|
89b4f26066c383fc07ca6b1cbfdc8a61397f3f08
|
[
"MIT"
] | 3
|
2021-02-11T14:00:08.000Z
|
2021-10-13T20:41:21.000Z
|
blinpy/tests/test_models.py
|
solbes/blinpy
|
89b4f26066c383fc07ca6b1cbfdc8a61397f3f08
|
[
"MIT"
] | null | null | null |
blinpy/tests/test_models.py
|
solbes/blinpy
|
89b4f26066c383fc07ca6b1cbfdc8a61397f3f08
|
[
"MIT"
] | null | null | null |
import pytest
import pandas as pd
import numpy as np
from blinpy import models
# Shared regression fixture: 11 approximately linear (x, y) points used by
# both test functions below.
data = pd.DataFrame(
    {'x': np.array(
        [0.0, 1.0, 1.0, 2.0, 1.8, 3.0, 4.0, 5.2, 6.5, 8.0, 10.0]),
     'y': np.array([5.0, 5.0, 5.1, 5.3, 5.5, 5.7, 6.0, 6.3, 6.7, 7.1, 7.5])}
)
def test_linear_model():
    """Check LinearModel posterior means against precomputed reference values
    for three prior configurations: none, partial, and full."""
    # 1) linear model, no priors
    lm = models.LinearModel(
        output_col='y',
        input_cols=['x'],
        bias=True,
        theta_names=['th1'],
    ).fit(data)
    np.testing.assert_allclose(
        np.array([4.883977, 0.270029]),
        lm.post_mu,
        rtol=1e-5
    )
    # 2) partial prior (on the slope th1 only)
    lm = models.LinearModel(
        output_col='y',
        input_cols=['x'],
        bias=True,
        theta_names=['th1'],
        pri_cols=['th1']
    ).fit(data, pri_mu=[0.35], pri_cov=0.001)
    np.testing.assert_allclose(
        np.array([4.603935457929664, 0.34251082265349875]),
        lm.post_mu,
        rtol=1e-5
    )
    # prior for both parameters
    lm = models.LinearModel(
        output_col='y',
        input_cols=['x'],
        bias=True,
        theta_names=['th1'],
    ).fit(data, pri_mu=[4.0, 0.35], pri_cov=[1.0, 0.001])
    np.testing.assert_allclose(
        np.array([4.546825637808106, 0.34442570226594676]),
        lm.post_mu,
        rtol=1e-5
    )
def test_gam_line_fit():
    """Fit the same line with GamModel basis specs; posterior means must match
    the LinearModel results (note the slope/bias order is swapped here)."""
    # 1) line fit, no priors
    gam_specs = [{
        'fun': lambda df: df['x'].values[:, np.newaxis],
        'name': 'slope'
    },
        {
            'fun': lambda df: np.ones((len(df), 1)),
            'name': 'bias'
        }
    ]
    post_mu = models.GamModel('y', gam_specs).fit(data).post_mu
    np.testing.assert_allclose(
        np.array([0.270029, 4.883977]),
        post_mu,
        rtol=1e-5
    )
    # 2) partial prior (gaussian prior on the slope term only)
    gam_specs = [{
        'fun': lambda df: df['x'].values[:, np.newaxis],
        'name': 'slope',
        'prior': {
            'B': np.eye(1),
            'mu': np.array([0.35]),
            'cov': np.array([0.001])
        }
    },
        {
            'fun': lambda df: np.ones((len(df), 1)),
            'name': 'bias'
        }
    ]
    post_mu = models.GamModel('y', gam_specs).fit(data).post_mu
    np.testing.assert_allclose(
        np.array([0.34251082265349875, 4.603935457929664]),
        post_mu,
        rtol=1e-5
    )
| 22.980392
| 80
| 0.50128
| 326
| 2,344
| 3.490798
| 0.245399
| 0.05536
| 0.035149
| 0.101054
| 0.625659
| 0.596661
| 0.568541
| 0.541301
| 0.495606
| 0.434095
| 0
| 0.139169
| 0.322526
| 2,344
| 101
| 81
| 23.207921
| 0.577456
| 0.046502
| 0
| 0.5
| 0
| 0
| 0.036355
| 0
| 0
| 0
| 0
| 0
| 0.0625
| 1
| 0.025
| false
| 0
| 0.05
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f5e89412b184aa3f2abac3805b9bf927e055845
| 204
|
py
|
Python
|
valid.py
|
whitereaper25/test_2
|
47212fc977bcd36e8879ada22f319691073accb1
|
[
"Apache-2.0"
] | null | null | null |
valid.py
|
whitereaper25/test_2
|
47212fc977bcd36e8879ada22f319691073accb1
|
[
"Apache-2.0"
] | null | null | null |
valid.py
|
whitereaper25/test_2
|
47212fc977bcd36e8879ada22f319691073accb1
|
[
"Apache-2.0"
] | null | null | null |
import re
def verify(phn_no):
    """Return "yes" if *phn_no* is a valid 10-digit mobile number starting
    with 7, 8 or 9, otherwise "No".

    Args:
        phn_no: the candidate phone number as a string.
    """
    # Raw string avoids the invalid "\d" escape (DeprecationWarning on
    # modern Python).  re.match anchors at the start; $ anchors the end,
    # so exactly ten digits are required.
    design = r"[789]\d{9}$"
    if re.match(design, phn_no):
        return "yes"
    else:
        return "No"
# Read the number of test cases, then verify one phone number per line.
case_count = int(input())
for _ in range(case_count):
    print(verify(input()))
| 18.545455
| 31
| 0.553922
| 32
| 204
| 3.46875
| 0.71875
| 0.09009
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026846
| 0.269608
| 204
| 11
| 32
| 18.545455
| 0.718121
| 0
| 0
| 0
| 0
| 0
| 0.078049
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.4
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f6096fad4d8b4fcb9ab49eab731fdc3465207c6
| 1,632
|
py
|
Python
|
MAPLEAF/Rocket/sampleStatefulRocketComponent.py
|
henrystoldt/MAPLEAF
|
af970d3e8200832f5e70d537b15ad38dd74fa551
|
[
"MIT"
] | 15
|
2020-09-11T19:25:07.000Z
|
2022-03-12T16:34:53.000Z
|
MAPLEAF/Rocket/sampleStatefulRocketComponent.py
|
henrystoldt/MAPLEAF
|
af970d3e8200832f5e70d537b15ad38dd74fa551
|
[
"MIT"
] | null | null | null |
MAPLEAF/Rocket/sampleStatefulRocketComponent.py
|
henrystoldt/MAPLEAF
|
af970d3e8200832f5e70d537b15ad38dd74fa551
|
[
"MIT"
] | 3
|
2021-12-24T19:39:53.000Z
|
2022-03-29T01:06:28.000Z
|
from MAPLEAF.Motion import ForceMomentSystem, Inertia, Vector
from MAPLEAF.Rocket import RocketComponent
__all__ = [ "SampleStatefulComponent" ]
class SampleStatefulComponent(RocketComponent):
    """Demo RocketComponent carrying an extra integrated state variable
    (a draining tank level)."""

    def __init__(self, componentDictReader, rocket, stage):
        self.rocket = rocket
        self.stage = stage
        self.name = componentDictReader.getDictName()

    def getExtraParametersToIntegrate(self):
        """Declare additional state for the integrator: parallel lists of
        names, initial values, and derivative callables."""
        names = ["tankLevel"]
        initial_values = [1.0]
        derivatives = [self.getTankLevelDerivative]
        return names, initial_values, derivatives

    def getTankLevelDerivative(self, time, rocketState):
        # Exponential decay: the tank level asymptotically approaches zero.
        return -2 * rocketState.tankLevel

    def getAppliedForce(self, rocketState, time, envConditions, rocketCG):
        """Axial force proportional to the outflow rate from the tank."""
        mag = -2000 * self.getTankLevelDerivative(time, rocketState)
        force_vector = Vector(0, 0, mag)
        # Logged value ends up in the SampleZForce column of the force log.
        self.rocket.appendToForceLogLine(" {:>6.4f}".format(mag))
        return ForceMomentSystem(force_vector)

    def getInertia(self, time, rocketState):
        """Mass properties as a function of the remaining fluid."""
        mass = 5 + rocketState.tankLevel * 4.56  # fixed mass + fluid mass
        moi = Vector(mass, mass, mass * 0.05)
        cg = Vector(0, 0, -3 + rocketState.tankLevel)
        return Inertia(moi, cg, mass)
| 42.947368
| 128
| 0.694853
| 176
| 1,632
| 6.397727
| 0.539773
| 0.039964
| 0.033748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017433
| 0.226716
| 1,632
| 38
| 129
| 42.947368
| 0.874802
| 0.210784
| 0
| 0
| 0
| 0
| 0.032006
| 0.017955
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192308
| false
| 0
| 0.076923
| 0.038462
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f610ddfbb4015ca897145b09e2fa1a4b5263289
| 866
|
py
|
Python
|
Array/Final450/Sort_Array_Of_0s_1s_2s.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Array/Final450/Sort_Array_Of_0s_1s_2s.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
Array/Final450/Sort_Array_Of_0s_1s_2s.py
|
prash-kr-meena/GoogleR
|
27aca71e51cc2442e604e07ab00406a98d8d63a4
|
[
"Apache-2.0"
] | null | null | null |
from Utils.Array import input_array
# Named constants for the three allowed array values.
ZERO, ONE, TWO = 0, 1, 2
# Time -> O(n)
# Space -> O(1) inplace
def sort_by_counting(A):
    """Sort a list of 0s, 1s and 2s in place using counting sort.

    Time -> O(n), Space -> O(1): one tally pass, then one rewrite pass.
    """
    # First pass: tally how many of each value occur.
    zeros = ones = twos = 0
    for value in A:
        if value == 0:
            zeros += 1
        elif value == 1:
            ones += 1
        elif value == 2:
            twos += 1
    # Second pass: rewrite the counted prefix in sorted order,
    # one contiguous run per value.
    pos = 0
    for value, count in ((0, zeros), (1, ones), (2, twos)):
        for _ in range(count):
            A[pos] = value
            pos += 1
if __name__ == "__main__":
    A = input_array()  # read the array from stdin (project helper)
    sort_by_counting(A)
    print(A)
"""
2 1 0 1 2 0 0 0 1 2 2 2 1 1
1 1 1 1
2 1 0 2 1 0
2 1 0
"""
| 16.339623
| 52
| 0.469977
| 159
| 866
| 2.396226
| 0.289308
| 0.036745
| 0.031496
| 0.031496
| 0.03937
| 0.023622
| 0
| 0
| 0
| 0
| 0
| 0.132797
| 0.426097
| 866
| 52
| 53
| 16.653846
| 0.633803
| 0.19515
| 0
| 0.107143
| 0
| 0
| 0.012739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.035714
| 0
| 0.071429
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f61d9c0592b835198eb2ed4703fc9cefded5f37
| 1,911
|
py
|
Python
|
miradar_node/scripts/ppi_visualizer.py
|
QibiTechInc/miradar_ros1_pkgs
|
65b339147c2a1a990696d77e75b58f5fba84dc22
|
[
"Apache-2.0"
] | null | null | null |
miradar_node/scripts/ppi_visualizer.py
|
QibiTechInc/miradar_ros1_pkgs
|
65b339147c2a1a990696d77e75b58f5fba84dc22
|
[
"Apache-2.0"
] | null | null | null |
miradar_node/scripts/ppi_visualizer.py
|
QibiTechInc/miradar_ros1_pkgs
|
65b339147c2a1a990696d77e75b58f5fba84dc22
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import rospy
from miradar_node.msg import PPI, PPIData
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point
import dynamic_reconfigure.client
class PPIVisualizer:
    """Subscribes to miradar PPI data and republishes each target as an
    RViz sphere marker.

    Marker color encodes signal strength: red channel rises linearly from
    min_dB to max_dB (no clamping), blue is its complement.
    """

    def __init__(self):
        # Publisher for the markers; the subscriber drives visualizePPI.
        self.pub = rospy.Publisher("/miradar/markers", MarkerArray, queue_size=20)
        self.sub = rospy.Subscriber("/miradar/ppidata", PPIData, self.visualizePPI)

    def visualizePPI(self, data):
        """Callback: clear previous markers, then publish one sphere per target."""
        # Publish a DELETEALL marker first so stale targets disappear.
        markerArraydel = MarkerArray()
        marker = Marker()
        marker.header.frame_id = "miradar"
        marker.action = marker.DELETEALL
        markerArraydel.markers.append(marker)
        self.pub.publish(markerArraydel)

        # Query the current dB window from the dynamic_reconfigure server.
        # NOTE(review): creating a new client on every callback is costly;
        # consider caching it in __init__ if the server is up by then.
        cli = dynamic_reconfigure.client.Client("miradar_node")
        dynparam = cli.get_configuration()
        mindb = dynparam["min_dB"]
        maxdb = dynparam["max_dB"]

        # Linear map db -> [0, 1]: color = a*db + b. These are loop-invariant,
        # so compute (and print) them once instead of once per marker as the
        # original did.
        a = 1.0/(float(maxdb) - float(mindb))
        b = - (float(mindb)/(float(maxdb) - float(mindb)))
        print("a : {0}, b : {1}".format(a, b))

        markerArray = MarkerArray()
        for i in range(len(data.data)):
            marker = Marker()
            marker.header.frame_id = "miradar"
            marker.type = marker.SPHERE
            marker.action = marker.ADD
            marker.scale.x = 0.2
            marker.scale.y = 0.2
            marker.scale.z = 0.2
            marker.color.a = 1.0
            marker.color.r = data.data[i].db * a + b
            marker.color.b = 1.0 - marker.color.r
            marker.color.g = 0.0
            marker.pose.orientation.w = 1.0
            marker.pose.position = data.data[i].position
            marker.id = i
            markerArray.markers.append(marker)
        self.pub.publish(markerArray)
if __name__ == "__main__":
    rospy.init_node("ppi_visualizer")
    ppiVisualizer = PPIVisualizer()  # constructor wires up pub/sub
    rospy.spin()  # hand control to the ROS event loop
| 32.948276
| 83
| 0.591837
| 222
| 1,911
| 4.977477
| 0.382883
| 0.049774
| 0.021719
| 0.043439
| 0.139367
| 0.139367
| 0.079638
| 0.079638
| 0
| 0
| 0
| 0.015521
| 0.291994
| 1,911
| 57
| 84
| 33.526316
| 0.801183
| 0.010989
| 0
| 0.088889
| 0
| 0
| 0.057173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.111111
| 0
| 0.177778
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f62350af98cfe5e5bc543e35cb2ce81345228a2
| 3,561
|
py
|
Python
|
app/dashboard.py
|
nidheesh6/earlyearthquake
|
d0ab976629f126206afcd3dc15a76c66992f8a9e
|
[
"Apache-2.0"
] | null | null | null |
app/dashboard.py
|
nidheesh6/earlyearthquake
|
d0ab976629f126206afcd3dc15a76c66992f8a9e
|
[
"Apache-2.0"
] | null | null | null |
app/dashboard.py
|
nidheesh6/earlyearthquake
|
d0ab976629f126206afcd3dc15a76c66992f8a9e
|
[
"Apache-2.0"
] | null | null | null |
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import psycopg2
import json
import pandas as pd
import time
app = dash.Dash(__name__)
#app.css.config.serve_locally=False
#app.css.append_css(
# {'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})

# Connection to the readings database.
# NOTE(review): credentials are hard-coded; move to env vars/config.
conn = psycopg2.connect(host='ec2-18-232-24-132.compute-1.amazonaws.com',database='earthquake', user='postgres', password='********')
cur = conn.cursor()

# Static sensor-location table, joined onto live readings in update_map.
location = pd.read_csv("data_file.csv")
location=location.astype(str)

# Page layout: map graph + bar graph; the Interval triggers a refresh
# every 5 seconds.
app.layout = html.Div([
    html.Div([
        html.Div([
            dcc.Graph(id='graph', style={'margin-top': '20'})], className="six columns"),
        html.Div([
            dcc.Graph(
                id='bar-graph'
            )
        ], className='twelve columns'
        ),
        dcc.Interval(
            id='interval-component',
            interval=5*1000,  # in milliseconds
            n_intervals=0)
    ], className="row")
], className="ten columns offset-by-one")
@app.callback(Output('graph', 'figure'), [Input('interval-component', 'n_intervals')])
def update_map(n):
    """Rebuild the scattermapbox figure on every interval tick.

    Args:
        n: number of interval ticks so far (used only as the trigger).

    Returns:
        dict: a plotly figure spec, or implicitly None if the query or
        merge fails (the exception is printed, not re-raised).
    """
    try:
        # Latest 90 sensor readings, merged with the static locations.
        latest_reading = "select * from ereadings limit 90;"
        df_map = pd.read_sql(latest_reading, conn)
        map_data = df_map.merge(location, how='left', left_on=["device_id", "country_code"], right_on=["device_id","country_code"])
        clrred = 'rgb(222,0,0)'
        clrgrn = 'rgb(0,222,0)'
        def SetColor(gal):
            # Threshold 0.17 gal: red = shaking, green = quiet.
            if gal >= .17:
                return clrred
            else:
                return clrgrn
        layout = {
            'autosize': True,
            'height': 500,
            'font': dict(color="#191A1A"),
            'titlefont': dict(color="#191A1A", size='18'),
            'margin': {
                'l': 35,
                'r': 35,
                'b': 35,
                't': 45
            },
            'hovermode': "closest",
            'plot_bgcolor': '#fffcfc',
            'paper_bgcolor': '#fffcfc',
            'showlegend': False,
            'legend': dict(font=dict(size=10), orientation='h', x=0, y=1),
            'name': map_data['country_code'],
            'title': 'earthquake activity for the last 3 seconds',
            'mapbox': {
                'accesstoken':'*********************************',
                'center': {
                    'lon':-98.49,
                    'lat':18.29
                },
                'zoom': 5,
                'style': "dark"
            }
        }
        return {
            "data": [{
                "type": "scattermapbox",
                "lat": list(location['latitude']),
                "lon": list(location['longitude']),
                "hoverinfo": "text",
                # One hover label per sensor, combining location and reading.
                "hovertext": [["sensor_id: {} <br>country_code: {} <br>gal: {} <br>x: {} <br>y: {}".format(i, j, k, l, m)]
                              for i, j, k, l, m in zip(location['device_id'],location['country_code'].tolist(),map_data['gal'].tolist(),map_data['avg_x'].tolist(), map_data['avg_y'].tolist())],
                "mode": "markers",
                "marker": {
                    "size": 10,
                    "opacity": 1,
                    "color": list(map(SetColor, map_data['gal']))
                }
            }],
            "layout": layout
        }
    except Exception as e:
        # NOTE(review): swallowing the error makes the callback return None,
        # which Dash reports as a callback failure; consider returning the
        # previous figure instead.
        print("Error: Couldn't update map")
        print(e)
if __name__ == '__main__':
    app.run_server(debug=False)  # debug disabled for production-style serving
| 30.965217
| 193
| 0.48975
| 380
| 3,561
| 4.455263
| 0.521053
| 0.024808
| 0.023036
| 0.016539
| 0.050797
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031436
| 0.33895
| 3,561
| 114
| 194
| 31.236842
| 0.687766
| 0.045493
| 0
| 0.032609
| 0
| 0.01087
| 0.244964
| 0.021919
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0.01087
| 0.086957
| 0
| 0.141304
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f6638f61b3058472b08244c7bbaf61f509b9975
| 4,525
|
py
|
Python
|
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/main_experiment.py
|
wsavran/relm_pycsep_reproducibility
|
29294dc37627e74b4fcc4d05add1efc5950ded82
|
[
"BSD-3-Clause"
] | null | null | null |
# imports
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as pyplot
# pycsep imports
import csep
from csep.utils import stats, plots
# experiment imports
from experiment_utilities import (
load_zechar_catalog,
plot_consistency_test_comparison,
read_zechar_csv_to_dict
)
from experiment_config import config
# runtime flags
show_target_event_rates = True
plot = False
compute_evaluations = True
# catalog from manuscript
catalog = csep.load_catalog('./data/evaluation_catalog_zechar2013_merge.txt', loader=load_zechar_catalog)
evaluation_results = defaultdict(list)
# load results from zechar
zechar_dict = read_zechar_csv_to_dict('./data/consistency_quantile_scores_from_zechar.csv')
# main evaluation loop
for name, path in config['forecasts'].items():
# load forecast
fore = csep.load_gridded_forecast(
config['forecasts'][name],
start_date=config['start_date'],
end_date=config['end_date'],
name=name
)
# assign region of forecast to catalog
catalog.region = fore.region
cat_filt = catalog.filter_spatial(in_place=False)
# assign region to new catalog
cat_filt.region = fore.region
# compute likelihood and expected number of events
spatial_magnitude_counts = cat_filt.spatial_magnitude_counts()
ll = stats.poisson_log_likelihood(spatial_magnitude_counts, fore.data).sum()
# print summary statistics
print(f"{name}\n==========================")
print(f"Nfore: {fore.sum()}\nNobs: {cat_filt.event_count}\nLL/Nobs: {ll / cat_filt.event_count}")
print("")
if show_target_event_rates:
print("Target event rates")
for lon, lat, mag in zip(cat_filt.get_longitudes(), cat_filt.get_latitudes(), cat_filt.get_magnitudes()):
try:
rate = fore.get_rates([lon], [lat], [mag])
print(lon, lat, mag, rate[0])
except ValueError:
print(lon, lat, mag, "ERROR")
print("")
# n-test
if compute_evaluations:
n_test_result = csep.poisson_evaluations.number_test(
fore,
cat_filt
)
evaluation_results['n-test'].append(n_test_result)
print(f"N-test result: {n_test_result.quantile}")
# m-test
m_test_result = csep.poisson_evaluations.magnitude_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed']
)
evaluation_results['m-test'].append(m_test_result)
print(f"M-test result: {m_test_result.quantile}")
# s-test
s_test_result = csep.poisson_evaluations.spatial_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed'],
)
evaluation_results['s-test'].append(s_test_result)
print(f"S-test result: {s_test_result.quantile}")
# l-test
l_test_result = csep.poisson_evaluations.likelihood_test(
fore,
cat_filt,
num_simulations=config['nsims'],
seed=config['seed'],
)
evaluation_results['l-test'].append(l_test_result)
print(f"L-test result: {l_test_result.quantile}")
print("")
# plot and save results
ax = plot_consistency_test_comparison(evaluation_results, zechar_dict)
ax.get_figure().savefig('./output/pycsep_zechar_comparison.pdf')
# visualizations
if plot:
ax = plots.plot_poisson_consistency_test(
evaluation_results['n-test'],
plot_args={'xlabel': 'Observed earthquakes'}
)
ax.set_xlim([0,100])
ax.get_figure().savefig('./output/number_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['l-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-600,0])
ax.get_figure().savefig('./output/likelihood_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['s-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-220, -100])
ax.get_figure().savefig('./output/spatial_test_pycsep.pdf')
ax = plots.plot_poisson_consistency_test(
evaluation_results['m-test'],
plot_args={'xlabel': 'log-likelihood'},
one_sided_lower=True
)
ax.set_xlim([-35, -10])
ax.get_figure().savefig('./output/magnitude_test_pycsep.pdf')
| 31.206897
| 113
| 0.651271
| 556
| 4,525
| 5.016187
| 0.25
| 0.057368
| 0.01972
| 0.03227
| 0.328075
| 0.242739
| 0.223378
| 0.223378
| 0.20545
| 0.20545
| 0
| 0.006605
| 0.230497
| 4,525
| 144
| 114
| 31.423611
| 0.794371
| 0.076022
| 0
| 0.242718
| 0
| 0.009709
| 0.183085
| 0.107641
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.067961
| 0
| 0.067961
| 0.116505
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f677c0ab09b208476f962949003f247df030535
| 10,284
|
py
|
Python
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | 5
|
2020-01-01T19:12:34.000Z
|
2020-05-16T08:57:08.000Z
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | 1
|
2020-04-26T09:51:55.000Z
|
2020-04-26T10:41:25.000Z
|
kakuro.py
|
PanPapag/Kakuro
|
c2de75fff059fdb479c6c435205cf864bd057510
|
[
"MIT"
] | null | null | null |
import os
import re
import sys
import time
import puzzles
from csp import *
from search import *
from utils import *
class Kakuro(CSP):
    """CSP model of a Kakuro puzzle.

    The puzzle grid uses 'B' for black cells, 'W' for white (fillable)
    cells, and [down, right] clue pairs for sum cells. Each white cell
    becomes a variable named 'x_<row>_<col>' with domain 1..9.
    """

    def __init__(self, puzzle):
        self.puzzle = puzzle
        self.rows_size = len(puzzle)
        self.cols_size = len(puzzle[0])
        self.variables = self.get_variables()
        self.domain = self.get_domain()
        self.neighbors = self.get_neighbors()
        self.sums = self.get_sums()
        # constraints is the bound method itself, called by the CSP framework
        self.constraints = self.get_constraints
        CSP.__init__(self, self.variables, self.domain, self.neighbors, self.constraints)

    def get_variables(self):
        """Return one 'x_i_j' variable per white cell."""
        variables = []
        for i, row in enumerate(self.puzzle):
            for j, cell in enumerate(row):
                if cell == 'W':
                    variables.append('x' + '_' + str(i) + '_' + str(j))
        return variables

    def get_domain(self):
        """Every variable may take any digit in 1..9."""
        domain = {}
        for variable in self.variables:
            domain[variable] = []
            for i in range(1, 10):
                domain[variable].append(i)
        return domain

    def get_neighbors(self):
        """Map each variable to the white cells directly adjacent to it
        (same row or column, at most one cell away)."""
        neighbors = {}
        for variable in self.variables:
            neighbors[variable] = []
            # Get row and col of current variable
            row = int(re.search('_(.*)_', variable).group(1))
            col = int(variable.rsplit('_', 1)[-1])
            # Check same row for neighbors
            for i in range(self.cols_size):
                if i < col - 1 or i > col + 1:
                    continue
                if isinstance(self.puzzle[row][i], str):
                    if self.puzzle[row][i] == 'W':
                        neighbor_variable = 'x' + '_' + str(row) + '_' + str(i)
                        if neighbor_variable in self.variables and neighbor_variable != variable:
                            neighbors[variable].append(neighbor_variable)
            # Check same col for neighbors
            for i in range(self.rows_size):
                if i < row - 1 or i > row + 1:
                    continue
                if isinstance(self.puzzle[i][col], str):
                    if self.puzzle[i][col] == 'W':
                        neighbor_variable = 'x' + '_' + str(i) + '_' + str(col)
                        if neighbor_variable in self.variables and neighbor_variable != variable:
                            neighbors[variable].append(neighbor_variable)
        return neighbors

    def get_constraints(self, A, a, B, b):
        """Return True iff assigning A=a and B=b violates no constraint.

        Order of checks: adjacent-cell inequality, then every sum group
        (partial sums must stay below the clue; complete sums must equal it).
        NOTE(review): the loop variable 'sum' shadows the builtin of the
        same name within this method.
        """
        # if two neighbors have the same value constraints are not satisfied
        if a == b:
            return False
        # store assignments that have been made so far
        assignment = self.infer_assignment()
        # In this step check if a is equal to any other A's neighbor variable
        # assigned value. In this case the constraints are not being satisfied
        for var in self.neighbors[A]:
            if var in assignment:
                if assignment[var] == a:
                    return False
        # Similarly to B
        for var in self.neighbors[B]:
            if var in assignment:
                if assignment[var] == b:
                    return False
        # Check if neighbors A and B satisfy their common constraints
        for sum in self.sums:
            if (A in sum[1]) and (B in sum[1]):
                sum_of_neighbors = 0
                assigned_neighbors = 0
                for var in sum[1]:
                    if var in assignment:
                        if (var != A) and (var != B):
                            sum_of_neighbors += assignment[var]
                            assigned_neighbors += 1
                sum_of_neighbors += a + b
                assigned_neighbors += 2
                # partial group already at/over the clue -> dead end
                if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
                    return False
                # complete group must hit the clue exactly
                if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
                    return False
        # Check if A's constraints are being satisfied
        for sum in self.sums:
            if (A in sum[1]) and (B not in sum[1]):
                sum_of_neighbors = 0
                assigned_neighbors = 0
                for variable in sum[1]:
                    if variable in assignment:
                        if variable != A:
                            sum_of_neighbors += assignment[variable]
                            assigned_neighbors += 1
                sum_of_neighbors += a
                assigned_neighbors += 1
                if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
                    return False
                if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
                    return False
        # Check if B's constraints are being satisfied
        for sum in self.sums:
            if (A not in sum[1]) and (B in sum[1]):
                sum_of_neighbors = 0
                assigned_neighbors = 0
                for variable in sum[1]:
                    if variable in assignment:
                        if variable != B:
                            sum_of_neighbors += assignment[variable]
                            assigned_neighbors += 1
                sum_of_neighbors += b
                assigned_neighbors += 1
                if (len(sum[1]) > assigned_neighbors) and (sum_of_neighbors >= sum[0]):
                    return False
                if (len(sum[1]) == assigned_neighbors) and (sum_of_neighbors != sum[0]):
                    return False
        # Everthing ok, constraints are being satisfied so return True
        return True

    def get_sums(self):
        """Return [(clue_total, [variables in the group]), ...] for every
        down and right clue in the puzzle."""
        sums = []
        for i, row in enumerate(self.puzzle):
            for j, cell in enumerate(row):
                if (cell != 'W' and cell != 'B'):
                    # down - column: collect white cells below until a non-'W'
                    if (cell[0] != ''):
                        x = []
                        for k in range(i + 1, self.rows_size):
                            if (self.puzzle[k][j] != 'W'):
                                break
                            x.append('x' + '_' + str(k) + '_' + str(j))
                        sums.append((cell[0], x))
                    # right - row: collect white cells to the right
                    if (cell[1] != ''):
                        x = []
                        for k in range(j + 1, len(self.puzzle[i])):
                            if (self.puzzle[i][k] != 'W'):
                                break
                            x.append('x' + '_' + str(i) + '_' + str(k))
                        sums.append((cell[1], x))
        return sums

    def BT(self):
        """Plain backtracking search. Returns (solution, seconds)."""
        start = time.time()
        result = backtracking_search(self)
        end = time.time()
        return (result, end - start)

    def BT_MRV(self):
        """Backtracking + minimum-remaining-values variable ordering."""
        start = time.time()
        result = backtracking_search(self, select_unassigned_variable=mrv)
        end = time.time()
        return (result, end - start)

    def FC(self):
        """Backtracking + forward checking."""
        start = time.time()
        result = (backtracking_search(self, inference=forward_checking))
        end = time.time()
        return (result, end - start)

    def FC_MRV(self):
        """Backtracking + MRV + forward checking."""
        start = time.time()
        result = (backtracking_search(self, select_unassigned_variable=mrv, inference=forward_checking))
        end = time.time()
        return (result, end - start)

    def MAC(self):
        """Backtracking + MRV + maintaining arc consistency."""
        start = time.time()
        result = (backtracking_search(self, select_unassigned_variable=mrv, inference=mac))
        end = time.time()
        return (result, end - start)

    def display_grid(self, grid):
        """Pretty-print the grid; clue cells are shown as down\\right.

        NOTE(review): '\\{' in the format strings below is a literal
        backslash (an invalid escape, warned about on newer Pythons) —
        kept byte-identical because it is runtime output.
        """
        for i in range(self.rows_size):
            for j in range(self.cols_size):
                if isinstance(self.puzzle[i][j], list):
                    if grid[i][j][0] == '':
                        print('B\{}'.format(grid[i][j][1]).ljust(4), end='\t')
                    elif grid[i][j][1] == '':
                        print('{}\B'.format(grid[i][j][0]).ljust(4), end='\t')
                    else:
                        print('{}\{}'.format(grid[i][j][0], grid[i][j][1]).ljust(4), end='\t')
                else:
                    print(grid[i][j].ljust(4), end='\t')
            print()

    def display_solution(self, grid, solution, time_elapsed, assigns):
        """Write the solved values into grid, then print it with stats."""
        if solution != None:
            for variable in self.variables:
                # Get row and col of current variable
                row = int(re.search('_(.*)_', variable).group(1))
                col = int(variable.rsplit('_', 1)[-1])
                # Get value
                value = solution[variable]
                # Assign value of the variable to the grid
                grid[row][col] = str(value)
            # display assigned grid
            self.display_grid(grid)
            print("Number of assigns: {}".format(assigns))
            print("Total time elapsed: {:.4f} seconds".format(time_elapsed))
        else:
            print("No solution found!")
if __name__ == "__main__":
    # Get all puzzles from puzzle.py (every non-dunder module attribute)
    kakuro_puzzles = []
    for item in vars(puzzles).keys():
        if not item.startswith("__"):
            kakuro_puzzles.append((item,vars(puzzles)[item]))
    # Solve every puzzle with each search strategy and report timings.
    # NOTE(review): all algorithms run on the same Kakuro instance, so
    # nassigns accumulates across runs and display_solution mutates the
    # shared puzzle grid — verify the reported counts are intended.
    for puzzle_name, puzzle in kakuro_puzzles:
        print("\n----------------------------- {} Kakuro puzzle -----------------------------".format(puzzle_name))
        kakuro = Kakuro(puzzle)
        kakuro.display_grid(kakuro.puzzle)
        # BT algorithm
        print("\n> Solution using BT algorithm")
        kakuro.display_solution(kakuro.puzzle, *kakuro.BT(), kakuro.nassigns)
        # BT + MRV algorithm
        print("\n> Solution using BT and MRV algorithm")
        kakuro.display_solution(kakuro.puzzle, *kakuro.BT_MRV(), kakuro.nassigns)
        # FC algorithm
        print("\n> Solution using FC algorithm")
        kakuro.display_solution(kakuro.puzzle, *kakuro.FC(), kakuro.nassigns)
        # FC + MRV algorithm
        print("\n> Solution using FC and MRV algorithm")
        kakuro.display_solution(kakuro.puzzle, *kakuro.FC_MRV(), kakuro.nassigns)
        # MAC algorithm
        print("\n> Solution using MAC algorithm")
        kakuro.display_solution(kakuro.puzzle, *kakuro.MAC(), kakuro.nassigns)
        # print an empty line for better output
        print()
| 40.809524
| 115
| 0.50564
| 1,165
| 10,284
| 4.345064
| 0.13133
| 0.011853
| 0.041486
| 0.01778
| 0.593244
| 0.531411
| 0.469972
| 0.424338
| 0.365863
| 0.337021
| 0
| 0.010034
| 0.379813
| 10,284
| 251
| 116
| 40.972112
| 0.783631
| 0.082653
| 0
| 0.397059
| 0
| 0
| 0.041454
| 0.006378
| 0.009804
| 0
| 0
| 0
| 0
| 1
| 0.063725
| false
| 0
| 0.039216
| 0
| 0.20098
| 0.073529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f6b87929186c7b4d57d3ad6750b0986257cf867
| 662
|
py
|
Python
|
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
list_prime.py
|
zm6/Python-Practice
|
c2080e1104cd7cee4af8ebc3e3f4941fc7466586
|
[
"MIT"
] | null | null | null |
#!/user/bin/env python
# -*- coding:utf-8 -*-
# Author: zm6
# Created: 2021-03-19
# Updated: 2021-03-19
# Purpose: print all primes up to N
import time # 比较代码运行时间
def list_prime(n):
    """Print every prime <= n (one per line) and return how many there are.

    Trial division only needs to test divisors up to sqrt(i): a composite
    number always has a factor no larger than its square root. The original
    scanned all the way up to i - 2, which is quadratic per candidate.
    """
    num = 0
    for i in range(2, n + 1):
        is_prime = 1  # assume prime until a divisor is found
        for j in range(2, int(i ** 0.5) + 1):
            if i % j == 0:
                is_prime = 0  # found a divisor: not prime
                break
        if is_prime == 1:
            print(i)
            num = num + 1
    return num
if __name__ == "__main__":
    n = int(input("please enter the number:"))  # read the upper bound n
    start = time.time()  # start timing
    num = list_prime(n)
    # Message below (Chinese) reads "number of primes within n is:"
    print(n, "以内质数个数为:", num)
    end = time.time()  # stop timing
    print(str(end - start))
| 16.55
| 54
| 0.493958
| 97
| 662
| 3.237113
| 0.556701
| 0.066879
| 0.050955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066351
| 0.362538
| 662
| 39
| 55
| 16.974359
| 0.677725
| 0.188822
| 0
| 0
| 0
| 0
| 0.076336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0.15
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f6cee267527184d028d64eb983074f84ea9f058
| 2,246
|
py
|
Python
|
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | 1
|
2020-11-08T23:51:29.000Z
|
2020-11-08T23:51:29.000Z
|
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | null | null | null |
foyer/tests/test_forcefield.py
|
rmatsum836/foyer
|
c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb
|
[
"MIT"
] | null | null | null |
import glob
import os
from pkg_resources import resource_filename
import mbuild as mb
import parmed as pmd
import pytest
from foyer import Forcefield
from foyer.tests.utils import get_fn
# Directory containing the bundled forcefield XML files, and the list of
# every *.xml forcefield found there.
FF_DIR = resource_filename('foyer', 'forcefields')
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, '*.xml'))
def test_load_files():
    """Loading the same forcefield file twice yields the same atom types."""
    for ff_file in FORCEFIELDS:
        ff1 = Forcefield(forcefield_files=ff_file)
        # Every bundled forcefield must define at least one atom type.
        assert len(ff1._atomTypes) > 0
        ff2 = Forcefield(forcefield_files=ff_file)
        # Re-loading must be deterministic: same number of atom types.
        assert len(ff1._atomTypes) == len(ff2._atomTypes)
def test_duplicate_type_definitions():
    """Loading 'oplsaa' together with every bundled XML re-defines atom
    types, which must be rejected with a ValueError."""
    with pytest.raises(ValueError):
        # Return value intentionally discarded (the original bound it to an
        # unused local); construction itself must raise.
        Forcefield(name='oplsaa', forcefield_files=FORCEFIELDS)
def test_from_parmed():
    """Typing an ethane structure loaded through ParmEd with OPLS-AA."""
    mol2 = pmd.load_file(get_fn('ethane.mol2'), structure=True)
    oplsaa = Forcefield(name='oplsaa')
    ethane = oplsaa.apply(mol2)
    # 2 carbons (opls_135) and 6 hydrogens (opls_140)
    assert sum((1 for at in ethane.atoms if at.type == 'opls_135')) == 2
    assert sum((1 for at in ethane.atoms if at.type == 'opls_140')) == 6
    # ethane connectivity: 7 bonds, 12 angles, 9 RB torsions — all typed
    assert len(ethane.bonds) == 7
    assert all(x.type for x in ethane.bonds)
    assert len(ethane.angles) == 12
    assert all(x.type for x in ethane.angles)
    assert len(ethane.rb_torsions) == 9
    assert all(x.type for x in ethane.dihedrals)
    # Re-load with an explicit box; the box must survive atom typing.
    mol2 = pmd.load_file(get_fn('ethane.mol2'), structure=True)
    mol2.box_vectors = [[2, 0, 0], [0, 2, 0], [0, 0, 2]]
    oplsaa = Forcefield(name='oplsaa')
    ethane = oplsaa.apply(mol2)
    assert ethane.box_vectors == mol2.box_vectors
def test_from_mbuild():
    """Typing an ethane compound loaded through mBuild with OPLS-AA
    (mirrors test_from_parmed minus the box check)."""
    mol2 = mb.load(get_fn('ethane.mol2'))
    oplsaa = Forcefield(name='oplsaa')
    ethane = oplsaa.apply(mol2)
    # 2 carbons (opls_135) and 6 hydrogens (opls_140)
    assert sum((1 for at in ethane.atoms if at.type == 'opls_135')) == 2
    assert sum((1 for at in ethane.atoms if at.type == 'opls_140')) == 6
    # ethane connectivity: 7 bonds, 12 angles, 9 RB torsions — all typed
    assert len(ethane.bonds) == 7
    assert all(x.type for x in ethane.bonds)
    assert len(ethane.angles) == 12
    assert all(x.type for x in ethane.angles)
    assert len(ethane.rb_torsions) == 9
    assert all(x.type for x in ethane.dihedrals)
def test_write_refs():
    """apply() with references_file writes a BibTeX citation file."""
    mol2 = mb.load(get_fn('ethane.mol2'))
    oplsaa = Forcefield(name='oplsaa')
    ethane = oplsaa.apply(mol2, references_file='ethane.bib')
    assert os.path.isfile('ethane.bib')
| 31.194444
| 72
| 0.684328
| 344
| 2,246
| 4.34593
| 0.232558
| 0.053512
| 0.060201
| 0.056187
| 0.638796
| 0.632776
| 0.632776
| 0.632776
| 0.632776
| 0.527759
| 0
| 0.031798
| 0.18789
| 2,246
| 71
| 73
| 31.633803
| 0.787829
| 0
| 0
| 0.509434
| 0
| 0
| 0.06545
| 0
| 0
| 0
| 0
| 0
| 0.377358
| 1
| 0.09434
| false
| 0
| 0.150943
| 0
| 0.245283
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f705c1639774ae7481bbdfb1680d2106c872e2a
| 1,418
|
py
|
Python
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 22
|
2020-10-07T21:32:07.000Z
|
2022-03-21T19:21:36.000Z
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 4
|
2020-07-16T15:22:46.000Z
|
2020-10-28T15:18:32.000Z
|
app/pipelines/load_data/load_marketing_data/__init__.py
|
mediaimprove/mara-example-project-1
|
d1cab4cf079e78a4c0f73edac73200fac4112f34
|
[
"MIT"
] | 4
|
2020-10-08T10:30:04.000Z
|
2022-03-19T09:21:51.000Z
|
import pathlib
from mara_pipelines.commands.sql import ExecuteSQL, Copy
from mara_pipelines.pipelines import Pipeline, Task
from mara_pipelines import config
# Parent pipeline for all marketing-data load jobs.
pipeline = Pipeline(
    id="load_marketing_data",
    description="Jobs related with loading marketing leads data from the backend database",
    max_number_of_parallel_tasks=5,
    base_path=pathlib.Path(__file__).parent,
    labels={"Schema": "m_data"})

# Recreate the target schema before any load task runs.
pipeline.add_initial(
    Task(id="initialize_schemas", description="Recreates the marketing data schema",
         commands=[
             ExecuteSQL(sql_file_name='../recreate_marketing_data_schema.sql',
                        file_dependencies=[
                            pathlib.Path(__file__).parent.parent / 'recreate_marketing_data_schema.sql'])]))

# Backend tables to load; note the source tables carry a trailing "s"
# ("marketing.{table}s" in the SELECT below).
tables = [
    'closed_deal',
    'marketing_qualified_lead'
]

# One create-table + copy task per backend table.
for table in tables:
    pipeline.add(
        Task(id=f"load_{table}",
             description=f'Loads the {table}s from the backend database',
             commands=[
                 ExecuteSQL(sql_file_name=f'{table}/create_{table}_table.sql'),
                 Copy(sql_statement=f"""
SELECT *
FROM marketing.{table}s;
""",
                      source_db_alias='olist',
                      target_db_alias='dwh',
                      target_table=f'm_data.{table}',
                      delimiter_char=';')]
        )
    )
| 32.227273
| 108
| 0.61213
| 154
| 1,418
| 5.337662
| 0.441558
| 0.06326
| 0.062044
| 0.053528
| 0.143552
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000987
| 0.285614
| 1,418
| 43
| 109
| 32.976744
| 0.810464
| 0
| 0
| 0.055556
| 0
| 0
| 0.311707
| 0.089563
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f710ce3e46ffdc56061d382495ca8df6e25a15b
| 320
|
py
|
Python
|
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
apps/urls.py
|
cijianciqing/myWX_l_ningmo
|
df4c80554b0f3c58060352fc0d5fc6c649f805c8
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path,include
from .views import menu, image, weixinFile
# URL routes for the app: menu listing/management, image handling, and
# WeChat (weixin) file endpoints. Order matters for Django URL resolution.
urlpatterns = [
    path('menu/list', menu.get_menu),            # function-based view
    path('menu/user', menu.UserMenu.as_view()),  # class-based view
    path('image', image.ImageView.as_view()),
    path('saveWX', weixinFile.saveWX),
    path('getRecentWX', weixinFile.getRecentWX),
]
| 26.666667
| 48
| 0.696875
| 40
| 320
| 5.5
| 0.5
| 0.072727
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14375
| 320
| 12
| 49
| 26.666667
| 0.80292
| 0
| 0
| 0
| 0
| 0
| 0.125392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f7149167478c04bd0604548dfe0f8ebb31e11a2
| 1,178
|
py
|
Python
|
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/django_gpg/links.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import (
permission_key_delete, permission_key_receive, permission_key_view,
permission_keyserver_query
)
# Navigation links for GPG key management. Each Link couples a view name
# with the permission(s) required for the link to be shown.
link_private_keys = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Private keys'), view='django_gpg:key_private_list'
)
link_public_keys = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Public keys'), view='django_gpg:key_public_list'
)
# Destructive action: tagged 'dangerous' so the UI can style/confirm it.
link_key_delete = Link(
    permissions=(permission_key_delete,), tags='dangerous', text=_('Delete'),
    view='django_gpg:key_delete', args=('object.fingerprint', 'object.type',)
)
link_key_query = Link(
    permissions=(permission_keyserver_query,), text=_('Query keyservers'),
    view='django_gpg:key_query'
)
# keep_query=True preserves the search terms when importing from a keyserver.
link_key_receive = Link(
    keep_query=True, permissions=(permission_key_receive,), text=_('Import'),
    view='django_gpg:key_receive', args='object.key_id'
)
link_key_setup = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Key management'), view='django_gpg:key_public_list'
)
| 31.837838
| 77
| 0.749576
| 155
| 1,178
| 5.296774
| 0.264516
| 0.126675
| 0.095006
| 0.116931
| 0.274056
| 0.244823
| 0.181486
| 0.181486
| 0.181486
| 0.181486
| 0
| 0
| 0.119694
| 1,178
| 36
| 78
| 32.722222
| 0.791707
| 0
| 0
| 0.096774
| 0
| 0
| 0.241935
| 0.103565
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16129
| 0
| 0.16129
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f73b6858df8269c4f5f2480de3342f864156c6c
| 1,489
|
py
|
Python
|
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
rosalind/splc/splc.py
|
TecKnow/learning
|
71d1ddf9d580027ecc62a067581da378a9e85f6d
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Problem : RNA Splicing
URL : http://rosalind.info/problems/splc/
Author : David P. Perkins
"""
import fasta
def getCodingRegion(DNAString, introns):
    """Return DNAString with every occurrence of the given introns removed.

    Scans left to right: whenever the remaining string starts with one of
    the introns, that intron is skipped; otherwise one character is copied
    to the output. Earlier introns in the list take precedence at a given
    position.
    """
    # Guard against empty introns: '' matches everywhere but removes
    # nothing, which made the original loop spin forever.
    introns = [intron for intron in introns if intron]
    codingString = []
    workingString = DNAString
    while workingString:
        for curIntron in introns:
            if workingString.startswith(curIntron):
                # Drop the intron and resume scanning right after it.
                workingString = workingString[len(curIntron):]
                break
        else:
            # No intron starts here: keep this base.
            codingString.append(workingString[0])
            workingString = workingString[1:]
    return ''.join(codingString)
def RNAtoProt(RNAString):
    """Translate an RNA string into a protein string.

    Reads the codon table from 'codon_to_amino_acid_table.txt'
    (whitespace-separated codon/amino-acid pairs in the working
    directory), translates codon by codon, and truncates at the first
    "Stop" codon. Any trailing bases that do not fill a codon are ignored.
    """
    # Load the codon -> amino-acid map; 'with' guarantees the file is
    # closed (the original leaked the handle). The stray 'import sys' of
    # the original was unused and has been removed.
    with open('codon_to_amino_acid_table.txt') as proFile:
        tokens = proFile.read().split()
    proTab = dict(zip(tokens[::2], tokens[1::2]))
    # Group the RNA string into non-overlapping codons of three bases.
    codons = map(''.join, zip(*[iter(RNAString)] * 3))
    res = ''.join(proTab[codon] for codon in codons)
    # Everything from the first stop codon onward is discarded.
    res, _, _ = res.partition("Stop")
    return res
if __name__ == "__main__":
    import sys
    # NOTE(review): readline() consumes only the FIRST line of stdin; a
    # multi-record FASTA stream would normally need sys.stdin.read() —
    # confirm what fasta.FASTA.fromList expects.
    FASTAs = fasta.FASTA.fromList(sys.stdin.readline())
    # First record is the DNA string; the rest are introns to splice out.
    cr = getCodingRegion(FASTAs[0].value, [x.value for x in FASTAs[1:]])
    rna = cr.replace('T', 'U')  # transcribe DNA -> RNA
    prot = RNAtoProt(rna)
    print(prot)
| 29.78
| 85
| 0.601746
| 161
| 1,489
| 5.490683
| 0.52795
| 0.036199
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007353
| 0.269308
| 1,489
| 49
| 86
| 30.387755
| 0.805147
| 0.200134
| 0
| 0
| 0
| 0
| 0.036441
| 0.024576
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.21875
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f75db3882d66a7931843a46ed92b1bea9dfaf2f
| 6,448
|
py
|
Python
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4
|
2019-07-01T14:41:38.000Z
|
2021-11-28T12:54:49.000Z
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 4
|
2019-08-23T15:05:24.000Z
|
2020-12-16T10:02:20.000Z
|
tests/integration/test_interrupt_fields.py
|
jvanstraten/vhdmmio
|
f166b07074a9159311a01af88497df91c19e09d1
|
[
"Apache-2.0"
] | 1
|
2021-07-16T13:41:21.000Z
|
2021-07-16T13:41:21.000Z
|
"""Interrupt field tests."""
from copy import deepcopy
from unittest import TestCase
from ..testbench import RegisterFileTestbench
class TestInterruptFields(TestCase):
    """Interrupt field tests"""
    def test_fields(self):
        """test interrupt fields"""
        fields = []
        # Give each interrupt-field behavior its own register address,
        # 4 bytes apart, in declaration order.
        types = {
            typ: idx * 4 for idx, typ in enumerate([
                'volatile', 'flag', 'pend', 'enable', 'unmask', 'status', 'raw'])}
        for typ, address in types.items():
            typ_name = 'interrupt-%s' % typ
            if typ == 'volatile':
                typ_name = 'volatile-interrupt-flag'
            # 8-bit field for interrupt 'x' in the low bits of each register.
            fields.append({
                'address': address,
                'bitrange': 0,
                'repeat': 8,
                'name': 'x_%s' % typ,
                'behavior': typ_name,
                'interrupt': 'x',
            })
            # 4-bit field for interrupt 'y' at bit 8; the 'volatile' and
            # 'pend' variants are deliberately omitted for 'y'.
            if typ not in ('volatile', 'pend'):
                fields.append({
                    'address': address,
                    'bitrange': 8,
                    'repeat': 4,
                    'name': 'y_%s' % typ,
                    'behavior': typ_name,
                    'interrupt': 'y',
                })
                if typ == 'flag':
                    fields[-1]['bus-write'] = 'disabled'
        rft = RegisterFileTestbench({
            'metadata': {'name': 'test'},
            'interrupts': [
                {
                    'repeat': 8,
                    'name': 'x',
                },
                {
                    'repeat': 4,
                    'name': 'y',
                },
            ],
            'fields': fields})
        self.assertEqual(rft.ports, (
            'bus',
            'i_x_request',
            'i_y_request',
        ))
        with rft as objs:
            # Enable/unmask a subset of the interrupts and verify readback.
            objs.bus.write(types['enable'], 0x555)
            objs.bus.write(types['unmask'], 0x333)
            self.assertEqual(objs.bus.read(types['enable']), 0x555)
            self.assertEqual(objs.bus.read(types['unmask']), 0x333)
            self.assertEqual(int(objs.bus.interrupt), 0)
            # Assert all request lines: raw sees everything, flag only the
            # enabled subset, status only the enabled-and-unmasked subset.
            objs.i_x_request.val = 0xFF
            objs.i_y_request.val = 0xF
            self.assertEqual(objs.bus.read(types['raw']), 0xFFF)
            self.assertEqual(objs.bus.read(types['flag']), 0x555)
            self.assertEqual(objs.bus.read(types['status']), 0x111)
            self.assertEqual(objs.bus.read(types['volatile']), 0x055)
            # Requests still asserted, so flags/status read the same again.
            self.assertEqual(objs.bus.read(types['raw']), 0xFFF)
            self.assertEqual(objs.bus.read(types['flag']), 0x555)
            self.assertEqual(objs.bus.read(types['status']), 0x111)
            # Deassert the requests: raw drops, latched flags remain.
            objs.i_x_request.val = 0x00
            objs.i_y_request.val = 0x0
            self.assertEqual(objs.bus.read(types['raw']), 0x000)
            self.assertEqual(objs.bus.read(types['flag']), 0x055)
            self.assertEqual(objs.bus.read(types['status']), 0x011)
            # Clear selected flags by writing to the flag register.
            objs.bus.write(types['flag'], 0x00F)
            self.assertEqual(objs.bus.read(types['flag']), 0x050)
            self.assertEqual(objs.bus.read(types['status']), 0x010)
            objs.bus.write(types['unmask'], 0xFFF)
            self.assertEqual(objs.bus.read(types['status']), 0x050)
            self.assertEqual(int(objs.bus.interrupt), 1)
            # Reading the volatile register clears the remaining flags.
            self.assertEqual(objs.bus.read(types['volatile']), 0x050)
            rft.testbench.clock(3)
            self.assertEqual(int(objs.bus.interrupt), 0)
            self.assertEqual(objs.bus.read(types['raw']), 0x000)
            self.assertEqual(objs.bus.read(types['flag']), 0x000)
            self.assertEqual(objs.bus.read(types['status']), 0x000)
            # Software-pend path: pend sets flags without any request lines.
            objs.bus.write(types['enable'], 0x555)
            objs.bus.write(types['unmask'], 0x333)
            objs.bus.write(types['pend'], 0xF0F)
            self.assertEqual(objs.bus.read(types['flag']), 0x00F)
            self.assertEqual(objs.bus.read(types['status']), 0x003)
            self.assertEqual(int(objs.bus.interrupt), 1)
            # Read-only register variants must reject writes with a decode
            # error; the others accept them.
            for typ in ['volatile', 'flag', 'pend', 'enable', 'unmask', 'status', 'raw']:
                objs.bus.read(types[typ])
                if typ in ['volatile', 'status', 'raw']:
                    with self.assertRaisesRegex(ValueError, 'decode'):
                        objs.bus.write(types[typ], 0)
                else:
                    objs.bus.write(types[typ], 0)
    def test_errors(self):
        """test interrupt field config errors"""
        # Minimal valid configuration; each case below mutates a deep copy.
        base_cfg = {
            'metadata': {'name': 'test'},
            'fields': [
                {
                    'address': 0,
                    'bitrange': 0,
                    'name': 'x',
                    'behavior': 'interrupt-flag',
                    'interrupt': 'x',
                },
            ],
            'interrupts': [
                {
                    'name': 'x',
                },
            ],
        }
        # Sanity check: the base configuration itself must build cleanly.
        RegisterFileTestbench(base_cfg)
        # Plain 'interrupt' behavior without any bus access is rejected.
        cfg = deepcopy(base_cfg)
        cfg['fields'][0]['behavior'] = 'interrupt'
        with self.assertRaisesRegex(
                Exception, 'bus cannot access the field; specify a read or '
                'write operation'):
            RegisterFileTestbench(cfg)
        # Vector bitranges are not allowed; repetition must be used instead.
        cfg = deepcopy(base_cfg)
        cfg['fields'][0]['bitrange'] = '3..0'
        with self.assertRaisesRegex(
                Exception, 'interrupt fields cannot be vectors, use '
                'repetition instead'):
            RegisterFileTestbench(cfg)
        # Raw-mode interrupt fields must not be bus-writable.
        cfg = deepcopy(base_cfg)
        cfg['fields'][0]['behavior'] = 'interrupt'
        cfg['fields'][0]['mode'] = 'raw'
        cfg['fields'][0]['bus-write'] = 'enabled'
        with self.assertRaisesRegex(
                Exception, 'raw interrupt fields cannot be written'):
            RegisterFileTestbench(cfg)
        # Masked-mode interrupt fields must not be bus-writable either.
        cfg = deepcopy(base_cfg)
        cfg['fields'][0]['behavior'] = 'interrupt'
        cfg['fields'][0]['mode'] = 'masked'
        cfg['fields'][0]['bus-write'] = 'enabled'
        with self.assertRaisesRegex(
                Exception, 'masked interrupt fields cannot be written'):
            RegisterFileTestbench(cfg)
        # Clear-on-read is only legal for flag-mode interrupt fields.
        cfg = deepcopy(base_cfg)
        cfg['fields'][0]['behavior'] = 'interrupt'
        cfg['fields'][0]['mode'] = 'masked'
        cfg['fields'][0]['bus-read'] = 'clear'
        with self.assertRaisesRegex(
                Exception, 'only flag interrupt fields support clear-on-read'):
            RegisterFileTestbench(cfg)
| 38.380952
| 89
| 0.498759
| 628
| 6,448
| 5.078025
| 0.173567
| 0.076827
| 0.075886
| 0.110379
| 0.618062
| 0.566008
| 0.525557
| 0.358733
| 0.358733
| 0.314205
| 0
| 0.032567
| 0.352357
| 6,448
| 167
| 90
| 38.610778
| 0.731082
| 0.015664
| 0
| 0.455782
| 0
| 0
| 0.165033
| 0.003636
| 0
| 0
| 0.024344
| 0
| 0.217687
| 1
| 0.013605
| false
| 0
| 0.020408
| 0
| 0.040816
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f762c138bc0fd2f04d2c1539f4eca93c9446723
| 1,745
|
py
|
Python
|
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
corsmith/openshift-webhook-webex-teams-translator
|
fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5
|
[
"BSD-3-Clause"
] | null | null | null |
import tornado.ioloop
import tornado.web
import tornado.options
from tornado.log import gen_log
'''
Alert Manager Documentation: https://prometheus.io/docs/alerting/configuration/
Sample alertmanager message:
{
"version": "4",
"groupKey": <string>, // key identifying the group of alerts (e.g. to deduplicate)
"status": "<resolved|firing>",
"receiver": <string>,
"groupLabels": <object>,
"commonLabels": <object>,
"commonAnnotations": <object>,
"externalURL": <string>, // backlink to the Alertmanager.
"alerts": [
{
"status": "<resolved|firing>",
"labels": <object>,
"annotations": <object>,
"startsAt": "<rfc3339>",
"endsAt": "<rfc3339>",
"generatorURL": <string> // identifies the entity that caused the alert
},
...
]
}
'''
async def f():
    """Fetch google.com once and print the response body (connectivity probe).

    Fix: the original referenced AsyncHTTPClient without importing it
    anywhere in the file, raising NameError on the first call; the import
    is now done locally so module import order is unaffected.
    """
    from tornado.httpclient import AsyncHTTPClient
    http_client = AsyncHTTPClient()
    try:
        response = await http_client.fetch("http://www.google.com")
    except Exception as e:
        # Network failures are reported, not raised: this is a best-effort probe.
        print("Error: %s" % e)
    else:
        print(response.body)
class HealthHandler(tornado.web.RequestHandler):
    """Health-check endpoint: responds to GET with a fixed greeting."""
    def get(self):
        # Static body so probes/load-balancers can confirm liveness.
        self.write("Hello, world\n")
class MainHandler(tornado.web.RequestHandler):
    """Receives incoming webhook POSTs; the URL's last segment is the key."""
    def post(self, webhookkey):
        # NOTE(review): the entire request — including its body and headers —
        # is logged at warning level; confirm this is intended outside of
        # debugging (it may leak secrets into logs).
        gen_log.warning(f'webhookkey = { webhookkey }\nuri: { self.request.uri }\nquery: { self.request.query }\nheaders: { self.request.headers }\nbody: { self.request.body }')
        self.write("Hello, %s\n" % webhookkey)
def make_app():
    """Build the Tornado application with the webhook and health-check routes."""
    routes = [
        (r"/v1/webhooks/incoming/([^/]+)", MainHandler),
        (r"/", HealthHandler),
    ]
    return tornado.web.Application(routes)
if __name__ == "__main__":
    # Parse tornado's standard command-line options (logging level etc.).
    tornado.options.parse_command_line()
    app = make_app()
    # Fixed, non-privileged HTTP port.
    app.listen(8080)
    tornado.ioloop.IOLoop.current().start()
| 27.698413
| 177
| 0.638395
| 190
| 1,745
| 5.778947
| 0.6
| 0.03643
| 0.03643
| 0.04918
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009986
| 0.196562
| 1,745
| 62
| 178
| 28.145161
| 0.773181
| 0
| 0
| 0
| 0
| 0.034483
| 0.232692
| 0.027885
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.137931
| 0.034483
| 0.344828
| 0.068966
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f7707e7a77d86241b0db0b4c74b1a925d1c197b
| 695
|
py
|
Python
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1
|
2018-02-23T20:20:45.000Z
|
2018-02-23T20:20:45.000Z
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | 1
|
2016-10-25T18:00:15.000Z
|
2016-10-25T18:00:15.000Z
|
projects/demos/location2.py
|
readysetstem/readysetstem-api
|
01e1360f4a28a6783ee1e0fa1bc239dd999de6be
|
[
"Apache-2.0"
] | null | null | null |
# Show the Minecraft player's position as a dot on the LED matrix.
from rstem.led_matrix import FrameBuffer
from rstem.mcpi import minecraft, control
import time
control.show()
mc = minecraft.Minecraft.create()
# World units per LED pixel — larger means more of the world fits on screen.
SCALE = 25
fb = FrameBuffer()
count = 0
# Polling iterations between flash toggles (controls the blink rate).
FLASH_COUNT = 3
flash_lit = True
while True:
    pos = mc.player.getTilePos()
    # Map world x onto the framebuffer, centered; remember whether it had to
    # be clamped, then clamp it to the display edges.
    x = round(pos.x/SCALE + (fb.width-1)/2)
    x_out_of_bounds = not 0 <= x < fb.width
    x = min(fb.width-1, max(0, x))
    z = round(pos.z/SCALE + (fb.height-1)/2)
    z_out_of_bounds = not 0 <= z < fb.height
    z = min(fb.height-1, max(0, z))
    fb.erase()
    count += 1
    if count > FLASH_COUNT:
        flash_lit = not flash_lit
        count = 0
    # Draw steadily while in bounds; blink when clamped to an edge.
    if not x_out_of_bounds and not z_out_of_bounds or flash_lit:
        fb.point(z, x)
    fb.show()
    time.sleep(0.01)
| 19.305556
| 61
| 0.683453
| 130
| 695
| 3.507692
| 0.353846
| 0.070175
| 0.096491
| 0.052632
| 0.065789
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.179856
| 695
| 35
| 62
| 19.857143
| 0.766667
| 0
| 0
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f793c352ccb3c8ed1a615cf95be1f974da7e115
| 10,833
|
py
|
Python
|
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
distributed_train.py
|
ShivamShrirao/contrastive-unpaired-translation
|
e81611a5bd8b7aee6aedab10aadf9e22a0804a63
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from os.path import join as osp
import numpy as np
from tqdm import tqdm
import wandb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda import amp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch import autograd
from torch.optim import lr_scheduler
# from torchinfo import summary
from options.train_options import TrainOptions
from utils import AverageMeter, reduce_loss, synchronize, cleanup, seed_everything, set_grads, log_imgs_wandb
from data import CreateDataLoader
from data.unaligned_dataset import UnAlignedDataset
from models.custom_unet import Unet, NLayerDiscriminator, PatchSampleF, GANLoss, PatchNCELoss, get_norm_layer, init_weights
class TrainModel:
    """Distributed (DDP) contrastive-unpaired-translation training driver.

    Builds the generator/discriminator/feature-sampler networks, wraps them
    in DistributedDataParallel, and runs the adversarial + PatchNCE training
    loop under automatic mixed precision.
    """

    def __init__(self, args):
        self.device = torch.device('cuda', args.local_rank)
        self.netG = Unet(args.input_nc, args.output_nc, 32, self_attn=False).to(self.device)
        # init_weights(self.netG, args.init_type, args.init_gain)
        norm_layer = get_norm_layer(args.normD)
        self.netD = NLayerDiscriminator(args.output_nc, args.ndf, args.n_layers_D, norm_layer).to(self.device)
        # init_weights(self.netD, args.init_type, args.init_gain)
        # One dummy forward to discover the encoder feature shapes so netF
        # can build its MLPs before being wrapped in DDP.
        with torch.no_grad():
            feats = self.netG(torch.randn(8, args.input_nc, 256, 512, device=self.device), get_feat=True, encode_only=True)
        self.netF = PatchSampleF(use_mlp=True, nc=args.netF_nc)
        self.netF.create_mlp(feats)
        self.netF = self.netF.to(self.device)
        # init_weights(self.netF, args.init_type, args.init_gain)
        # summary(self.netG, (1, args.input_nc, 256, 512))
        # summary(self.netD, (1, args.output_nc, 256, 512))
        # summary(self.netF, input_data=[feats])
        dist.init_process_group(backend="nccl")
        if args.sync_bn:
            self.netG = nn.SyncBatchNorm.convert_sync_batchnorm(self.netG)
            self.netD = nn.SyncBatchNorm.convert_sync_batchnorm(self.netD)
            self.netF = nn.SyncBatchNorm.convert_sync_batchnorm(self.netF)
        self.netG = DDP(self.netG, device_ids=[args.local_rank], output_device=args.local_rank,
                        broadcast_buffers=False)
        self.netD = DDP(self.netD, device_ids=[args.local_rank], output_device=args.local_rank,
                        broadcast_buffers=False)
        self.netF = DDP(self.netF, device_ids=[args.local_rank], output_device=args.local_rank,
                        broadcast_buffers=False)
        self.criterion_gan = GANLoss()
        # One NCE criterion per sampled encoder layer.
        self.criterionNCE = [PatchNCELoss(args).to(self.device) for _ in range(len(feats))]
        self.loss_names = ['lossG', 'lossD', 'nce_loss_tot']
        dataset = UnAlignedDataset(args.dataroot, (256, 512), args.phase)
        self.dataloader = CreateDataLoader(dataset, args.batch_size, workers=args.workers)
        # if args.local_rank == 0:
        #     val_dataset = UnAlignedDataset(args.dataroot, 1024, phase="test")
        #     val_dataset.img_names = val_dataset.img_names[:20]
        #     self.val_loader = CreateDataLoader(val_dataset, 2, workers=args.workers, shuffle=False, distributed=False)
        self.optG = optim.Adam(self.netG.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
        self.optD = optim.Adam(self.netD.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
        self.optF = optim.Adam(self.netF.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))#, weight_decay=args.wd)
        self.scaler = amp.GradScaler(enabled=not args.no_amp)
        self.GF_params = list(self.netG.parameters()) + list(self.netF.parameters())

        def lambda_rule(epoch):
            # Linear LR decay to 0 over the last n_epochs_decay epochs.
            lr_l = 1.0 - max(0, epoch + args.init_epoch - args.n_epochs) / float(args.n_epochs_decay + 1)
            return lr_l
        self.schedulers = [lr_scheduler.LambdaLR(opt, lr_lambda=lambda_rule) for opt in [self.optG, self.optD, self.optF]]

    def calculate_NCE_loss(self, args, feat_k, feat_q):
        """Average PatchNCE loss over the sampled patches of every encoder layer."""
        feat_k_pool, sample_ids = self.netF(feat_k, args.num_patches, None)
        # Reuse the same patch locations for the query features.
        feat_q_pool, _ = self.netF(feat_q, args.num_patches, sample_ids)
        total_nce_loss = 0.0
        for f_q, f_k, crit in zip(feat_q_pool, feat_k_pool, self.criterionNCE):
            total_nce_loss += crit(f_q, f_k) * args.lambda_NCE
        return total_nce_loss / len(feat_k)

    def forward(self, args, real_A, real_B):
        """One optimization step on a batch; returns detached (fake_B, idt_B)."""
        with amp.autocast(enabled=not args.no_amp):
            # Run A and B through G as one batch: fake_B = G(A), idt_B = G(B).
            real = torch.cat((real_A, real_B), dim=0)
            pred, feats = self.netG(real, get_feat=True)
            batch_size = real_A.size(0)
            fake_B = pred[:batch_size]
            idt_B = pred[batch_size:]
            fake_out = self.netD(fake_B)#.detach())
            real_out = self.netD(real_B)
            lossD = (self.criterion_gan(fake_out, False)
                     + self.criterion_gan(real_out, True)) * 0.5
            # self.scaler.scale(lossD).backward()
            # D grads are computed explicitly (retain_graph) so the same
            # graph can still back the G/F update below.
            set_grads(autograd.grad(self.scaler.scale(lossD), self.netD.parameters(), retain_graph=True), self.netD.parameters())
            self.scaler.step(self.optD)
            self.optD.zero_grad(set_to_none=True)
            # fake_out = self.netD(fake_B)
            lossG = self.criterion_gan(fake_out, True) * args.lambda_GAN
            # NCE on A->B (fake) and B->B (identity) halves of the batch.
            feat_q = self.netG(fake_B, get_feat=True, encode_only=True)
            feat_k = [ft[:batch_size] for ft in feats]
            nce_loss_A = self.calculate_NCE_loss(args, feat_k, feat_q)
            feat_q = self.netG(idt_B, get_feat=True, encode_only=True)
            feat_k = [ft[batch_size:] for ft in feats]
            nce_loss_B = self.calculate_NCE_loss(args, feat_k, feat_q)
            nce_loss_tot = (nce_loss_A + nce_loss_B) * 0.5
            lossG = lossG + nce_loss_tot
            set_grads(autograd.grad(self.scaler.scale(lossG), self.GF_params), self.GF_params)
            # self.scaler.scale(lossG).backward()
            self.scaler.step(self.optG)
            self.optG.zero_grad(set_to_none=True)
            self.scaler.step(self.optF)
            self.optF.zero_grad(set_to_none=True)
            self.scaler.update()
        self.loss_avg['lossG'].update(reduce_loss(lossG.detach()), batch_size)
        self.loss_avg['lossD'].update(reduce_loss(lossD.detach()), batch_size)
        self.loss_avg['nce_loss_tot'].update(reduce_loss(nce_loss_tot.detach()), batch_size)
        return fake_B.detach(), idt_B.detach()

    def train_epoch(self, args, epoch):
        """Train a single epoch; returns the last logged loss-average dict."""
        self.loss_avg = {nm: AverageMeter() for nm in self.loss_names}
        info = {}
        with tqdm(self.dataloader, desc=f"Epoch {epoch:>2}", disable=args.local_rank != 0) as pbar:
            for step, (real_A, real_B) in enumerate(pbar):
                real_A = real_A.to(self.device, non_blocking=True)
                real_B = real_B.to(self.device, non_blocking=True)
                fake_B, idt_B = self.forward(args, real_A, real_B)
                # Only rank 0 updates the progress bar / wandb.
                if args.local_rank == 0:
                    if not step % args.log_interval:
                        info = {nm: float(loss.avg) for nm, loss in self.loss_avg.items()}
                        pbar.set_postfix(info)
                        if args.use_wandb:
                            wandb.log(info)
                    if not step % args.img_log_interval:
                        log_imgs_wandb(real_A=real_A, fake_B=fake_B, real_B=real_B, idt_B=idt_B)
        # Per-epoch LR decay for G, D and F.
        for schd in self.schedulers:
            schd.step()
        return info

    def train_loop(self, args):
        """Main loop over epochs, checkpointing on rank 0 each epoch."""
        # self.validate(args)
        for epoch in range(args.init_epoch, args.n_epochs):
            self.netG.train()
            self.netD.train()
            self.netF.train()
            # Reshuffle the distributed sampler deterministically per epoch.
            self.dataloader.sampler.set_epoch(epoch)
            info = self.train_epoch(args, epoch)
            info['epoch'] = epoch
            if args.local_rank == 0:
                if args.use_wandb:
                    wandb.log({'epoch': epoch})
                self.save_models(args, 'latest', info)
                if not epoch % 1:
                    self.save_models(args, epoch, info)
            # self.validate(args)

    def save_models(self, args, epoch='latest', info=None):
        """Save network weights and the info dict on rank 0.

        Fix: `info` previously defaulted to a mutable `{}`; it now defaults
        to None with an empty dict substituted, so the saved payload for
        default calls is unchanged.
        """
        if info is None:
            info = {}
        if args.local_rank == 0:
            os.makedirs(osp(args.checkpoints_dir, args.name), exist_ok=True)
            torch.save(self.netG.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netG.pth"))
            torch.save(self.netD.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netD.pth"))
            torch.save(self.netF.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_netF.pth"))
            # torch.save(self.optG.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optG.pth"))
            # torch.save(self.optD.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optD.pth"))
            # torch.save(self.optF.state_dict(), osp(args.checkpoints_dir, args.name, f"{epoch}_optF.pth"))
            torch.save(info, osp(args.checkpoints_dir, args.name, f"{epoch}_info.pth"))
            print("[+] Weights saved.")

    def load_models(self, args, epoch='latest'):
        """Load checkpoints for the given epoch tag; missing files are skipped."""
        synchronize()
        # Remap weights saved from cuda:0 onto this rank's device.
        map_location = {'cuda:0': f'cuda:{args.local_rank}'}
        try:
            self.netG.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netG.pth"), map_location=map_location))
            if args.phase == 'train':
                self.netD.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netD.pth"), map_location=map_location))
                self.netF.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_netF.pth"), map_location=map_location))
                # self.optG.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optG.pth"), map_location=map_location))
                # self.optD.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optD.pth"), map_location=map_location))
                # self.optF.load_state_dict(torch.load(osp(args.checkpoints_dir, args.name, f"{epoch}_optF.pth"), map_location=map_location))
            if args.local_rank == 0:
                print(f"[+] Weights loaded for {epoch} epoch.")
        except FileNotFoundError as e:
            if args.local_rank == 0:
                print(f"[!] {e}, skipping weights loading.")
def main():
    """Entry point: parse options, build the trainer, run training.

    Resumes from the 'latest' checkpoints when present; the process group
    is cleaned up on any exit path, including Ctrl-C.
    """
    args = TrainOptions().parse()
    torch.cuda.set_device(args.local_rank)
    seed_everything(args.seed)
    try:
        tm = TrainModel(args)
        # if args.resume:
        tm.load_models(args)
        tm.train_loop(args)
        tm.save_models(args)
    except KeyboardInterrupt:
        print("[!] Keyboard Interrupt! Cleaning up and shutting down.")
    finally:
        # Always tear down the distributed process group.
        cleanup()
if __name__ == '__main__':
    main()
| 48.361607
| 141
| 0.63925
| 1,510
| 10,833
| 4.362914
| 0.16755
| 0.020644
| 0.031573
| 0.044627
| 0.386157
| 0.346995
| 0.234214
| 0.20932
| 0.194596
| 0.184274
| 0
| 0.00775
| 0.2377
| 10,833
| 223
| 142
| 48.578475
| 0.790022
| 0.137173
| 0
| 0.07362
| 0
| 0
| 0.042065
| 0.002361
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055215
| false
| 0
| 0.110429
| 0
| 0.196319
| 0.02454
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f7b986c2b053cb63e12ef06cb1f0c6623d1ab5a
| 4,043
|
py
|
Python
|
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
Generator.py
|
pawelmakarov/ORM
|
1a17599b31ce6d73b08c8fa424e0a4201abfb3d3
|
[
"MIT"
] | null | null | null |
class Generator(object):
    """Generates PostgreSQL DDL (tables, foreign keys, triggers) from a YAML schema."""

    def __init__(self):
        # Accumulated SQL statements, written out to the file in this order.
        self.tables = []
        self.alters = []
        self.triggers = []

    def write_to_file(self, output_file):
        """Write all accumulated statements (tables, alters, triggers) to output_file."""
        with open(output_file, 'w') as sql_file:
            sql_file.write('{0}{1}'.format('\n'.join(table for table in self.tables), '\n'))
            sql_file.write('{0}{1}'.format('\n'.join(alter for alter in self.alters), '\n'))
            sql_file.write('{0}{1}'.format('\n'.join(trigger for trigger in self.triggers), '\n'))

    def read_from_file(self, input_file):
        """Parse the YAML schema file and return it as a dict."""
        import yaml
        with open(input_file, 'r') as stream:
            return yaml.safe_load(stream)

    def get_fields(self, table, structure):
        """Return the column definitions for `table` as a comma-joined string.

        Column names are prefixed with the table name: <table>_<column>.
        """
        fields = ("\'{0}_{1}\' {2}".format(table, column_name, column_type)
                  for column_name, column_type in structure['fields'].items())
        fields = ', '.join(fields)
        return fields

    def create_table(self, name, fields):
        """Return a CREATE TABLE statement with id/created/updated housekeeping columns."""
        create_table = ('CREATE TABLE \'{0}\' (\n\t\'{0}_id\' SERIAL PRIMARY KEY,\n\t{1}\n\t\'{0}_created\''
                        'INTEGER NOT NULL DEFAULT cast(extract(epoch from now()) AS INTEGER),\n\t\'{0}_updated\''
                        'INTEGER NOT NULL DEFAULT 0\n\t);\n'
                        .format(name, fields))
        return create_table

    def alter_table(self, table, related_table):
        """Return an ALTER TABLE statement adding a foreign key to related_table."""
        alter_table = ('ALTER TABLE \'{0}\' ADD \'{1}_id\' INTEGER NOT NULL,\n\t'
                       'ADD CONSTRAINT \'fk_{0}_{1}_id\' FOREIGN KEY (\'{1}_id\')'
                       'REFERENCES \'{1}\' (\'{1}_id\');\n'
                       .format(table, related_table))
        return alter_table

    def join_table(self, table, related_table):
        """Return a CREATE TABLE statement for a many-to-many join table.

        NOTE(review): the composite PRIMARY KEY lists '{0}_{1}' rather than
        '{0}_id' — presumably a typo; confirm the intended key columns.
        """
        join_table = ('CREATE TABLE \'{0}__{1}\' (\n\t\'{0}_id\' INTEGER NOT NULL,\n\t\'{1}_id\''
                      'INTEGER NOT NULL,\n\tPRIMARY KEY (\'{0}_{1}\', \'{1}_id\')\n);\n'
                      .format(table, related_table))
        return join_table

    def get_function(self, table):
        """Return the plpgsql function that refreshes <table>_updated on UPDATE."""
        function = ('CREATE OR REPLACE FUNCTION update_{0}_timestamp()\nRETURNS TRIGGER AS '
                    '$$\nBEGIN\n\tNEW.{0}_updated = cast(extract(epoch from now()) as integer);\n\t'
                    'RETURN NEW;\nEND;\n$$ language \'plpgsql\';\n'
                    .format(table))
        return function

    def get_trigger(self, table):
        """Return the BEFORE UPDATE trigger wiring the timestamp function to `table`."""
        trigger = ('CREATE TRIGGER \'tr_{0}_updated\' BEFORE UPDATE ON \'{0}\''
                   'FOR EACH ROW EXECUTE PROCEDURE\n\t update_{0}_timestamp();\n'
                   .format(table))
        return trigger

    def set_tables(self, statements):
        """Queue a CREATE TABLE statement."""
        self.tables.append(statements)

    def set_alters(self, statements):
        """Queue an ALTER TABLE statement."""
        self.alters.append(statements)

    def set_triggers(self, statements):
        """Queue a function/trigger statement."""
        self.triggers.append(statements)

    def create_statements(self, input_file, output_file):
        """Read the YAML schema from input_file and emit the full SQL script.

        Relation handling: 'one' against a 'many' counterpart yields a
        foreign key; symmetric relation types yield a join table with its
        own foreign keys.
        """
        data_map = self.read_from_file(input_file)
        for table, structure in data_map.items():
            table = table.lower()
            fields = self.get_fields(table, structure)
            for related_table, relations_type in structure['relations'].items():
                self.set_tables(self.create_table(table, fields))
                # The related table's first relation entry decides cardinality.
                # Fix: dict views are not subscriptable in Python 3 — the
                # original `.values()[0]` raised TypeError there.
                relations_status = next(iter(data_map[related_table]['relations'].values()))
                related_table = related_table.lower()
                if relations_type == 'one' and relations_status == 'many':
                    self.set_alters(self.alter_table(table, related_table))
                if relations_type == relations_status:
                    self.set_tables(self.join_table(table, related_table))
                    join_table = '{0}__{1}'.format(table, related_table)
                    self.set_alters(self.alter_table(join_table, table))
                    self.set_alters(self.alter_table(join_table, related_table))
            self.set_triggers(self.get_function(table))
            self.set_triggers(self.get_trigger(table))
        self.write_to_file(output_file)
if __name__ == '__main__':
    # Generate schema.sql from the example many-to-many YAML definition.
    Generator().create_statements('many_to_many.yaml', 'schema.sql')
| 43.010638
| 108
| 0.594855
| 514
| 4,043
| 4.445525
| 0.200389
| 0.06302
| 0.066958
| 0.017068
| 0.23151
| 0.18512
| 0.099344
| 0.099344
| 0.088403
| 0
| 0
| 0.012321
| 0.257235
| 4,043
| 93
| 109
| 43.473118
| 0.748585
| 0
| 0
| 0.053333
| 0
| 0.013333
| 0.183527
| 0.020777
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173333
| false
| 0
| 0.013333
| 0
| 0.293333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f80a6cd8804248e492bd75be4cdf855bd46b3e3
| 1,606
|
py
|
Python
|
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | null | null | null |
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | 3
|
2021-03-30T12:53:49.000Z
|
2021-09-22T18:44:52.000Z
|
location/models.py
|
swallville/driverBackEnd
|
3599e5a2e58304e08502b10a3856b77a05c7fd16
|
[
"MIT"
] | null | null | null |
from django.contrib.gis.db import models
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
# Create your models here.
class Location (models.Model):
    # Human-readable label for the location.
    name = models.CharField(
        max_length=100,
        verbose_name='Name of Location')
    # Geographic point (GeoDjango); uniqueness is enforced below.
    location = models.PointField(
        verbose_name='Coordinates of Location'
    )
    address = models.CharField(
        max_length=100,
        verbose_name='Address of Location')
    zip_code = models.CharField(
        max_length=9,
        verbose_name='Zip code of Location')
    city = models.CharField(
        max_length=100,
        verbose_name='City of Location')
    class Meta:
        verbose_name = 'Location'
        verbose_name_plural = 'Locations'
        ordering = ['address']
        permissions = (
            ('detail_location', 'Can detail %s' % verbose_name),
            ('list_location', 'Can list %s' % verbose_name),
        )
        # DB-level guarantee that two rows never share identical coordinates.
        constraints = [
            models.UniqueConstraint(fields=['location'], name='unique_location'),
        ]
    def validate_unique(self, exclude=None):
        """Reject saves that would duplicate another row's coordinates.

        NOTE(review): for a new (unsaved) instance this queryset only
        matches pre-existing rows, so `count() > 1` never fires on the
        first duplicate — presumably it should exclude self.pk and check
        >= 1; confirm intended semantics (the UniqueConstraint above also
        guards this at the database level).
        """
        qs = Location.objects.filter(Q(location=self.location))
        if qs.count() > 1:
            raise ValidationError(
                _('Location must have different coordinates (%.14f, %.14f)') % self.location.coords[::-1]
            )
    def save(self, *args, **kwargs):
        # Run the coordinate-uniqueness check on every save, not only forms.
        self.validate_unique()
        super(Location, self).save(*args, **kwargs)
    def __str__(self):
        return '%s - %s' % (self.city, self.address)
| 30.301887
| 105
| 0.619552
| 178
| 1,606
| 5.438202
| 0.404494
| 0.102273
| 0.07438
| 0.099174
| 0.117769
| 0.117769
| 0.117769
| 0
| 0
| 0
| 0
| 0.013571
| 0.265878
| 1,606
| 52
| 106
| 30.884615
| 0.807464
| 0.014944
| 0
| 0.071429
| 0
| 0
| 0.161392
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.095238
| 0.02381
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f8142cd627ecd115f6acdab00511ac3d94dfb10
| 14,213
|
py
|
Python
|
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
matroska_cache/dep/scopes.py
|
kolypto/py-matroska-cache
|
b40030f97d463aac8e3a6f4b0e0e9f081dfc92b1
|
[
"MIT"
] | null | null | null |
from __future__ import annotations
import warnings
from typing import Any, List, Callable, Tuple, Union, Collection, FrozenSet, Optional, Iterable, Set
from .base import DependencyBase, dataclass
from .tag import Tag
ExtractorFunc = Callable[[Any], Optional[dict]]
class Scopes:
""" Generate dependencies that describe lists of objects.
This tool is designed to solve the case where newly created, or freshly removed, items may enter the scope
of some listing, which may itself be filtered by some condition.
In general, if you cache a list of objects by `Id`:
cache.put(
'articles-list', [...],
dep.Id('article', 1),
dep.Id('article', 2),
...
)
you will not have this list of articles invalidated when new articles come into scope.
For instance, if your view caches the list of articles by `category`, intialize the `Scopes` object like this:
article_scopes = Scopes('article', production_mode=False)
@article_scopes.describes('category')
def article_category(article: Article):
return {'category': article.category}
This operation enables us to use `category` information as a dependency for the cached list:
# Articles filtered by category ...
articles = ssn.query(Article).filter_by(category='python').all()
cache.put(
f'articles-list:category=python', # make sure to put it here
articles,
...
# ... declare this category as their dependency
*article_scopes.condition(category='python')
)
Now, in another place, where articles are created, you can invalidate this dependency automatically
by just passing the new article to the article_scopes.invalidate_for() method:
def create_new_article(...):
...
article_scopes.invalidate_for(article, invalidate_for)
Under the hood, it will go over every condition known through @article_scopes.describes()
and invalidate all related caches.
---
NOTE: Does it seem complicated to you?
It is; but this complexity follows one goal: to make caching *declarative* and minimize hidden connections in your code.
For instance, you could have used Tag() to achieve the very same result.
Listing articles:
articles = ssn.query(Article).filter_by(category='python').all()
cache.put(
f'articles-list:category=python', # make sure to put it here
articles,
...
# ... declare this category as their dependency
dep.Tag(f'articles:category=python'),
)
Adding articles:
cache.invalidate(dep.Tag(f'books:category={article.category}'))
This code would work just fine; but then, for every caching behavior you would need *to remember* to add another line
to the place where articles are saved. Those connections would soon become numerous and lead to caching errors that
are hard to catch.
This approach with `Scopes()` is a declarative approach:
you first declare *the intention* of caching by category, and `Scopes()` will check that everything is set up properly.
"""
def __init__(self, object_type: str, *, production_mode: bool):
""" Initialize scopes for a particular kind of object
Args:
object_type: Name for the objects you're watching. Got to be unique. Example: 'article'
production_mode: Whether the cache is currently operating on a production server.
If there is an error with how you configured the `Scopes` object, its will be disabled.
In development (production_mode=False), an exception will be raised.
"""
self._object_type = object_type
self._extractor_fns: List[ExtractorInfo] = []
self._known_extractor_signatures: Set[Tuple[str]] = set()
# An invalidate-all dependency used to invalidate all caches in cases when scopes are not used properly.
# For instance, the user is attempting to cache the results that matched a filter
# .condition(category_id=10)
# but there was no extractor function that describes how `category_id` influences the cache.
self._invalidate_all = InvalidateAll(self._object_type)
self._production_mode = production_mode
def describes(self, *param_names, watch_modified: Optional[Iterable[str]] = None):
    """ Decorator: register an extractor function for a conditional dependency.

    The decorated function receives a saved object (plus any **info forwarded
    by `invalidate_for()`) and returns a dict with exactly the keys listed in
    `param_names` -- or `None` if a particular change should be ignored:

        @article_scopes.describes('category')
        def article_category(article: Article, **info):
            # Extract filter arguments from a new object
            return {'category': article.category}

    Only after such a condition is described may it be used as a cache key:

        cache.put(
            f'articles-{category}',
            articles,
            ...,
            *article_scopes.condition(category=category),
            expires=600,
        )

    The values produced by the extractor and those given to `condition()` have
    to match; if they don't, the cache will misbehave. Whenever any object is
    saved, the application should call `invalidate_for()` so that every cache
    that might see an object enter or leave the scope gets invalidated.

    Args:
        *param_names: Names of the parameters the extractor will return.
            Completely custom, but must match those given to `condition()`.
        watch_modified: Only run the extractor when these fields are modified.
            Defaults to `param_names`; set explicitly when parameter names
            differ from attribute names, e.g.
            `return {'filter-by-category': article.category}`.
    """
    def register(extractor: ExtractorFunc):
        """ Record the extractor and its signature; hand the function back unchanged """
        self._extractor_fns.append(ExtractorInfo(
            param_names=frozenset(param_names),
            watch_modified=frozenset(watch_modified or param_names),
            func=extractor,
        ))
        self._known_extractor_signatures.add(tuple(sorted(param_names)))
        return extractor
    return register
def invalidate_for(self, item: Any, cache: 'MatroskaCache', modified: Collection[str] = None, **info):
    """ Drop every cache entry whose listing could include `item`.

    Args:
        item: The new/deleted item that may enter or leave the scope of some listing
        cache: The MatroskaCache instance to purge
        modified: (optional) names of the fields that changed; lets irrelevant updates be skipped
        **info: Extra keyword arguments forwarded to the extractor functions
    """
    dependencies = self.object_invalidates(item, modified, **info)
    cache.invalidate(*dependencies)
def condition(self, **conditions: Any) -> List[Union[ConditionalDependency, InvalidateAll]]:
    """ Build the dependency list for a conditional scope.

    Pass the result to MatroskaCache.put() to attach this scope to a cache entry.

    Args:
        **conditions: The filtering conditions, in the `name=value` form.
    Returns:
        List of scope dependencies to be used on the cache entry.
    """
    signature = tuple(sorted(conditions))
    if signature not in self._known_extractor_signatures:
        # No extractor covers this signature, so conditional caching cannot work.
        if self._production_mode:
            # Production: complain and fall back to the kill switch (cache disabled)
            warnings.warn(
                f'Matroska cache: no extractor @describes for {signature!r}. '
                f'Caching disabled. '
            )
            return [self._invalidate_all]
        # Development: fail loudly so the misconfiguration is caught early
        raise RuntimeError(
            f'No extractor function is described for condition {signature!r}. '
            f'Please use @.describes() on a function with matching parameters. '
            f'It will not fail in production, but caching will be disabled.'
        )
    # The kill switch must be declared alongside the real dependency; otherwise it cannot fire.
    return [
        ConditionalDependency(self._object_type, conditions),
        self._invalidate_all,
    ]
def object_invalidates(self, item: Any, modified: Collection[str] = None, **info) -> List[Union[ConditionalDependency, InvalidateAll]]:
    """ Get dependencies that will invalidate all caches that may see `item` in their listings.

    This function takes the `item` and calls every extractor function decorated by `@scope.describes()`.
    The resulting value will be used to find scopes that this object will come into, and invalidate them.

    Args:
        item: The newly created or freshly deleted item.
        modified: (optional) list of field names that have been modified. Useful to ignore non-relevant updates.
            If not provided, all extractor functions will be run to invalidate dependencies.
            If provided, only those that are watching those attributes will be run.
        **info: Additional arguments to pass to *all* the extractor functions.
    Returns:
        List of dependencies to be used with `cache.invalidate()`
    Raises:
        RuntimeError: in development mode, when an extractor returns keys that
            do not match its declared `param_names`.
    """
    # Normalize to a set for fast intersection below. An empty/None `modified`
    # stays falsy, which means "run every extractor".
    if modified:
        modified = set(modified)
    ret = []
    for extractor_info in self._extractor_fns:
        # if `modified` was provided, skip extractors that are not interested in those fields
        if modified and not (extractor_info.watch_modified & modified):
            continue
        # Run the extractor function and get dependency parameters
        try:
            params = extractor_info.func(item, **info)
        except Exception:
            # In production mode a crashing extractor falls back to the kill
            # switch: every cache of this object type is invalidated.
            if self._production_mode:
                return [self._invalidate_all]
            # In development mode, report the error
            else:
                raise
        # If the function returned a None, skip it altogether
        # (extractors return None to deliberately ignore a change)
        if params is None:
            continue
        # If it returned a correct set of fields (as @describes()ed), generate a dependency
        elif set(params) == extractor_info.param_names:
            ret.append(ConditionalDependency(self._object_type, params))
        # In production mode, just invalidate all
        elif self._production_mode:
            return [self._invalidate_all]
        # In development mode, report an error
        else:
            raise RuntimeError(
                f'The described extractor {extractor_info.func} was supposed to return a dict of {extractor_info.param_names!r}, '
                f'but it returned only {params!r}. Please fix. '
                f'It will not fail in production, but caching will be disabled.'
            )
    return ret
@dataclass
class ConditionalDependency(DependencyBase):
    """ Internal dependency used by Scope

    A dependency object of this type is generated for the output of every extractor function.
    This is how the whole thing operates:
    When a new article is created, it is passed to the `invalidate_for()` function.
    An extractor function, described like this:

        @article_scopes.describes('category')
        def article_category(article: Article, **info):
            # Extract filter arguments from a new object
            return {'category': article.category}

    will generate a dependency:

        ConditionalDependency(object_type='article', conditions={'category': 'sci-fi'})
        # it is just a string:
        'condition:article:&category=sci-fi&'

    This string invalidates any cache entries that had been created like this:

        cache.put(
            ...
            *article_scopes.condition(category=category),
        )

    So, in essence, this whole Scopes is just an interface to match the two strings in a declarative fashion.
    """
    object_type: str
    condition: str
    __slots__ = 'object_type', 'condition',

    # NOTE: @dataclass keeps this hand-written __init__ (it only generates one
    # when the class does not define it), which lets us flatten `conditions`
    # into a single string here.
    def __init__(self, object_type: str, conditions: dict):
        self.object_type = object_type
        self.condition = '&'.join(f'{key}={value}'
                                  # items are sorted to make sure they always match in the same way!
                                  for key, value in sorted(conditions.items()))
        # Surround it with &s to enable wildcard matching
        self.condition = '&' + self.condition + '&'

    # Key prefix shared by every conditional dependency string
    PREFIX = 'condition'

    def key(self) -> str:
        """ Render this dependency as its cache-key string. """
        return f'{self.PREFIX}:{self.object_type}:{self.condition}'
@dataclass
class ExtractorInfo:
    """ Registration record for one extractor function declared via `@Scopes.describes()`. """
    # Set of parameters that the extractor function promises to return
    param_names: FrozenSet[str]
    # Set of parameters that it watches the modifications on.
    # Default: equal to `param_names`
    watch_modified: FrozenSet[str]
    # The extractor function itself
    func: ExtractorFunc
    __slots__ = 'param_names', 'watch_modified', 'func'
class InvalidateAll(Tag):
    """ A custom tag, used in production, to invalidate all scopes in cases when Scopes is misconfigured """
    # Use the same prefix. Not important; just looks nice.
    # There will be no clashes because all `ConditionalDependency` have "&" in their names
    PREFIX = ConditionalDependency.PREFIX

    def __init__(self, object_type: str):
        # The tag name only needs to be unique per object type
        super().__init__(f'{object_type}::InvalidateAll')
| 42.810241
| 139
| 0.642651
| 1,711
| 14,213
| 5.251315
| 0.229106
| 0.017807
| 0.014023
| 0.010017
| 0.186867
| 0.162159
| 0.123317
| 0.123317
| 0.123317
| 0.116416
| 0
| 0.00069
| 0.286569
| 14,213
| 331
| 140
| 42.939577
| 0.885404
| 0.602617
| 0
| 0.166667
| 0
| 0.010417
| 0.140249
| 0.038382
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.052083
| 0.010417
| 0.364583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f853129c44d31a1158c0bd481a49cd736cdcaa4
| 7,326
|
py
|
Python
|
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 3
|
2021-08-23T02:49:09.000Z
|
2021-08-24T01:48:14.000Z
|
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | null | null | null |
sm4.py
|
cclauss/Pythonista-sm
|
ef5c6527f36334a2b4dc3f0a92f957161aa3bdd3
|
[
"Apache-2.0"
] | 1
|
2021-08-23T03:02:39.000Z
|
2021-08-23T03:02:39.000Z
|
# -*-coding:utf-8-*-
import base64
import copy
from .func import xor, rotl, get_uint32_be, put_uint32_be, bytes_to_list, list_to_bytes, padding, un_padding
# SM4 S-box: the fixed 256-byte substitution table, laid out 16 bytes per row
# so that BOXES_TABLE[0xRC] sits at row R, column C.
BOXES_TABLE = [
    0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05,
    0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99,
    0x9c, 0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62,
    0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6,
    0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8,
    0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35,
    0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87,
    0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e,
    0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1,
    0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3,
    0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f,
    0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51,
    0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8,
    0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0,
    0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84,
    0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48,
]
# System parameters FK (used when mixing the raw key during key expansion)
FK = [0xa3b1bac6, 0x56aa3350, 0x677d9197, 0xb27022dc]
# Fixed parameters CK: the 32 round constants of the key schedule
CK = [
    0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1,
    0xa8afb6bd, 0xc4cbd2d9, 0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249,
    0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9, 0xc0c7ced5, 0xdce3eaf1,
    0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299,
    0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41,
    0x484f565d, 0x646b7279
]
# Cipher direction flags accepted by Crypt.set_key()
ENCRYPT = 0
DECRYPT = 1
class Crypt(object):
    """ SM4 block cipher (128-bit key, 128-bit block) supporting ECB and CBC modes.

    Typical usage: construct, call set_key(key, ENCRYPT/DECRYPT), then
    crypt_ecb()/crypt_cbc(). Helper functions (xor, rotl, get/put_uint32_be,
    padding, ...) come from the sibling `func` module.
    """

    def __init__(self, mode=ENCRYPT):
        # sk: the 32 expanded round keys, filled in by set_key()
        self.sk = [0] * 32
        # mode: ENCRYPT or DECRYPT; also (re)set by set_key()
        self.mode = mode

    @classmethod
    def bb(cls, ka):
        """ Apply the S-box to each of the four bytes of the 32-bit word `ka`. """
        b = [0, 0, 0, 0]
        a = put_uint32_be(ka)
        b[0] = BOXES_TABLE[a[0]]
        b[1] = BOXES_TABLE[a[1]]
        b[2] = BOXES_TABLE[a[2]]
        b[3] = BOXES_TABLE[a[3]]
        bb = get_uint32_be(b[0:4])
        return bb

    # Compute one round's key-schedule word.
    # args: [in] ka: a 32-bit unsigned value;
    # return: sk[i]: i in {0, 1, 2, 3, ..., 31}.
    @classmethod
    def _round_key(cls, ka):
        bb = cls.bb(ka)
        rk = bb ^ (rotl(bb, 13)) ^ (rotl(bb, 23))
        return rk

    # Compute one round of encryption/decryption.
    # args: [in] x0..x3: the four 32-bit state words;
    # args: [in] rk: the round encryption/decryption key;
    # returns the transformed 32-bit word.
    @classmethod
    def _f(cls, x0, x1, x2, x3, rk):
        # "T algorithm" == "L algorithm" + "t algorithm".
        # args: [in] ka: a 32-bit unsigned value;
        # return: c, computed with the linear transform "L" and the non-linear transform "t"
        def _sm4_l_t(ka):
            bb = cls.bb(ka)
            c = bb ^ (rotl(bb, 2)) ^ (rotl(bb, 10)) ^ (rotl(bb, 18)) ^ (rotl(bb, 24))
            return c
        return x0 ^ _sm4_l_t(x1 ^ x2 ^ x3 ^ rk)

    def set_key(self, key, mode):
        """ Expand the 16-byte `key` into 32 round keys; reversed in place for DECRYPT. """
        key = bytes_to_list(key)
        MK = [0, 0, 0, 0]
        MK[0] = get_uint32_be(key[0:4])
        MK[1] = get_uint32_be(key[4:8])
        MK[2] = get_uint32_be(key[8:12])
        MK[3] = get_uint32_be(key[12:16])
        k = [0] * 36
        k[0:4] = xor(MK[0:4], FK[0:4])
        for i in range(32):
            k[i + 4] = k[i] ^ (self._round_key(k[i + 1] ^ k[i + 2] ^ k[i + 3] ^ CK[i]))
            self.sk[i] = k[i + 4]
        self.mode = mode
        if mode == DECRYPT:
            # Decryption uses the round keys in reverse order
            for idx in range(16):
                t = self.sk[idx]
                self.sk[idx] = self.sk[31 - idx]
                self.sk[31 - idx] = t

    def one_round(self, sk, in_put):
        """ Run the 32 SM4 rounds over one 16-byte block and return the 16-byte output. """
        out_put = []
        ul_buf = [0] * 36
        ul_buf[0] = get_uint32_be(in_put[0:4])
        ul_buf[1] = get_uint32_be(in_put[4:8])
        ul_buf[2] = get_uint32_be(in_put[8:12])
        ul_buf[3] = get_uint32_be(in_put[12:16])
        for idx in range(32):
            ul_buf[idx + 4] = self._f(
                ul_buf[idx], ul_buf[idx + 1], ul_buf[idx + 2], ul_buf[idx + 3], sk[idx]
            )
        # The final reverse transform R: output words in reverse order
        out_put += put_uint32_be(ul_buf[35])
        out_put += put_uint32_be(ul_buf[34])
        out_put += put_uint32_be(ul_buf[33])
        out_put += put_uint32_be(ul_buf[32])
        return out_put

    def crypt_ecb(self, input_data):
        # SM4-ECB block encryption/decryption
        input_data = bytes_to_list(input_data)
        if self.mode == ENCRYPT:
            # Pad plaintext up to a whole number of 16-byte blocks
            input_data = padding(input_data)
        length = len(input_data)
        i = 0
        output_data = []
        while length > 0:
            output_data += self.one_round(self.sk, input_data[i:i + 16])
            i += 16
            length -= 16
        if self.mode == DECRYPT:
            # Strip the padding that was added at encryption time
            return list_to_bytes(un_padding(output_data))
        return list_to_bytes(output_data)

    def crypt_cbc(self, iv, input_data):
        # SM4-CBC buffer encryption/decryption
        i = 0
        output_data = []
        tmp_input = [0] * 16
        iv = bytes_to_list(iv)
        if self.mode == ENCRYPT:
            input_data = padding(bytes_to_list(input_data))
            length = len(input_data)
            while length > 0:
                # XOR each plaintext block with the previous ciphertext (or the IV)
                tmp_input[0:16] = xor(input_data[i:i + 16], iv[0:16])
                output_data += self.one_round(self.sk, tmp_input[0:16])
                iv = copy.deepcopy(output_data[i:i + 16])
                i += 16
                length -= 16
            return list_to_bytes(output_data)
        else:
            length = len(input_data)
            while length > 0:
                output_data += self.one_round(self.sk, input_data[i:i + 16])
                # XOR the decrypted block with the previous ciphertext (or the IV)
                output_data[i:i + 16] = xor(output_data[i:i + 16], iv[0:16])
                iv = copy.deepcopy(input_data[i:i + 16])
                i += 16
                length -= 16
            return list_to_bytes(un_padding(output_data))
# Key/IV used by the module-level helpers below.
# NOTE(review): a key and IV hard-coded in source code provide no real
# confidentiality -- confirm whether these should come from configuration/secrets.
SM4_KEY = b'ED0Z2TCK2JN9SGV2'
SM4_IV = b'GM6PR0EL5TT4YUT6'
# Externally callable functions
def sm4_encrypt(value: str) -> str:
    """
    Encrypt `value` with SM4-CBC under the module key/IV; return it base64-encoded.
    """
    cipher = Crypt()
    cipher.set_key(SM4_KEY, ENCRYPT)
    plaintext = bytearray(value.encode('utf-8', 'ignore'))
    ciphertext = cipher.crypt_cbc(SM4_IV, plaintext)
    return base64.b64encode(ciphertext).decode('utf-8', 'ignore')
def sm4_decrypt(value: str) -> str:
    """
    Base64-decode `value`, decrypt it with SM4-CBC under the module key/IV, and return the text.
    """
    cipher = Crypt()
    cipher.set_key(SM4_KEY, DECRYPT)
    ciphertext = base64.b64decode(value)
    plaintext = cipher.crypt_cbc(SM4_IV, ciphertext)
    return plaintext.decode('utf-8', 'ignore')
# Manual test: CBC round-trip of a mixed ASCII/UTF-8 string, printed to stdout
def test():
    key = b'KNN36H7F0MZB6RTW'
    iv = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'  # bytes type
    crypt_sm4 = Crypt()
    # value = b'Jll9496././'  # bytes type
    value = bytearray("test明文".encode('utf-8'))
    # CBC mode: encrypt, print, then decrypt and print the recovered text
    crypt_sm4.set_key(key, ENCRYPT)
    encrypt_value = crypt_sm4.crypt_cbc(iv, value)  # bytes type
    encrypt_value = base64.b64encode(encrypt_value).decode('utf-8', 'ignore')
    print(encrypt_value.upper())  # upper-case all letters
    encrypt_value = base64.b64decode(encrypt_value)
    crypt_sm4.set_key(key, DECRYPT)
    decrypt_value = crypt_sm4.crypt_cbc(iv, encrypt_value)  # bytes type
    print(decrypt_value.decode('utf-8', 'ignore'))
    # ECB mode (kept for reference):
    # crypt_sm4.set_key(key, ENCRYPT)
    # encrypt_value = crypt_sm4.crypt_ecb(value)  # bytes type
    # print(encrypt_value)
    #
    # crypt_sm4.set_key(key, DECRYPT)
    # decrypt_value = crypt_sm4.crypt_ecb(encrypt_value)  # bytes type
    # print(decrypt_value)
| 15.822894
| 108
| 0.632678
| 1,181
| 7,326
| 3.77138
| 0.367485
| 0.028738
| 0.028289
| 0.035025
| 0.316345
| 0.236641
| 0.201841
| 0.134037
| 0.121688
| 0.118096
| 0
| 0.189908
| 0.210074
| 7,326
| 462
| 109
| 15.857143
| 0.579748
| 0.104559
| 0
| 0.08794
| 0
| 0.002513
| 0.027401
| 0.009852
| 0
| 0
| 0.213054
| 0
| 0
| 1
| 0.030151
| false
| 0
| 0.007538
| 0
| 0.067839
| 0.005025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f86b378e72ad44c8909918ac3d29f4b3f63ef71
| 617
|
py
|
Python
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9
|
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/unique-paths/unique-paths.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4
|
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:40 ms, 在所有 Python3 提交中击败了74.47% 的用户
内存消耗:13.8 MB, 在所有 Python3 提交中击败了7.95% 的用户
解题思路:
只能向右或向下前进。
则当前格的路径数等于左侧格的路径数+上侧格的路径数
dp[i][j] = dp[i-1][j] + dp[i][j-1]
例子:
1 1 1 1 1 1
1 2 3 4 5 6
1 3 6 10 15 21
1 4 10 20 35 56
"""
class Solution:
    def uniquePaths(self, m: int, n: int) -> int:
        """ Count paths from the top-left to the bottom-right of an n-by-m grid,
        moving only right or down.

        DP recurrence: paths into a cell = paths from the left + paths from above.
        """
        paths = [[1] * m for _ in range(n)]
        for row in range(1, n):
            for col in range(1, m):
                paths[row][col] = paths[row][col - 1] + paths[row - 1][col]
        return paths[-1][-1]
| 22.851852
| 54
| 0.458671
| 109
| 617
| 2.577982
| 0.458716
| 0.049822
| 0.053381
| 0.05694
| 0.124555
| 0.124555
| 0.099644
| 0.099644
| 0.099644
| 0.099644
| 0
| 0.145119
| 0.385737
| 617
| 26
| 55
| 23.730769
| 0.596306
| 0.552674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f8851b9c216915fb1f4051cf734644949f0036e
| 1,207
|
py
|
Python
|
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | 3
|
2021-11-09T09:55:17.000Z
|
2022-02-19T02:58:27.000Z
|
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
crusoe_observe/ansible/roles/mlData/files/build-ml.py
|
CSIRT-MU/CRUSOE
|
73e4ac0ced6c3ac46d24ac5c3feb01a1e88bd36b
|
[
"MIT"
] | null | null | null |
import sys
import structlog
from osrest import Tcpml
import services_component
def build_os(dataset_path, model_path, logger):
    """ Train the OS-detection model from `dataset_path` and persist it to `model_path`. """
    logger.info(f"Loading OS dataset from \"{dataset_path}\".")
    os_dataset = Tcpml.load_dataset(dataset_path)
    logger.info(f"Building OS model.")
    os_model = Tcpml.build_model(os_dataset)
    logger.info(f"Storing OS model to \"{model_path}\".")
    Tcpml.save_model(os_model, model_path)
def build_si(dataset_path, model_path, logger):
    """ Build the service-identification (SI) model.

    Args:
        dataset_path: CSV dataset to train from.
        model_path: path where the trained model will be stored.
        logger: logger forwarded to the ServiceIdentifier.
    """
    paths = {
        "model": model_path,
        "dataset": dataset_path,
        "nbar": f"{services_component.__path__[0]}/data/si_nbar.json"
    }
    # Constructing the ServiceIdentifier presumably trains/stores the model as a
    # side effect (the original bound the result to a never-used local, which is
    # dropped here) -- TODO confirm against services_component.
    services_component.services.ServiceIdentifier(paths, ["0.0.0.0/0"], logger)
def main():
    """ Entry point: build the OS and SI models.

    argv[1] is the directory holding the datasets; argv[2] is the directory
    where the trained models are written.
    """
    data_dir = sys.argv[1]
    model_dir = sys.argv[2]
    logger = structlog.PrintLogger()
    logger.info("Starting OS model build.")
    build_os(f"{data_dir}os_dataset.csv", f"{model_dir}os_model.pkl", logger)
    logger.info("Finishing OS model build.")
    logger.info("Starting SI model build.")
    build_si(f"{data_dir}si_dataset.csv", f"{model_dir}si_model.pkl", logger)
    logger.info("Finishing SI model build.")


if __name__ == "__main__":
    main()
| 30.948718
| 85
| 0.697597
| 176
| 1,207
| 4.505682
| 0.238636
| 0.090794
| 0.06053
| 0.050441
| 0.204288
| 0.138714
| 0
| 0
| 0
| 0
| 0
| 0.007944
| 0.1657
| 1,207
| 38
| 86
| 31.763158
| 0.779543
| 0
| 0
| 0
| 0
| 0
| 0.295775
| 0.13256
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.133333
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f8911a3ffc8a10cc46f6545eeb625b8d7a7c1f6
| 4,049
|
py
|
Python
|
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | 1
|
2022-01-02T19:55:18.000Z
|
2022-01-02T19:55:18.000Z
|
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | null | null | null |
converter.py
|
TheSpiritXIII/Qt-Creator-TmTheme
|
3eba37c3712da9964e775a750732b6fda7cb6536
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import sys
import xml.etree.ElementTree as ET
def parse_value(element):
    """ Convert a plist XML element into the corresponding Python value. """
    tag = element.tag
    if tag == "string":
        return element.text
    if tag == "dict":
        return parse_dict(element)
    if tag == "array":
        return parse_array(element)
    raise Exception("Unknown tag `" + element.tag + "`")
def parse_array(root):
    """ Convert a plist <array> element into a Python list of parsed values. """
    items = []
    for child in root:
        # <key> elements are only legal inside <dict>
        if child.tag == "key":
            raise Exception("Arrays must not have a key. Found key `" + child.text + "`")
        items.append(parse_value(child))
    return items
def parse_dict(root):
    """ Convert a plist <dict> element (alternating <key>/value children) into a Python dict.

    Raises:
        Exception: when two keys appear in a row, a value appears without a
            preceding key, or a trailing key has no value.
    """
    pending_key = None
    result = {}
    for element in root:
        if element.tag == "key":
            if pending_key is not None:
                raise Exception("Missing value for key `" + pending_key + "`")
            pending_key = element.text
        else:
            if pending_key is None:
                # BUG FIX: the original concatenated lastKey (None here) into the
                # message, raising TypeError instead of the intended Exception.
                raise Exception("Found value element `" + element.tag + "` without a preceding key")
            result[pending_key] = parse_value(element)
            pending_key = None
    # A trailing <key> with no value used to be silently dropped; reject it.
    if pending_key is not None:
        raise Exception("Missing value for key `" + pending_key + "`")
    return result
def parse_file(filename):
    """ Read a .tmTheme plist file and return its top-level dict.

    All control characters (codepoints 0..31) are stripped before parsing,
    since some .tmTheme files contain ones that break ElementTree.
    """
    # `with` guarantees the handle is closed (the original leaked the file).
    with open(filename, 'r') as xml_file:
        file_contents = xml_file.read()
    # Map every control character to None, i.e. delete it.
    control_chars = dict.fromkeys(range(32))
    file_contents = file_contents.translate(control_chars)
    return parse_dict(ET.fromstring(file_contents)[0])
def write_style(file, name, foreground, background, italic):
    """ Append one Qt Creator <style .../> element to `file`.

    Only the attributes whose values are set are emitted; `italic` is a flag.
    """
    element = "\t<style name=\"" + name + "\" "
    if foreground:
        element += "foreground=\"" + foreground + "\" "
    if background:
        element += "background=\"" + background + "\" "
    if italic:
        element += "italic=\"true\" "
    file.write(element + "/>\n")
def create_file(filename, data):
    """ Write a Qt Creator style-scheme XML file built from the parsed tmTheme `data`.

    Args:
        filename: output path for the Qt Creator .xml style sheet.
        data: top-level tmTheme dict as produced by parse_file().
    Raises:
        Exception: when a settings entry has neither "scope" nor "settings".
    """
    # Maps tmTheme scope names to the Qt Creator style names they feed.
    key_map = {
        "comment": ["Comment"],
        "constant.numeric": ["Number"],
        "entity.name.function": ["Function"],
        "constant": ["Constant"],
        "string": ["String"],
        "keyword": ["Keyword", "Preprocessor"],
        "keyword.operator": ["Operator"],
        "variable": ["Field"],
        "storage.type": ["PrimitiveType"]
    }
    # `with` guarantees the output file is flushed and closed (the original
    # opened it without ever closing it).
    with open(filename, "w") as f:
        f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n")
        f.write("<style-scheme version=\"1.0\" name=\"" + data["name"] + "\">\n")
        if "gutterSettings" in data:
            gutter_settings = data["gutterSettings"]
            write_style(f, "LineNumber", gutter_settings["foreground"], None, False)
            write_style(f, "DisabledCode", gutter_settings["foreground"], None, False)
        for setting in data["settings"]:
            if "scope" in setting:
                # A scoped setting may list several comma-separated tmTheme scopes.
                for scope in setting["scope"].split(","):
                    scope = scope.strip()
                    if scope in key_map:
                        full_settings = setting["settings"]
                        background = None
                        foreground = None
                        italics = False
                        if "foreground" in full_settings:
                            foreground = full_settings["foreground"]
                        if "background" in full_settings:
                            background = full_settings["background"]
                        if "fontStyle" in full_settings:
                            italics = full_settings["fontStyle"] == "italic"
                        for key in key_map[scope]:
                            write_style(f, key, foreground, background, italics)
            elif "settings" in setting:
                # The scope-less entry holds the editor-wide colors.
                full_settings = setting["settings"]
                write_style(f, "Text", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Type", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Enumeration", full_settings["foreground"], full_settings["background"], False)
                write_style(f, "Selection", None, full_settings["selection"], False)
                write_style(f, "CurrentLine", None, full_settings["lineHighlight"], False)
                write_style(f, "VisualWhitespace", full_settings["invisibles"], None, False)
            else:
                raise Exception("Unknown setting type")
        f.write("</style-scheme>\n")
def main():
    """ CLI entry point: convert a .tmTheme (argv[1]) into a Qt Creator style (argv[2]). """
    if len(sys.argv) != 3:
        print("Invalid number of arguments. Must be: `converter.py input output`")
        return
    create_file(sys.argv[2], parse_file(sys.argv[1]))


# Guard the invocation so importing this module no longer runs the conversion
# as a side effect (the original called main() unconditionally).
if __name__ == "__main__":
    main()
| 29.992593
| 97
| 0.667078
| 500
| 4,049
| 5.3
| 0.27
| 0.076981
| 0.037358
| 0.036226
| 0.17434
| 0.123019
| 0.097358
| 0.097358
| 0.097358
| 0.067925
| 0
| 0.003545
| 0.163991
| 4,049
| 134
| 98
| 30.216418
| 0.779321
| 0.065942
| 0
| 0.152381
| 0
| 0
| 0.240795
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.019048
| 0
| 0.152381
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f8eec6be049e9fe4a729f243ebe752e635be903
| 1,756
|
py
|
Python
|
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
rfim2d/tests/test_param_dict.py
|
lxh3/rfim2d
|
5283d0df492ad20ecef30b17803437ca9155f8b3
|
[
"MIT"
] | null | null | null |
from rfim2d import param_dict
# Expected parameter-name lists per fit type, for the general functional forms
key_dict = {
    'A': ['Sigma', 'a', 'b'],
    'dMdh': ['hMax', 'eta', 'a', 'b', 'c'],
    'joint': ['rScale', 'rc', 'sScale', 'etaScale', 'df',
              'lambdaH', 'B', 'C', 'F'],
    'Sigma': ['rScale', 'rc', 'sScale', 'df', 'B', 'C'],
    'eta': ['rScale', 'rc', 'etaScale', 'lambdaH', 'B', 'F']
}
# Expected parameter-name lists for the power-law variants of the fits
powerlaw_key_dict = {
    'joint': ['rScale', 'rc', 'sScale', 'etaScale', 'sigma', 'betaDelta'],
    'Sigma': ['rScale', 'rc', 'sScale', 'sigma'],
    'eta': ['rScale', 'rc', 'etaScale', 'betaDelta']
}
def test_split_dict():
    """ split_dict returns (keys, values) for a dict and -1 for a non-dict input. """
    sample = {'one': 1, 'two': 2}
    keys, values = param_dict.split_dict(sample)
    assert keys == ['one', 'two']
    assert values == [1, 2]
    assert param_dict.split_dict('test') == -1
def test_joint_dict():
    """ join_dict zips keys with values into a dict; mismatched lengths yield -1. """
    names = ['one', 'two']
    matching = [1, 2]
    too_many = [1, 2, 3]
    assert isinstance(param_dict.join_dict(names, matching), dict)
    assert param_dict.join_dict(names, too_many) == -1
def test_get_keys():
    """ get_keys returns key lists per fit type; 'A' has no power-law variant (-> -1). """
    keys1 = param_dict.get_keys('A')
    # 'A' only exists for the default functional form
    assert param_dict.get_keys('A', func_type='power law') == -1
    keys3 = param_dict.get_keys('Sigma')
    keys4 = param_dict.get_keys('Sigma', func_type='power law')
    # Printed (not asserted) so a failure in any lookup above surfaces in pytest output
    print(str(keys1)+str(keys3)+str(keys4))
def test_separate_params():
    """ separate_params splits a joint parameter dict into Sigma and eta parts. """
    joint_keys = param_dict.get_keys('joint')
    unit_values = [1. for _ in range(len(joint_keys))]
    joint_params = param_dict.join_dict(joint_keys, unit_values)
    sigma_params, eta_params = param_dict.separate_params(joint_params)
    return sigma_params, eta_params
def test_generate_and_split_dict():
    """ Round-trip: build a dict with one fixed param, then split it back into values. """
    params = [1.0, 1.0]
    keys = ['A', 'B', 'C']
    # 'C' is pinned to 0. and is therefore absent from `params`
    fixed_dict = dict([('C', 0.)])
    new_dict = param_dict.generate_dict_with_fixed_params(params, keys, fixed_dict)
    # NOTE(review): `vals` is never asserted against -- this only checks the calls run
    vals = param_dict.split_dict_with_fixed_params(new_dict, fixed_dict)
| 30.807018
| 83
| 0.600228
| 249
| 1,756
| 4.004016
| 0.261044
| 0.126379
| 0.060181
| 0.080241
| 0.211635
| 0.081244
| 0
| 0
| 0
| 0
| 0
| 0.01763
| 0.192483
| 1,756
| 56
| 84
| 31.357143
| 0.685473
| 0
| 0
| 0
| 0
| 0
| 0.151481
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.113636
| false
| 0
| 0.022727
| 0
| 0.159091
| 0.022727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f90a5c7e193988dc43d8564c22a87b2b8ba9258
| 753
|
py
|
Python
|
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
populator/exercise_splitter.py
|
Calvibert/workout-generator
|
0c905a2132be4e0f440d8ecbfaba71592c0fe9e2
|
[
"MIT"
] | null | null | null |
# Upper-lower splitter for the exercise list
import sys

import exercise_populator_config as conf

print('Enter the file name: ')
# Strip only the trailing newline (the original sliced off the last character
# unconditionally, which would eat a real character on a newline-less stream).
filename = sys.stdin.readline().rstrip('\n')

upper = conf.CONST_MUSCLES['upper']
lower = conf.CONST_MUSCLES['lower']

upper_exercises = []
lower_exercises = []
# Each input line looks like "<name>, <muscle>"; the muscle decides the bucket.
# Anything not recognized as an upper-body muscle falls into the lower bucket,
# matching the original behavior.
with open(filename, 'r') as source:
    for line in source:
        separator = line.find(',')
        muscle = line[separator + 2:].rstrip()
        if muscle in upper:
            upper_exercises.append(line.rstrip())
        else:
            lower_exercises.append(line.rstrip())

# Write each bucket directly instead of redirecting sys.stdout (the original
# left sys.stdout pointing at an already-closed file between the two writes,
# and never closed the input file).
with open('upper.txt', 'w+') as out:
    for exercise in upper_exercises:
        print(exercise, file=out)
with open('lower.txt', 'w+') as out:
    for exercise in lower_exercises:
        print(exercise, file=out)
| 16.733333
| 44
| 0.648074
| 119
| 753
| 4.016807
| 0.378151
| 0.075314
| 0.062762
| 0.075314
| 0.104603
| 0.104603
| 0.104603
| 0.104603
| 0
| 0
| 0
| 0.004983
| 0.200531
| 753
| 44
| 45
| 17.113636
| 0.789037
| 0.055777
| 0
| 0.193548
| 0
| 0
| 0.077574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.064516
| 0.096774
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f9197c39f4c2b4b9b35a18f55ab839142699e80
| 4,893
|
py
|
Python
|
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
fbpmp/pcf/mpc/emp.py
|
benliugithub/fbpcs
|
7af984264428058645847135026d474d7e28144e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env/python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
import logging
import os
import pathlib
import shutil
from typing import Dict, List
from fbpmp.pcf import call_process
from fbpmp.pcf.errors import MPCRuntimeError, MPCStartupError
from fbpmp.pcf.games import (
ConversionLift,
ConverterLift,
SecretShareConversionLift,
SecretShareConverterLift,
)
from fbpmp.pcf.mpc.base import ServerClientMPCFramework
from fbpmp.pcf.structs import Game, Metric, Status
# Directory containing the compiled EMP game binaries; falls back to the CWD
# when the EMP_GAME_DIR environment variable is unset.
EMP_GAME_DIR = pathlib.Path(os.environ.get("EMP_GAME_DIR", os.getcwd()))
MAX_ROWS_PER_PARTITION = 1000000  # 1 million
class EmpMPCFramework(ServerClientMPCFramework):
"""
Implementation of EMP SH2PC MPC Framework
https://github.com/emp-toolkit/emp-sh2pc
"""
SUPPORTED_GAMES: List[Game] = [
ConversionLift,
ConverterLift,
SecretShareConversionLift,
SecretShareConverterLift,
]
async def prepare_input(self) -> Status:
# We purposefully do not want to use the base class's prepare_input
# method since it will sort the input which breaks the secret_share
# game logic (since IDs won't appear to match).
return Status.OK
async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:
"""
Run the MPC game as the given player.
"""
logger = logging.getLogger(
f"EmpMPCFramework <Game:{self.game.name}> <{self.player.role!s}>"
)
game_path = EMP_GAME_DIR / self.game.base_game
game_path_absolute = game_path.absolute()
self._check_executable(game_path_absolute)
if len(self.other_players) != 0:
# pre_setup should have validated this, but we put another check
# here just to reinforce the invariant.
if len(self.other_players) != 1:
raise ValueError(
f"Must be run with exactly one other player, not {len(self.other_players)}"
)
other_player = self.other_players[0]
ip_address = other_player.ip_address
port = other_player.port
else:
ip_address = self.player.ip_address
port = self.player.port
cmd = (
f"{game_path_absolute} --role={self.player.id}"
f" --data_directory={self.input_file.parent.absolute()}"
f" --input_filename={self.input_file.name}"
f" --server_ip={ip_address}"
f" --port={port}"
f" --output_filename={self.output_file}"
)
if self.output_s3_path:
cmd = cmd + f" --output_s3_path={self.output_s3_path}"
cmd = cmd.split(" ") + self.game.extra_args
self.base_logger.debug(f"running command: {cmd}")
try:
operating_dir = pathlib.Path(os.getcwd())
result = await asyncio.wait_for(
call_process.run_command(cmd, operating_dir, logger=logger),
timeout=self.run_timeout,
)
except Exception as e:
# TODO: Should log e and raise an MPCRuntimeError instead
raise e
if result.returncode != 0:
raise MPCRuntimeError(result.returncode)
# At this point, assuming everything went correctly, we should have a
# File with one result per line
result_filepath = self.input_file.parent / self.output_file
all_results: Dict[str, Dict[Metric, int]] = {}
with open(result_filepath) as f:
for line in f.readlines():
if len(line) == 0:
# For some reason, we sometimes read an empty line from the
# output of the EMP MPC program in the result file.
continue
parts = line.strip().split(",")
feature_group = parts[0]
contents = [int(field) for field in parts[1:]]
all_results[feature_group] = {
metric: value
for metric, value in zip(self.game.output_metrics, contents)
}
return all_results
def _check_executable(self, absolute_path: pathlib.Path) -> None:
    """Verify that *absolute_path* resolves to a runnable executable.

    Raises:
        MPCStartupError: when ``shutil.which`` cannot find the executable.
    """
    self.base_logger.debug(f"Checking {absolute_path} is executable.")
    resolved = shutil.which(absolute_path)
    if resolved is None:
        raise MPCStartupError(f"Executable {absolute_path} not found.")
def _check_file_exists(self, absolute_path: pathlib.Path) -> None:
    """Verify that *absolute_path* is an existing regular file.

    Raises:
        MPCStartupError: when the file does not exist (or is not a file).
    """
    self.base_logger.debug(f"Checking {absolute_path} exists.")
    # Idiom: the argument is already a pathlib.Path, so call Path.is_file()
    # directly instead of round-tripping through os.path.isfile(); the
    # semantics are identical (False for missing paths and non-files).
    if not absolute_path.is_file():
        raise MPCStartupError(f"File {absolute_path} not found.")
@staticmethod
def get_max_rows_per_partition() -> int:
    """Return the module-level cap on input rows allowed per partition."""
    return MAX_ROWS_PER_PARTITION
| 35.977941
| 95
| 0.625179
| 598
| 4,893
| 4.961538
| 0.35786
| 0.032356
| 0.020222
| 0.019211
| 0.097068
| 0.06269
| 0.04786
| 0.04786
| 0.04786
| 0.04786
| 0
| 0.006014
| 0.286327
| 4,893
| 135
| 96
| 36.244444
| 0.843643
| 0.168404
| 0
| 0.086957
| 0
| 0
| 0.141203
| 0.070979
| 0
| 0
| 0
| 0.007407
| 0
| 1
| 0.032609
| false
| 0
| 0.119565
| 0.01087
| 0.206522
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f939a72fbb64e7dc423500b36e371b897a8fc9b
| 2,168
|
py
|
Python
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 3
|
2022-01-27T07:36:24.000Z
|
2022-02-22T09:32:53.000Z
|
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | null | null | null |
01_Plots/plot_time_differences.py
|
awareseven/Reproducibility-and-Replicability-of-Web-Measurement-Studies
|
38953c70a9ab03e1d29e4f9c6da13ffcaaeac84b
|
[
"Apache-2.0"
] | 1
|
2022-02-02T08:21:39.000Z
|
2022-02-02T08:21:39.000Z
|
import matplotlib.font_manager as font_manager
import matplotlib.pyplot as plt
import pandas as pd
import os
# Read the data
path = os.path.join(os.getcwd(), "results")
df = pd.read_csv(os.path.join(path, "tracker_AND_cookies.csv"))
x = df["day"]
y1 = df["total_tracker"]
y2 = df["tracker_distinct"]
# NOTE(review): the original also read df["is_session"] into an unused y3
# variable; removed as dead code.

# Some styling stuff
fig, ax = plt.subplots(1, figsize=(7, 4))
legend_properties = {'weight': 'bold', 'size': 9}
# BUG FIX: the original called plt.legend(...) here, before any labelled
# artist existed -- it drew nothing and emitted a "No handles with labels
# found" warning.  The per-axis legends below (ax.legend / ax2.legend) are
# the ones that actually appear, so the premature call (and the
# FontProperties object built only for it) was removed.
plt.xticks(weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif', size=14)
plt.xlabel("Measurement point", weight='bold', fontname='sans-serif', size=14)

# Add first y-axis (Number of tracking requests)
ax.plot(x, y1, color="#999999", label="Number of tracking requests", marker='o', linestyle='dashed')
ax.set_ylabel('Number of tracking requests')
ax.legend(loc=2, prop=legend_properties)
plt.ylabel("Number of tracking requests", weight='bold', fontname='sans-serif', size=14)

# Add second y-axis
ax2 = ax.twinx()  # instantiate a second axes that shares the same x-axis
ax2.plot(x, y2, color="#555555", label="Number of distinct trackers", marker='x', linestyle='solid')
ax2.set_ylabel('Number of distinct trackers')
ax2.set_ylim(3500, 4200)
ax2.legend(loc=1, prop=legend_properties)
plt.ylabel("Number of distinct trackers", weight='bold', fontname='sans-serif', size=14)
plt.yticks(weight='bold', fontname='sans-serif')

# Save plot to disc (portable path join instead of string concatenation)
plt.grid(False)
plt.savefig(os.path.join(path, "04_long_term_tracker_cookies.pdf"), dpi=600,
            transparent=False, bbox_inches='tight', format="pdf")

# Simple min / max calculations
max_value = y1.max()
min_value = y1.min()
max_day = y1.index[df['total_tracker'] == max_value].tolist()
min_day = y1.index[df['total_tracker'] == min_value].tolist()
print("Max at: ", max_day, "max value: ", max_value)
print("Min at: ", min_day, "min value: ", min_value)
print("std:", y1.std())
| 37.37931
| 100
| 0.683579
| 324
| 2,168
| 4.475309
| 0.388889
| 0.055172
| 0.074483
| 0.091034
| 0.317931
| 0.235172
| 0.202069
| 0.151034
| 0.095172
| 0.095172
| 0
| 0.031539
| 0.151753
| 2,168
| 57
| 101
| 38.035088
| 0.756933
| 0.096402
| 0
| 0
| 0
| 0
| 0.261026
| 0.028718
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.097561
| 0
| 0.097561
| 0.073171
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
2f9feffcaa4a8285a2abe800ba2837e256eb6e2b
| 2,636
|
py
|
Python
|
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 2
|
2019-03-17T04:03:08.000Z
|
2019-05-01T09:42:23.000Z
|
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | null | null | null |
nebula_utils/nebula_utils/persist_compute/utils.py
|
threathunterX/python_lib
|
e2d4052de04c82cb7bccd08042f28db824cab442
|
[
"Apache-2.0"
] | 4
|
2019-06-24T05:47:24.000Z
|
2020-09-29T05:00:31.000Z
|
# -*- coding: utf-8 -*-
# Maps raw group-by key names to the dimension names used in Stat_Dict.
Group_Key_To_Dimension = {
    'c_ip': 'ip',
    'uid': 'user',
    'page': 'page',
    'did': 'did',
    # 'c_ipc': 'ipc',
}

# All dimension names currently supported (insertion order preserved).
Avail_Dimensions = tuple(Group_Key_To_Dimension.values())

# dimension -> variable name used to fetch the click count for it.
Click_Variable_Names = {
    'ip': 'ip__visit__dynamic_count__1h__slot',
    'did': 'did__visit__dynamic_count__1h__slot',
    'user': 'user__visit__dynamic_count__1h__slot',
    'page': 'page__visit__dynamic_count__1h__slot',
}

# Numeric stat-type codes, one per dimension.
IP_Stat_Type = 2
IPC_Stat_Type = 3
DID_Stat_Type = 4
UID_Stat_Type = 5
PAGE_Stat_Type = 6

# dimension -> stat-type prefix code.
Dimension_Stat_Prefix = {
    'ip': IP_Stat_Type,
    'ipc': IPC_Stat_Type,
    'did': DID_Stat_Type,
    'user': UID_Stat_Type,
    'page': PAGE_Stat_Type,
}

# Incident categories tracked per scene.
Category = ['VISITOR', 'ACCOUNT', 'ORDER',
            'TRANSACTION', 'MARKETING', 'OTHER']

# category -> variable name carrying that scene's incident count.
Scene_Variable_Names = {
    'VISITOR': 'total__visit__visitor_incident_count__1h__slot',
    'ACCOUNT': 'total__visit__account_incident_count__1h__slot',
    'ORDER': 'total__visit__order_incident_count__1h__slot',
    'TRANSACTION': 'total__visit__transaction_incident_count__1h__slot',
    'MARKETING': 'total__visit__marketing_incident_count__1h__slot',
    'OTHER': 'total__visit__other_incident_count__1h__slot',
}
def get_dimension(group_key_name):
    """Translate a group-by key into the corresponding Stat_Dict dimension.

    Returns None when the key has no known dimension mapping.
    """
    return Group_Key_To_Dimension.get(group_key_name)
def dict_merge(src_dict, dst_dict):
    """Merge ``dst_dict`` into ``src_dict`` in place, key by key.

    Scalar values (str/int/float) are accumulated as integers, sets are
    unioned, and nested dicts are merged recursively; keys missing from
    ``src_dict`` are copied over.  Ported from Python 2: ``iteritems()``,
    ``has_key()`` and ``basestring`` no longer exist in Python 3.

    >>> s = dict(a=1,b='2')
    >>> d = {'b': 3, 'c': 4}
    >>> dict_merge(s,d)
    >>> t = {'a': 1, 'b': 5, 'c': 4}
    >>> s == t
    True
    >>> s = dict(a=set([1,2]), )
    >>> d = dict(a=set([2, 3]),)
    >>> dict_merge(s,d)
    >>> t = {'a':set([1,2,3])}
    >>> s == t
    True
    >>> s = dict(a={'a':1, 'b':2})
    >>> d = dict(a={'a':1, 'b':2})
    >>> dict_merge(s, d)
    >>> t = dict(a={'a':2, 'b':4})
    >>> s == t
    True
    """
    for k, v in dst_dict.items():
        if k not in src_dict:
            # New key: copy the value across unchanged.
            src_dict[k] = v
        elif isinstance(v, (str, int, float)):
            # Scalars accumulate as integers (matching the doctest contract).
            src_dict[k] = int(v) + int(src_dict[k])
        elif isinstance(v, set):
            assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
            src_dict[k].update(v)
        elif isinstance(v, dict):
            assert type(v) == type(src_dict[k]), 'key %s,dst_dict value: %s type: %s, src_dict value: %s type:%s' % (k, v, type(v), src_dict[k], type(src_dict[k]))
            dict_merge(src_dict[k], v)
| 28.967033
| 167
| 0.60091
| 389
| 2,636
| 3.645244
| 0.213368
| 0.074048
| 0.062059
| 0.080395
| 0.248942
| 0.171368
| 0.126939
| 0.126939
| 0.126939
| 0.126939
| 0
| 0.017822
| 0.233687
| 2,636
| 90
| 168
| 29.288889
| 0.684158
| 0.194234
| 0
| 0.04
| 0
| 0.04
| 0.297767
| 0.20794
| 0
| 0
| 0
| 0
| 0.04
| 1
| 0.04
| false
| 0
| 0
| 0
| 0.06
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85cb5db8536dff080788a2b44e8c7498ab0bd3f3
| 2,649
|
py
|
Python
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 1
|
2017-01-29T09:52:06.000Z
|
2017-01-29T09:52:06.000Z
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | 287
|
2017-03-09T00:17:20.000Z
|
2022-01-08T00:36:34.000Z
|
course_grader/dao/message.py
|
uw-it-aca/gradepage
|
7059d715cc112ad0ecb0e5012f716e525ee7b3bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from course_grader.dao import current_datetime, display_datetime
from course_grader.dao.term import (
next_gradable_term, previous_gradable_term, submission_deadline_warning,
is_grading_period_open)
from persistent_message.models import Message
def get_open_grading_messages(term, params=None):
    """Return rendered persistent messages for an open grading period.

    Adds a deadline-warning tag when the submission deadline is near, and
    supplies template parameters (year, quarter, deadline) for rendering.
    """
    # BUG FIX: the default was a shared mutable dict (params={}) that this
    # function mutates via params.update(), leaking parameters between
    # calls.  A None sentinel preserves behavior for explicitly passed
    # dicts while giving each default call its own fresh dict.
    if params is None:
        params = {}
    tags = ["is_open"]
    rel_grade_submission_deadline = ""
    if submission_deadline_warning(term):
        tags.append("just_before_deadline")
        delta = term.grade_submission_deadline - current_datetime()
        seconds_remaining = (delta.days * 24 * 3600) + delta.seconds
        # Deadline is 5:00 PM: within 17 hours it falls "today",
        # within 41 hours it falls "tomorrow".
        if seconds_remaining < (17 * 3600):
            rel_grade_submission_deadline = "5:00 PM today"
        elif seconds_remaining < (41 * 3600):
            rel_grade_submission_deadline = "5:00 PM tomorrow"
    params.update({
        "year": term.year,
        "quarter": term.get_quarter_display(),
        "grade_submission_deadline": term.grade_submission_deadline,
        "rel_grade_submission_deadline": rel_grade_submission_deadline,
    })
    return _get_persistent_messages(tags, params)
def get_closed_grading_messages(params=None):
    """Return rendered persistent messages for a closed grading period.

    Fills in the previous and next gradable terms' dates for rendering, and
    chooses between the "is_closed" and "just_after_deadline" message tags.
    """
    # BUG FIX: mutable default argument (params={}) replaced with a None
    # sentinel -- the shared default dict was mutated by params.update().
    if params is None:
        params = {}
    prev_term = previous_gradable_term()
    next_term = next_gradable_term()
    if next_term.quarter == next_term.SUMMER:
        # Summer grading opens with the A-term window.
        next_open_date = next_term.aterm_grading_period_open
    else:
        next_open_date = next_term.grading_period_open
    params.update({
        "prev_year": prev_term.year,
        "prev_quarter": prev_term.get_quarter_display(),
        "prev_window_close_date": display_datetime(
            prev_term.grade_submission_deadline),
        "next_year": next_term.year,
        "next_quarter": next_term.get_quarter_display(),
        "next_window_open_date": display_datetime(next_open_date),
        "grade_submission_deadline": prev_term.grade_submission_deadline,
    })
    if (next_term.first_day_quarter < current_datetime().date()):
        tags = ["is_closed"]
    else:
        tags = ["just_after_deadline"]
    return _get_persistent_messages(tags, params)
def get_messages_for_term(term, params=None):
    """Return the grading messages matching *term*'s open/closed state."""
    # BUG FIX: mutable default argument (params={}) replaced with a None
    # sentinel; the dict is forwarded to functions that mutate it.
    if params is None:
        params = {}
    if is_grading_period_open(term):
        return get_open_grading_messages(term, params)
    else:
        return get_closed_grading_messages(params)
def _get_persistent_messages(tags, params):
    """Render all active persistent messages carrying any of *tags*.

    The returned dict always has a "messages" list; "message_level" is set
    from the first active message, when there is one.
    """
    rendered = {"messages": []}
    for msg in Message.objects.active_messages(tags=tags):
        if "message_level" not in rendered:
            # Level of the first active message wins.
            rendered["message_level"] = msg.get_level_display().lower()
        rendered["messages"].append(msg.render(params))
    return rendered
| 35.797297
| 76
| 0.710834
| 327
| 2,649
| 5.348624
| 0.256881
| 0.133791
| 0.144654
| 0.074328
| 0.281875
| 0.173242
| 0.13665
| 0.098342
| 0.058319
| 0
| 0
| 0.014045
| 0.193658
| 2,649
| 73
| 77
| 36.287671
| 0.804775
| 0.030955
| 0
| 0.157895
| 0
| 0
| 0.117395
| 0.047582
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.052632
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85cb84708ec1159fcbafba9f83ab692e7fdf9668
| 4,541
|
py
|
Python
|
swn/file.py
|
wkitlasten/surface-water-network
|
fd36ad5ee3fbd7a1107f0c4c376c4af1295b5b1b
|
[
"BSD-3-Clause"
] | 18
|
2019-12-04T14:59:47.000Z
|
2021-12-21T12:34:28.000Z
|
swn/file.py
|
jonathanqv/surface-water-network
|
362217c897345042464564440be08b34f6f0915d
|
[
"BSD-3-Clause"
] | 17
|
2020-04-15T04:49:49.000Z
|
2022-03-04T05:22:17.000Z
|
swn/file.py
|
jonathanqv/surface-water-network
|
362217c897345042464564440be08b34f6f0915d
|
[
"BSD-3-Clause"
] | 6
|
2020-05-07T23:56:12.000Z
|
2022-01-08T16:56:32.000Z
|
"""File reading/writing helpers."""
__all__ = ["topnet2ts", "gdf_to_shapefile"]
import geopandas
import pandas as pd
from swn.logger import get_logger, logging
def topnet2ts(nc_path, varname, mult=None, log_level=logging.INFO):
    """Read TopNet data from a netCDF file into a pandas.DataFrame timeseries.

    User may need to multiply DataFrame to convert units.

    Parameters
    ----------
    nc_path : str
        File path to netCDF file
    varname : str
        Variable name in netCDF file to read
    mult : float, optional
        Multiplier applied to dataset, which preserves dtype. For example,
        to convert from "meters3 second-1" to "meters3 day-1", use 86400.
    verbose : int, optional
        Level used by logging module; default is 20 (logging.INFO)

    Returns
    -------
    pandas.DataFrame
        Where columns is rchid and index is DatetimeIndex.
    """
    # Optional dependencies are imported lazily so the rest of the module
    # works without netCDF support installed.
    try:
        from netCDF4 import Dataset
    except ImportError:
        raise ImportError('function requires netCDF4')
    try:
        from cftime import num2pydate as n2d
    except ImportError:
        # older cftime releases expose the same conversion as num2date
        from cftime import num2date as n2d
    logger = get_logger("topnet2ts", log_level)
    logger.info("reading file: %s", nc_path)
    with Dataset(nc_path, "r") as nc:
        # Work with plain arrays rather than masked arrays.
        nc.set_auto_mask(False)
        var = nc.variables[varname]
        logger.info("variable %s:\n%s", varname, var)
        # Evaluate dimensions: we expect a "time" and an "nrch" dimension;
        # any extra size-1 dimension is squeezed out by indexing it at 0.
        dim_has_time = False
        dim_has_nrch = False
        dim_ignore = []
        varslice = [Ellipsis]  # take first dimensions
        for name, size in zip(var.dimensions, var.shape):
            if name == "time":
                dim_has_time = True
            elif name == "nrch":
                dim_has_nrch = True
            elif size == 1:
                dim_ignore.append(name)
                varslice.append(0)
        # Missing expected dimensions are logged as errors but not fatal.
        if not dim_has_time:
            logger.error("no 'time' dimension found")
        if not dim_has_nrch:
            logger.error("no 'nrch' dimension found")
        if dim_ignore:
            logger.info("ignoring size 1 dimensions: %s", dim_ignore)
        dat = var[tuple(varslice)]
        if len(dat.shape) != 2:
            logger.error("expected 2 dimensions, found shape %s", dat.shape)
        if dim_has_time and var.dimensions.index("time") == 1:
            # Transpose so that time is the first axis (rows).
            dat = dat.T
        if mult is not None and mult != 1.0:
            # In-place multiply preserves the array's dtype.
            dat *= mult
        df = pd.DataFrame(dat)
        df.columns = nc.variables["rchid"]
        time_v = nc.variables["time"]
        # Convert netCDF time values+units into Python datetimes.
        df.index = pd.DatetimeIndex(n2d(time_v[:], time_v.units))
    logger.info("data successfully read")
    return df
def gdf_to_shapefile(gdf, shp_fname, **kwargs):
    """Write any GeoDataFrame to a shapefile.

    This is a workaround to the to_file method, which cannot save
    GeoDataFrame objects with other data types, such as set.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        GeoDataFrame to export
    shp_fname : str
        File path for output shapefile
    kwargs : mapping
        Keyword arguments passed to to_file and to fiona.open

    Returns
    -------
    None
    """
    if not isinstance(gdf, geopandas.GeoDataFrame):
        raise ValueError("expected gdf to be a GeoDataFrame")
    # Work on a copy so the caller's frame is not modified.
    gdf = gdf.copy()
    geom_name = gdf.geometry.name
    # BUG FIX: Series.iteritems() was deprecated in pandas 1.5 and removed
    # in pandas 2.0; .items() is the identical, supported spelling.
    for col, dtype in gdf.dtypes.items():
        if col == geom_name:
            continue
        if dtype == object:
            # Stringify arbitrary objects; keep None as empty string.
            is_none = gdf[col].map(lambda x: x is None)
            gdf[col] = gdf[col].astype(str)
            gdf.loc[is_none, col] = ""
        elif dtype == bool:
            # DBF has no native boolean type.
            gdf[col] = gdf[col].astype(int)
    # potential names that need to be shortened to <= 10 characters for DBF
    colname10 = {
        "to_segnum": "to_seg",
        "from_segnums": "from_seg",
        "num_to_outlet": "num_to_out",
        "dist_to_outlet": "dst_to_out",
        "stream_order": "strm_order",
        "upstream_length": "upstr_len",
        "upstream_area": "upstr_area",
        "inflow_segnums": "inflow_seg",
        "zcoord_count": "zcoord_num",
        "zcoord_first": "zcoordfrst",
        "zcoord_last": "zcoordlast",
        "strtop_incopt": "stpincopt",
        "prev_ibound": "previbound",
        "prev_idomain": "prevdomain",
    }
    # Drop renames that are no-ops or target absent columns; iterate over a
    # snapshot since the dict is mutated inside the loop.
    for k, v in list(colname10.items()):
        assert len(v) <= 10, v
        if k == v or k not in gdf.columns:
            del colname10[k]
    gdf.rename(columns=colname10).reset_index(drop=False)\
        .to_file(str(shp_fname), **kwargs)
| 32.435714
| 78
| 0.602731
| 579
| 4,541
| 4.594128
| 0.381693
| 0.015789
| 0.015038
| 0.008271
| 0.013534
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012741
| 0.291346
| 4,541
| 139
| 79
| 32.669065
| 0.81386
| 0.245761
| 0
| 0.046512
| 0
| 0
| 0.179933
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 1
| 0.023256
| false
| 0
| 0.104651
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85cc4f7ba3e6215d40e3cc9668b7b4fc514ab919
| 5,752
|
py
|
Python
|
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
assignment4/src/clean_documents.py
|
jschmidtnj/cs584
|
d1d4d485d1fac8743cdbbc2996792db249dcf389
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
data clean for books (clean_documents.py)
note - this is the same as in assignment 1 for the most part
"""
import re
from ast import literal_eval
from os.path import basename, splitext, exists
from typing import Optional, List
from utils import get_glob, file_path_relative
from variables import part_1_data_folder, clean_data_folder, class_key, label_key, paragraph_key
from loguru import logger
from books import BookType, start_end_map, class_map
import pandas as pd
from typing import Tuple
import yaml
# Markers used while scanning the Project Gutenberg header of each book.
title_split: str = 'title: '
author_split: str = 'author: '
# Sentinels delimiting the actual book text inside a Gutenberg file.
start_book: str = 'start of this project gutenberg ebook'
the_end: str = 'the end'
end_book: str = 'end of this project gutenberg ebook'
# Heading prefixes that should not be treated as paragraph text.
chapter: str = 'Chapter '
adventure: str = 'ADVENTURE '
# Leading character identifying a continued multi-line quotation.
multi_quote_identifier: str = '"'
min_line_len: int = 6 # line discarded if less than this number of characters
# Default output locations for the cleaned dataset and its class list.
default_file_name: str = f'{clean_data_folder}/documents.csv'
classes_file_name: str = f'{clean_data_folder}/doc_classes.txt'
# Pre-compiled pattern for collapsing runs of whitespace.
whitespace_regex = re.compile(r"\s+")
def normalize_sentence(sentence: str) -> str:
    """
    Collapse runs of whitespace into single spaces and strip the ends.

    NOTE(review): the previous docstring claimed this removes punctuation
    and returns a list of words; the code does neither -- it returns the
    whitespace-normalized string unchanged otherwise.
    """
    sentence = whitespace_regex.sub(' ', sentence).strip()
    return sentence
def clean(clean_data_basename: Optional[str] = default_file_name) -> Tuple[pd.DataFrame, List[BookType]]:
    """
    Clean the raw Gutenberg book texts into a paragraph-level dataset.

    Returns a DataFrame with one row per book paragraph (paragraph text,
    class label, class index) together with the ordered list of BookType
    labels.  When *clean_data_basename* is given and both cached files
    exist on disk, the cached dataset is loaded and returned instead of
    re-parsing the raw text files.
    """
    class_count: int = 0
    label_list: List[BookType] = []
    get_from_disk = clean_data_basename is not None
    if not get_from_disk:
        clean_data_basename = default_file_name
    clean_data_path = file_path_relative(clean_data_basename)
    classes_path = file_path_relative(classes_file_name)
    if get_from_disk and exists(clean_data_path) and exists(classes_path):
        # Cached copy available: load and return it.
        logger.info(f'reading data from {clean_data_path}')
        data = pd.read_csv(clean_data_path, converters={
            paragraph_key: literal_eval})
        label_list_enum: Optional[List[BookType]] = None
        with open(classes_path) as classes_file:
            label_list = yaml.load(classes_file, Loader=yaml.FullLoader)
            label_list_enum = [BookType(elem) for elem in label_list]
        return data, label_list_enum
    data: pd.DataFrame = pd.DataFrame()
    # preprocess data and construct examples
    found_files: bool = False
    for file_path in get_glob(f'{part_1_data_folder}/*.txt'):
        found_files = True
        file_name: str = basename(splitext(file_path)[0])
        logger.info(f'processing {file_name}')
        title: Optional[str] = None
        book_key: Optional[BookType] = None
        book_started: bool = False
        paragraphs: List[List[str]] = []
        num_newline_count: int = 0
        line_number: int = 0
        with open(file_path, 'r') as current_file:
            while True:
                line = current_file.readline()
                line_number += 1
                line_trim: Optional[str] = None
                if line:
                    line_trim = line.strip()
                # Book body starts at the Gutenberg marker or at the
                # hard-coded start line for the detected book.
                if not book_started and \
                        ((line_trim is not None and line_trim.startswith(start_book))
                         or (book_key is not None and line_number >= start_end_map[book_key].start)):
                    book_started = True
                # EOF, Gutenberg end marker, "the end", or the hard-coded
                # end line all terminate the read.
                if line_trim is None or line_trim.startswith(end_book) \
                        or line_trim == the_end or \
                        (book_key is not None and line_number >= start_end_map[book_key].end):
                    # done with reading the file
                    break
                if not book_started:
                    # Header section: pull title and author metadata.
                    if title is None and line_trim.startswith(title_split):
                        title = line_trim.split(title_split)[1]
                        logger.info(f'title: {title}')
                    if book_key is None and line_trim.startswith(author_split):
                        author: str = line_trim.split(author_split)[1]
                        logger.info(f'author: {author}')
                        book_key = BookType(author.split(' ')[-1])
                else:
                    # BUG FIX: the original tested line.startswith(chapter)
                    # twice; the second test should use the (otherwise
                    # unused) ``adventure`` heading marker.
                    if len(line_trim) < min_line_len or \
                            line.startswith(chapter) or line.startswith(adventure):
                        num_newline_count += 1
                    else:
                        # BUG FIX: check that a paragraph exists before
                        # peeking at paragraphs[-1]; previously a quoted
                        # line at the very start raised IndexError.
                        multi_line_quotes = len(paragraphs) > 0 \
                            and line_trim.startswith(multi_quote_identifier) \
                            and paragraphs[-1][0].startswith(multi_quote_identifier)
                        if len(paragraphs) == 0 or \
                                (num_newline_count > 0 and not multi_line_quotes):
                            paragraphs.append([])
                            num_newline_count = 0
                        paragraphs[-1].append(line_trim)
        if not found_files:
            raise RuntimeError('no files found')
        if book_key is None:
            raise RuntimeError('no book key found')
        class_name = class_map[book_key]
        logger.info(
            f'number of paragraphs in class "{class_name}": {len(paragraphs)}')
        paragraphs = [[normalize_sentence(sentence) for sentence in paragraph] for paragraph in paragraphs]
        data = pd.concat([data, pd.DataFrame({
            paragraph_key: paragraphs,
            label_key: [class_name] * len(paragraphs),
            class_key: class_count
        })], ignore_index=True)
        label_list.append(book_key)
        class_count += 1
    data.to_csv(clean_data_path, index=False)
    with open(classes_path, 'w') as classes_file:
        label_list_str = [elem.name for elem in label_list]
        yaml.dump(label_list_str, classes_file)
    return data, label_list
# Allow running the cleaning step directly as a script.
if __name__ == '__main__':
    clean()
| 39.129252
| 107
| 0.619784
| 734
| 5,752
| 4.592643
| 0.211172
| 0.033225
| 0.019282
| 0.010679
| 0.143578
| 0.077722
| 0.04509
| 0.029071
| 0.029071
| 0.029071
| 0
| 0.005176
| 0.29468
| 5,752
| 146
| 108
| 39.39726
| 0.825733
| 0.052156
| 0
| 0.017857
| 0
| 0
| 0.074478
| 0.017372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.098214
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ccd6d8d9bc17b243d312e04343cd6c75bdd27f
| 6,041
|
py
|
Python
|
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
miniproject/api/organization/views.py
|
dandy7373/HR_web
|
65dd80159c7e3113961d55ef126b7df75c7bda13
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from rest_framework.generics import RetrieveAPIView,CreateAPIView
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST,HTTP_201_CREATED
from rest_framework.views import APIView
from .models import UserOrganization
from api.individual.models import Userprofile
from .serializers import UserOrganizationRegistrationSerializer,OrganizationLoginSerializer, OrganizationSerializer
from bson import ObjectId
class OrganizationLoginView(APIView):
    """Validate an organization's e-mail/password pair.

    POST body: ``email``, ``password``.  Responds with
    ``{"success": "True"}`` (200) or ``{"success": "False"}`` (400).
    """
    permission_classes = [AllowAny]

    def get_object(self):
        print(self.request.data)
        return UserOrganization.objects.all()

    def post(self, request):
        success = {"success": "True"}
        failure = {"success": "False"}
        try:
            obj = UserOrganization.objects.get(email=str(request.data['email']))
            # SECURITY: passwords appear to be stored and compared in plain
            # text; this should use Django's password hashing
            # (make_password / check_password) -- flagged for follow-up.
            if obj.password == str(request.data['password']):
                return Response(success, status=HTTP_200_OK)
            else:
                return Response(failure, status=HTTP_400_BAD_REQUEST)
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so that
            # SystemExit/KeyboardInterrupt are not swallowed; missing keys
            # or an unknown e-mail still map to a 400 response.
            return Response(failure, status=HTTP_400_BAD_REQUEST)
class OrganizationRegistrationView(APIView):
    """Register a new organization account.

    POST body must contain ``email``, ``password``, ``name`` and
    ``name_org``; all values are stored stringified.
    """
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        # BUG FIX: the original chained the presence checks with ``and``,
        # so the request was rejected only when *all four* fields were
        # missing.  Any single missing required field must fail validation.
        required = ('email', 'password', 'name', 'name_org')
        if any(field not in request.data for field in required):
            return Response({"success": False}, status=HTTP_400_BAD_REQUEST)
        print(str(request.data['email']))
        dic = {}
        for key in request.data:
            dic.update({key: str(request.data[key])})
        UserOrganization.objects.create(**dic)
        response = {
            "success": "True",
            "status code": HTTP_200_OK,
            "message": "User registered successfully",
        }
        return Response(response, status=HTTP_200_OK)
class ApproveLeaveView(APIView):
    # Approves a pending leave request: removes it from the organization's
    # approval queue and marks the matching entry on the employee's own
    # leave list as completed.
    permission_classes=[AllowAny]
    def post(self,request,*args,**kwargs):
        # Expected payload keys: to_email (organization), from_email
        # (employee), index (position in the approval queue), and
        # from_date/to_date (identify the leave on the employee side).
        user=UserOrganization.objects.get(email=request.data['to_email'])
        individual=Userprofile.objects.get(email=request.data['from_email'])
        leaves=list(user.Leaves_to_be_approved)
        # NOTE(review): ``leave`` is assigned but never used afterwards;
        # it only validates that the indexed entry has a 'completed' key --
        # confirm intent before removing.
        leave=leaves[int(request.data['index'])]['completed']
        # Remove the request from the organization's pending queue.
        leaves.pop(int(request.data['index']))
        user.Leaves_to_be_approved=leaves
        user.save()
        lis=list(individual.leave)
        print(request.data)
        print(lis)
        # Locate the matching leave entry on the employee record by its
        # date range; -1 means no match was found.
        index=-1
        for i in lis:
            if i['from_date']==request.data['from_date'] and i['to_date']==request.data['to_date']:
                index=lis.index(i)
                break
        if index==-1:
            # No matching leave on the employee side: report failure.
            return Response({"success":"False"},HTTP_400_BAD_REQUEST)
        # Flag the employee's entry as completed and persist it.
        lis[index]['completed']="True"
        print(lis)
        individual.leave=lis
        individual.save()
        return Response({"success":"True"},HTTP_200_OK)
class AssignWorkView(APIView):
    """Assign a unit of work to every employee of an organization.

    POST body: ``email`` (the organization) plus arbitrary work fields;
    each employee gets the first value of every submitted field.
    """
    permission_classes = [AllowAny]

    def post(self, request, *args, **kwargs):
        user = UserOrganization.objects.get(email=request.data['email'])
        print(user.employees)
        print(user.employees is None)
        if user.employees is None:
            # No staff to assign work to.  (A dead ``lis = []`` assignment
            # that preceded this early return was removed.)
            return Response('No employees', HTTP_400_BAD_REQUEST)
        lis = list(user.employees)
        # BUG FIX: the original reused ``i`` for both the employee loop and
        # the field loop, shadowing the outer variable; the loops now use
        # distinct names and iterate the employee records directly.
        for emp in lis:
            try:
                ind = Userprofile.objects.get(_id=ObjectId(emp['_id']))
                print(ind)
                if ind.workassigned is None:
                    work = []
                else:
                    work = list(ind.workassigned)
                print(True)
                dic = {}
                for field in request.data:
                    dic[field] = request.data[field][0]
                print(dic)
                work.append(dic)
                ind.workassigned = work
                print('yes')
                ind.save()
                print('all')
            except Exception:
                # Narrowed from a bare ``except:``; any per-employee
                # failure aborts the whole assignment with a 400.
                return Response({"success":"False"}, HTTP_400_BAD_REQUEST)
        # Record the assignment on the organization as well.
        if user.work_assigned is None:
            assigned = []
        else:
            assigned = list(user.work_assigned)
        assigned.append(request.data)
        print(assigned)
        user.work_assigned = assigned
        user.save()
        print(user.work_assigned)
        return Response({"success":"True"}, HTTP_200_OK)
class AddEmployeeView(APIView):
    """Create a Userprofile and register it under the organization.

    POST body: ``from_email`` (the organization) plus the new employee's
    profile fields.
    """
    permission_classes = [AllowAny, ]

    def post(self, request, *args, **kwargs):
        print(request.data)
        dic = {}
        from_email = request.data['from_email']
        for key in request.data:
            if key != 'from_email':
                dic[key] = request.data[key]
        dic['created_by'] = from_email
        print(dic)
        try:
            ind = Userprofile.objects.create(**dic)
            user = UserOrganization.objects.get(email=from_email)
            # BUG FIX: the new employee id was appended only when the
            # employee list was empty, and the list was never written back
            # to the model, so user.save() persisted nothing.  Always
            # append the new id and assign the list before saving.
            lis = list(user.employees) if user.employees is not None else []
            lis.append(ind._id)
            user.employees = lis
            user.save()
            return Response({'success':'True'}, HTTP_200_OK)
        except Exception:
            # Narrowed from a bare ``except:``.
            return Response({'success':'False'}, HTTP_400_BAD_REQUEST)
class GetLeaves(APIView):
    """Return an organization's pending leave-approval queue.

    GET parameter: ``email`` identifies the organization.
    """

    def get(self, request, *args, **kwargs):
        try:
            print(request.GET)
            print(kwargs)
            user = UserOrganization.objects.get(email=request.GET.get('email'))
            print(user)
            return Response({'leaves':user.Leaves_to_be_approved,'success':"True"},HTTP_200_OK)
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so that
            # SystemExit/KeyboardInterrupt are not swallowed.
            return Response({'success':False},HTTP_400_BAD_REQUEST)
class GetWorks(APIView):
    """Return the work items assigned by an organization.

    GET parameter: ``email`` identifies the organization.
    """

    def get(self, request, **kwargs):
        print(request.GET.get('email'))
        try:
            user = UserOrganization.objects.get(email=request.GET.get('email'))
            return Response({'works':user.work_assigned,'success':"True"},HTTP_200_OK)
        except Exception:
            # BUG FIX: narrowed from a bare ``except:`` so that
            # SystemExit/KeyboardInterrupt are not swallowed.
            return Response({'success':False},HTTP_400_BAD_REQUEST)
| 37.290123
| 140
| 0.608343
| 675
| 6,041
| 5.30963
| 0.17037
| 0.07673
| 0.027902
| 0.047433
| 0.40904
| 0.301618
| 0.275391
| 0.260882
| 0.196429
| 0.153181
| 0
| 0.014393
| 0.275451
| 6,041
| 162
| 141
| 37.290123
| 0.804432
| 0
| 0
| 0.355705
| 0
| 0
| 0.066203
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053691
| false
| 0.013423
| 0.067114
| 0
| 0.315436
| 0.134228
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ccf00c2aab76068a1c4fc3ab1b4c929b9cff1a
| 9,378
|
py
|
Python
|
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
nutils/cli.py
|
JochenHinz/nutils
|
ac18dd6825b107e2e4c186ebb1598dbf0fff0f77
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2014 Evalf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
The cli (command line interface) module provides the `cli.run` function that
can be used set up properties, initiate an output environment, and execute a
python function based arguments specified on the command line.
"""
from . import util, config, long_version, warnings, matrix, cache
import sys, inspect, os, io, time, pdb, signal, subprocess, contextlib, traceback, pathlib, html, treelog as log, stickybar
def _version():
    """Return the long version string, annotated with the git revision.

    Appends ``(git:<hash>)`` when the source tree is a git checkout; a
    trailing ``+`` marks uncommitted changes.  Falls back to the plain
    ``long_version`` when git is unavailable or the commands fail.
    """
    try:
        githash = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'], universal_newlines=True, stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)).strip()
        if subprocess.check_output(['git', 'status', '--untracked-files=no', '--porcelain'], stderr=subprocess.DEVNULL, cwd=os.path.dirname(__file__)):
            githash += '+'  # working tree carries uncommitted changes
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so that
        # KeyboardInterrupt/SystemExit propagate; any git failure still
        # falls back to the unannotated version string.
        return long_version
    else:
        return '{} (git:{})'.format(long_version, githash)
def _mkbox(*lines):
    """Frame *lines* in a text box (unicode when config.richoutput)."""
    width = max(map(len, lines))
    ul, ur, ll, lr, hh, vv = '┌┐└┘─│' if config.richoutput else '++++-|'
    top = ul + hh * (width+2) + ur
    bottom = ll + hh * (width+2) + lr
    body = [vv + (' '+line).ljust(width+2) + vv for line in lines]
    return '\n'.join([top, *body, bottom])
def _sigint_handler(mysignal, frame):
    """Interactive SIGINT handler: quit, continue, or drop into pdb."""
    # Ignore further interrupts while we are prompting the user.
    previous = signal.signal(mysignal, signal.SIG_IGN)
    try:
        answer = None
        while answer not in ('q', 'c', 'd'):
            answer = input('interrupted. quit, continue or start debugger? [q/c/d]')
        if answer == 'q':
            raise KeyboardInterrupt
        if answer == 'd':  # handled after the loop, to minimize code after set_trace
            print(_mkbox(
                'TRACING ACTIVATED. Use the Python debugger',
                'to step through the code at source line',
                'level, list source code, set breakpoints,',
                'and evaluate arbitrary Python code in the',
                'context of any stack frame. Type "h" for',
                'an overview of commands to get going, or',
                '"c" to continue uninterrupted execution.'))
            pdb.set_trace()
    finally:
        # Restore whatever handler was installed before.
        signal.signal(mysignal, previous)
def _hms(dt):
seconds = int(dt)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return hours, minutes, seconds
def run(func, *, skip=1, loaduserconfig=True):
    '''parse command line arguments and call function'''
    # Collect config layers: terminal detection first, then any user
    # config files that exist (~/.config/nutils/config, ~/.nutilsrc).
    configs = []
    if loaduserconfig:
        home = os.path.expanduser('~')
        configs.append(dict(richoutput=sys.stdout.isatty()))
        configs.extend(path for path in (os.path.join(home, '.config', 'nutils', 'config'), os.path.join(home, '.nutilsrc')) if os.path.isfile(path))
    params = inspect.signature(func).parameters.values()
    # --help: print one usage line per function parameter and exit.
    if '-h' in sys.argv[skip:] or '--help' in sys.argv[skip:]:
        print('usage: {} (...)'.format(' '.join(sys.argv[:skip])))
        print()
        for param in params:
            cls = param.default.__class__
            # Booleans are toggled via --name/--noname; everything else
            # takes --name=VALUE of the default's type.
            print('  --{:<20}'.format(param.name + '=' + cls.__name__.upper() if cls != bool else '(no)' + param.name), end=' ')
            if param.annotation != param.empty:
                print(param.annotation, end=' ')
            print('[{}]'.format(param.default))
        sys.exit(1)
    # Seed keyword arguments from the function's own defaults.
    kwargs = {param.name: param.default for param in params}
    cli_config = {}
    for arg in sys.argv[skip:]:
        name, sep, value = arg.lstrip('-').partition('=')
        if not sep:
            # Flag form: --name sets True, --noname sets False.
            value = not name.startswith('no')
            if not value:
                name = name[2:]
        # Arguments matching a function parameter go to kwargs; anything
        # else must be an attribute of the global config object.
        if name in kwargs:
            default = kwargs[name]
            args = kwargs
        else:
            try:
                default = getattr(config, name)
            except AttributeError:
                print('invalid argument {!r}'.format(arg))
                sys.exit(2)
            args = cli_config
        try:
            if isinstance(default, bool) and not isinstance(value, bool):
                raise Exception('boolean value should be specifiec as --{0}/--no{0}'.format(name))
            # Coerce the string to the default's type (int, float, str, ...).
            args[name] = default.__class__(value)
        except Exception as e:
            print('invalid argument for {!r}: {}'.format(name, e))
            sys.exit(2)
    # Apply all config layers for the duration of the call.
    with config(*configs, **cli_config):
        status = call(func, kwargs, scriptname=os.path.basename(sys.argv[0]), funcname=None if skip==1 else func.__name__)
    sys.exit(status)
def choose(*functions, loaduserconfig=True):
  '''Parse command line arguments and call one of several functions,
  selected by name via the first positional argument.'''
  assert functions, 'no functions specified'
  names = [f.__name__ for f in functions]
  if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help'):
    print('usage: {} [{}] (...)'.format(sys.argv[0], '|'.join(names)))
    sys.exit(1)
  try:
    selected = functions[names.index(sys.argv[1])]
  except ValueError:
    print('invalid argument {!r}; choose from {}'.format(sys.argv[1], ', '.join(names)))
    sys.exit(2)
  run(selected, skip=2, loaduserconfig=loaduserconfig)
def call(func, kwargs, scriptname, funcname=None):
  '''set up compute environment and call function

  Returns a process exit status: 0 on success, 1 when interrupted by the
  user, 2 on any other exception.
  '''
  # resolve output directory: explicit config.outdir wins, otherwise a
  # per-script subdirectory under config.outrootdir
  outdir = config.outdir or os.path.join(os.path.expanduser(config.outrootdir), scriptname)
  with contextlib.ExitStack() as stack:
    # caching, matrix backend and console logging are all context-managed
    stack.enter_context(cache.enable(os.path.join(outdir, config.cachedir)) if config.cache else cache.disable())
    stack.enter_context(matrix.backend(config.matrix))
    stack.enter_context(log.set(log.FilterLog(log.RichOutputLog() if config.richoutput else log.StdoutLog(), minlevel=5-config.verbose)))
    if config.htmloutput:
      # open an html log and announce its uri: via a live status bar when
      # rich output is on, otherwise as a plain log line
      htmllog = stack.enter_context(log.HtmlLog(outdir, title=scriptname, htmltitle='<a href="http://www.nutils.org">{}</a> {}'.format(SVGLOGO, html.escape(scriptname)), favicon=FAVICON))
      uri = (config.outrooturi.rstrip('/') + '/' + scriptname if config.outrooturi else pathlib.Path(outdir).resolve().as_uri()) + '/' + htmllog.filename
      if config.richoutput:
        t0 = time.perf_counter()
        bar = lambda running: '{0} [{1}] {2[0]}:{2[1]:02d}:{2[2]:02d}'.format(uri, 'RUNNING' if running else 'STOPPED', _hms(time.perf_counter()-t0))
        stack.enter_context(stickybar.activate(bar, update=1))
      else:
        log.info('opened log at', uri)
      # record the effective call arguments at the top of the html log
      htmllog.write('<ul style="list-style-position: inside; padding-left: 0px; margin-top: 0px;">{}</ul>'.format(''.join(
        '<li>{}={} <span style="color: gray;">{}</span></li>'.format(param.name, kwargs.get(param.name, param.default), param.annotation)
          for param in inspect.signature(func).parameters.values())), level=1, escape=False)
      stack.enter_context(log.add(htmllog))
    stack.enter_context(warnings.via(log.warning))
    # install the SIGINT handler; the callback restores the previous one on exit
    stack.callback(signal.signal, signal.SIGINT, signal.signal(signal.SIGINT, _sigint_handler))
    log.info('nutils v{}'.format(_version()))
    log.info('start', time.ctime())
    try:
      func(**kwargs)
    except (KeyboardInterrupt, SystemExit, pdb.bdb.BdbQuit):
      log.error('killed by user')
      return 1
    except:
      # any other failure: log the traceback and optionally drop into the
      # post-mortem debugger before reporting failure status
      log.error(traceback.format_exc())
      if config.pdb:
        print(_mkbox(
          'YOUR PROGRAM HAS DIED. The Python debugger',
          'allows you to examine its post-mortem state',
          'to figure out why this happened. Type "h"',
          'for an overview of commands to get going.'))
        pdb.post_mortem()
      return 2
    else:
      log.info('finish', time.ctime())
      return 0
# Inline SVG logo embedded in the html log header.
SVGLOGO = '''\
<svg style="vertical-align: middle;" width="32" height="32" xmlns="http://www.w3.org/2000/svg">
<path d="M7.5 19 v-6 a6 6 0 0 1 12 0 v6 M25.5 13 v6 a6 6 0 0 1 -12 0 v-6" fill="none" stroke-width="3" stroke-linecap="round"/>
</svg>'''
# PNG favicon for the html log, inlined as a base64 data uri so the log file
# has no external resource dependencies.
FAVICON = 'data:image/png;base64,' \
  'iVBORw0KGgoAAAANSUhEUgAAADAAAAAwCAQAAAD9CzEMAAACAElEQVRYw+2YS04bQRCGP2wJ' \
  'gbAimS07WABXGMLzAgiBcgICFwDEEiGiDCScggWPHVseC1AIZ8AIJBA2hg1kF5DiycLYqppp' \
  'M91j2KCp3rSq//7/VldPdfVAajHW0nAkywDjeHSTBx645IRdfvPvLWTbWeSewNDuWKC9Wfov' \
  '3BjJa+2aqWa2bInKq/QBARV8MknoM2zHktfaVhKJ79b0AQEr7nsfpthjml466KCPr+xHNmrS' \
  '7eTo0J4xFMEMUwiFu81eYFFNPSJvROU5Vrh5W/qsOvdnDegBOjkXyDJZO4Fhta7RV7FDCvvZ' \
  'TmBdhTbODgT6R9zJr9qA8G2LfiurlCji0yq8O6LvKT4zHlQEeoXfr3t94e1TUSAWDzyJKTnh' \
  'L9W9t8KbE+i/iieCr6XroEEKb9qfee8LJxVIBVKBjyRQqnuKavxZpTiZ1Ez4Typ9KoGN+sCG' \
  'Evgj+l2ib8ZLxCOhi8KnaLgoTkVino7Fzwr0L7st/Cmm7MeiDwV6zU5gUF3wYw6Fg2dbztyJ' \
  'SQWHcsb6fC6odR3T2YBeF2RzLiXltZpaYCSCGVWrD7hyKSlhKvJiOGCGfnLk6GdGhbZaFE+4' \
  'fo7fnMr65STf+5Y1/Way9PPOT6uqTYbCHW5X7nsftjbmKRvJy8yZT05Lgnh4jOPR8/JAv+CE' \
  'XU6ppH81Etp/wL7MKaEwo4sAAAAASUVORK5CYII='
# vim:sw=2:sts=2:et
| 44.028169
| 187
| 0.683301
| 1,204
| 9,378
| 5.273256
| 0.375415
| 0.00945
| 0.018743
| 0.006143
| 0.055442
| 0.044101
| 0.044101
| 0.041266
| 0.01197
| 0.01197
| 0
| 0.026173
| 0.17701
| 9,378
| 212
| 188
| 44.235849
| 0.795672
| 0.161122
| 0
| 0.121019
| 0
| 0.025478
| 0.276237
| 0.115557
| 0
| 0
| 0
| 0
| 0.006369
| 1
| 0.044586
| false
| 0
| 0.012739
| 0
| 0.101911
| 0.070064
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ceb804c95eaa5e6ed011f7728feba8c174befd
| 6,336
|
py
|
Python
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 3
|
2019-06-11T10:19:25.000Z
|
2022-02-28T22:58:29.000Z
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | 7
|
2019-02-04T08:57:54.000Z
|
2021-11-01T12:42:03.000Z
|
experiments/alpha_analysis.py
|
oeg-upm/tada-entity
|
6e538129229bed49bf1aa960fcd97a8468eca765
|
[
"Apache-2.0"
] | null | null | null |
"""
This script analyses optimal alphas for each class and draws them in a box and whisker plot
"""
import pandas as pd
import argparse
import seaborn as sns
import matplotlib.pyplot as plt
import itertools
def shorten_uri(class_uri, base="http://dbpedia.org/ontology/", pref="dbo:"):
    """Abbreviate a class URI by swapping the ontology base for its prefix."""
    shortened = class_uri.replace(base, pref)
    return shortened
def get_classes(fpath, dataset):
    """Build a mapping from csv file name to its class URI.

    :param fpath: path to the meta file (one comma-separated record per line)
    :param dataset: meta-file flavour, "wcv1" or "wcv2" (field layouts differ)
    :return: dict of {"<fname>.csv": class_uri}
    :raises Exception: if dataset is not a known flavour
    """
    d = dict()
    # fix: use a context manager so the meta file is always closed (the
    # original left the handle open), and iterate lazily instead of readlines()
    with open(fpath) as f:
        for line in f:
            sline = line.strip()
            if sline == "":
                continue
            if dataset == "wcv2":
                fname, _, class_uri = sline.split(',')
            elif dataset == "wcv1":
                fname, _, class_uri, _ = sline.split(',')
                # wcv1 file names carry an extension that must be stripped
                fname = fname.split(".")[0]
            else:
                raise Exception("Unknown dataset")
            fname = fname.replace('"', '')
            fname += ".csv"
            class_uri = class_uri.replace('"', "")
            d[fname] = class_uri
    return d
def analyse_alpha_for_all(falpha, classes, draw_fname, midalpha):
    """Plot the optimal-alpha distribution for every feature-set id (1..5).

    :param falpha: path to the csv file with the alpha results
    :param classes: a dict of fnames and their classes
    :param draw_fname: prefix for the generated figure names
    :param midalpha: plot mid alphas only instead of the from/to ranges
    """
    df_all = pd.read_csv(falpha)
    for fsid in range(1, 6):
        subset = df_all[df_all.fsid == fsid]
        per_class = aggregate_alpha_per_class(subset, classes)
        analyse_alpha(per_class, "%s_fsid%d" % (draw_fname, fsid), midalpha)
def analyse_alpha(alpha_per_class, draw_fname, midalpha):
    """Draw a horizontal box-and-whisker plot of alpha values per class.

    :param alpha_per_class: dict of class uri -> {'from_alpha': [...],
        'to_alpha': [...], 'mid_alpha': [...]} as built by
        aggregate_alpha_per_class
    :param draw_fname: figure name; the plot is saved as docs/<draw_fname>.svg
    :param midalpha: if True plot only the mid alphas (legend removed),
        otherwise the from/to alpha values
    """
    rows = []
    if midalpha:
        attrs = ['mid_alpha']
    else:
        attrs = ['from_alpha', 'to_alpha']
    # attrs = ['from_alpha', 'to_alpha', 'mid_alpha']
    # attrs = ['mid_alpha']
    # flatten the dict into (class, alpha, attribute) rows for seaborn
    for c in alpha_per_class:
        for a_attr in attrs:
            for a in alpha_per_class[c][a_attr]:
                if a < 0:
                    # negative alphas are skipped — presumably mark
                    # missing/invalid results; TODO confirm upstream
                    continue
                r = [shorten_uri(c), a, a_attr]
                rows.append(r)
                print(r)
    # print(rows)
    data = pd.DataFrame(rows, columns=["Class", "Alpha", "Attr"])
    # vertical variant, kept for reference:
    # ax = sns.boxplot(x="Class", y="Alpha",
    # hue="Attr",
    # data=data, linewidth=1.0,
    # # palette="colorblind",
    # palette="Spectral",
    # # palette="pastel",
    # dodge=True,
    # # palette="ch:start=.2,rot=-.3",
    # orient="v",
    # flierprops=dict(markerfacecolor='0.50', markersize=2), whiskerprops={'linestyle': '-'})
    ax = sns.boxplot(x="Alpha", y="Class",
                     hue="Attr",
                     data=data, linewidth=1.0,
                     # palette="colorblind",
                     palette="Spectral",
                     # palette="pastel",
                     dodge=True,
                     # palette="ch:start=.2,rot=-.3",
                     orient="h",
                     flierprops=dict(markerfacecolor='0.50', markersize=2))
    ax.legend(bbox_to_anchor=(1.0, -0.1), borderaxespad=0)
    if midalpha:
        # to remove legend (single attribute needs no legend)
        ax.legend_.remove()
    ax.set_xlim(0, 0.7)
    # ax.set_ylim(0, 0.7)
    # Horizontal: re-apply the y tick labels with a smaller font size
    ticks = ax.get_yticks()
    new_ticks = [t for t in ticks]
    texts = ax.get_yticklabels()
    print(ax.get_yticklabels())
    labels = [t.get_text() for t in texts]
    ax.set_yticks(new_ticks)
    ax.set_yticklabels(labels, fontsize=8)
    print(ax.get_yticklabels())
    # Vertical
    # ticks = ax.get_xticks()
    # new_ticks = [t-1 for t in ticks]
    # texts = ax.get_xticklabels()
    # print(ax.get_xticklabels())
    # labels = [t.get_text() for t in texts]
    # ax.set_xticks(new_ticks)
    # ax.set_xticklabels(labels)
    # print(ax.get_xticklabels())
    # for i, box in enumerate(ax.artists):
    # box.set_edgecolor('black')
    # To change bar colors
    # plt.setp(ax.artists, edgecolor='k', facecolor='w')
    # To make whiskers black
    plt.setp(ax.lines, color='k')
    # [t.set_rotation(70) for t in ax.get_xticklabels()]
    #plt.show()
    # ax.figure.savefig('docs/%s.svg' % draw_fname)
    ax.figure.savefig('docs/%s.svg' % draw_fname, bbox_inches="tight")
    # clear the figure so the next call starts from a clean state
    ax.figure.clf()
def aggregate_alpha_per_class(df, classes):
    """Group alpha ranges by class.

    The DataFrame rows follow the layout ``fname,colid,fsid,from_alpha,to_alpha``.

    :param df: DataFrame of a meta file
    :param classes: a dict of fnames and their classes
    :return: dict of class uri -> {'from_alpha': [...], 'to_alpha': [...],
        'mid_alpha': [...]}
    :raises KeyError: if a row's fname is missing from classes
    """
    d = dict()
    for idx, row in df.iterrows():
        # fix: dropped the per-row DEBUG prints of the whole classes dict,
        # which flooded stdout on large meta files
        c = classes[row['fname']]
        if c not in d:
            d[c] = {'from_alpha': [], 'to_alpha': [], 'mid_alpha': []}
        d[c]['from_alpha'].append(row['from_alpha'])
        d[c]['to_alpha'].append(row['to_alpha'])
        # midpoint of the optimal range
        d[c]['mid_alpha'].append((row['from_alpha'] + row['to_alpha'])/2)
    return d
def workflow(falpha, fmeta, draw_fpath, midalpha, dataset):
    """Glue step: load the meta-file classes, then run the per-fsid analysis."""
    analyse_alpha_for_all(falpha, get_classes(fmeta, dataset), draw_fpath, midalpha)
def main():
    """Parse the command line arguments and run the analysis workflow."""
    parser = argparse.ArgumentParser(description='Alpha Analysis')
    parser.add_argument('falpha', help="The path to the alpha results file.")
    parser.add_argument('fmeta', help="The path to the meta file which contain the classes.")
    parser.add_argument('dataset', choices=["wcv1", "wcv2", "st19-r1", "st19-r2", "st19-r3", "st19-r4"],
                        help="The name of the dataset as the meta file differ for each")
    parser.add_argument('--draw', default="test.svg", help="The filename prefix to draw (without the extension)")
    parser.add_argument('--midalpha', action="store_true", default=False,
                        help="Whether to report the mid ranges of the optimal alpha or just the ranges")
    # usage and full help are always echoed before parsing (original behaviour)
    parser.print_usage()
    parser.print_help()
    parsed = parser.parse_args()
    workflow(parsed.falpha, parsed.fmeta, parsed.draw, parsed.midalpha, parsed.dataset)


if __name__ == "__main__":
    main()
| 34.622951
| 114
| 0.574337
| 812
| 6,336
| 4.316502
| 0.280788
| 0.012839
| 0.029101
| 0.01826
| 0.297575
| 0.24194
| 0.210556
| 0.137518
| 0.077033
| 0.077033
| 0
| 0.011847
| 0.280619
| 6,336
| 182
| 115
| 34.813187
| 0.75713
| 0.283617
| 0
| 0.121212
| 0
| 0
| 0.145305
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070707
| false
| 0
| 0.050505
| 0.010101
| 0.151515
| 0.070707
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85cf779b9a1cc2e9b35950583be014be08b8ba73
| 1,009
|
py
|
Python
|
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | 1
|
2020-02-20T12:04:46.000Z
|
2020-02-20T12:04:46.000Z
|
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
p039m/combination_sum.py
|
l33tdaima/l33tdaima
|
0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
    def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:
        """Return every unique combination of candidates (with repetition)
        whose sum equals target, in sorted-candidate order."""
        candidates.sort()
        results: List[List[int]] = []

        def backtrack(chosen: List[int], remaining: int, first: int) -> None:
            # depth-first search: each level may reuse the current candidate
            if remaining == 0:
                results.append(list(chosen))
                return
            if remaining < 0:
                return
            for idx in range(first, len(candidates)):
                chosen.append(candidates[idx])
                backtrack(chosen, remaining - candidates[idx], idx)
                chosen.pop()

        backtrack([], target, 0)
        return results
# TESTS
tests = [
    ([2, 3, 6, 7], 7, [[2, 2, 3], [7]]),
    ([2, 3, 5], 8, [[2, 2, 2, 2], [2, 3, 3], [3, 5]]),
    ([2], 1, []),
    ([1], 1, [[1]]),
    ([1], 2, [[1, 1]]),
]
for candidates, target, expected in tests:
    actual = Solution().combinationSum(candidates, target)
    print("Combinations in", candidates, "sum to", target, "->", actual)
    assert actual == expected
| 27.27027
| 84
| 0.489594
| 123
| 1,009
| 4.01626
| 0.357724
| 0.020243
| 0.018219
| 0.064777
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05052
| 0.333003
| 1,009
| 36
| 85
| 28.027778
| 0.683507
| 0.004955
| 0
| 0.068966
| 0
| 0
| 0.022954
| 0
| 0
| 0
| 0
| 0
| 0.034483
| 1
| 0.068966
| false
| 0
| 0.034483
| 0
| 0.241379
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85d1bb79ecc810612d2ce67b9924416144e6d28f
| 7,706
|
py
|
Python
|
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
singleimagemodel.py
|
severinaklingler/kaggle-ocular-disease
|
a6641f6005d1a7f2399b4de9e804ab3ac7f20dd2
|
[
"Apache-2.0"
] | null | null | null |
from logging import getLevelName
import numpy as np
import os
import tensorflow as tf
import pathlib
import pandas as pd
import re
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten , Conv1D
from tensorflow.keras.layers import concatenate
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import MaxPooling2D,MaxPooling1D
from tensorflow.keras.utils import plot_model
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
import datetime
import argparse
# Global config (TODO)
random_seed = 77  # NOTE(review): defined but not applied anywhere in this file — confirm intent
data_path = "./input/ocular-disease-recognition-odir5k/preprocessed_images/"
data_path_tensor = tf.constant(data_path)  # tensor copy of the path, joined with filenames in process_filename
data_dir = pathlib.Path(data_path)
AUTOTUNE = tf.data.AUTOTUNE
batch_size = 32
img_height = 224  # images are resized to img_height x img_width in decode_img
img_width = 224
class_count = 8  # number of classes; reports label them N,D,G,C,A,H,M,O (see test())
image_channels = 3
num_threads = 4  # num_parallel_calls for dataset .map() stages
label_dict = {}  # filename -> label lookup table; replaced by load_datasets()
# tf.config.run_functions_eagerly(True)
def load_sample_ids(df, val_size):
    """Split the unique sample ids of df into (train, val, test) datasets.

    Validation and test each receive val_size ids; the remainder is training.
    """
    ids = df['ID'].to_list()
    shuffled = tf.data.Dataset.from_tensor_slices(ids).unique().shuffle(len(ids))
    val_ds = shuffled.take(val_size)
    test_ds = shuffled.skip(val_size).take(val_size)
    train_ds = shuffled.skip(2 * val_size)
    return train_ds, val_ds, test_ds
def decode_one_hot(x):
    """Index of the first entry equal to 1 in a one-hot vector
    (raises StopIteration when no entry matches)."""
    return next(idx for idx, flag in enumerate(x) if flag == 1)
def build_label_dictionary(df):
    """Create a tf static lookup table mapping image filename -> int label.

    :param df: DataFrame with 'filename' and 'target' columns, where 'target'
        is the string repr of a one-hot list
    :return: tf.lookup.StaticHashTable returning -1 for unknown filenames
    """
    import ast  # local import: used only here to parse the target strings

    keys = []
    values = []
    for index, row in df.iterrows():
        filename = row['filename']
        # fix: the csv stores the one-hot target as its string repr; parse it
        # with ast.literal_eval instead of eval so no arbitrary code from the
        # data file can ever execute
        target = ast.literal_eval(row["target"])
        image_target = decode_one_hot(target)
        keys.append(filename)
        values.append(image_target)
    keys_tensor = tf.constant(keys)
    vals_tensor = tf.constant(values)
    table = tf.lookup.StaticHashTable(tf.lookup.KeyValueTensorInitializer(keys_tensor, vals_tensor), default_value=-1)
    return table
def _file_exists(file_path):
    # Eager helper: file_path arrives as a tf byte-string tensor, hence the
    # numpy()/decode round trip before the gfile existence check.
    return tf.io.gfile.exists(data_path + bytes.decode(file_path.numpy()))
def file_exists(file_path):
    # Graph-safe wrapper around _file_exists via tf.py_function; the scalar
    # shape is restored explicitly because py_function drops shape info.
    [exists] = tf.py_function(_file_exists, [file_path], [tf.bool])
    exists.set_shape([])
    return exists
def filenames_from_id(id):
    """Expand a sample id into a two-element dataset of its left/right image
    file names (<id>_left.jpg, <id>_right.jpg)."""
    id_str = tf.strings.as_string(id)
    paths = [id_str + tf.constant("_left.jpg"), id_str + tf.constant("_right.jpg")]
    return tf.data.Dataset.from_tensor_slices(paths)
def decode_img(img):
    """Decode a jpeg byte string and resize it to the configured model size."""
    decoded = tf.io.decode_jpeg(img, channels=image_channels)
    return tf.image.resize(decoded, [img_height, img_width])
def process_filename(filename):
    # Load and decode the image for this filename and pair it with its label
    # from the global lookup table (-1 when the filename has no label entry).
    img = tf.io.read_file(tf.strings.join([data_path_tensor, filename], ''))
    img = decode_img(img)
    return img, label_dict.lookup(filename)
def print_dataset_stats(names, datasets, n=-1):
    """Print the size and first five elements of each named dataset.

    When n > 0 only the first n elements of each dataset are materialized.
    """
    for name, dataset in zip(names, datasets):
        sample = dataset.take(n) if n > 0 else dataset
        d = list(sample.as_numpy_iterator())
        top5 = d[:5]
        print(f"{name} size: {len(d)} . First elements : {top5}")
def label_not_missing(data, label):
    # Filter predicate: keep samples whose label lookup did not return the
    # table's default value -1 (i.e. the filename had a known label).
    return tf.math.not_equal(label,-1)
def prepare_data(ds):
    """Turn a dataset of sample ids into (image, one-hot label) pairs.

    Files that do not exist on disk and samples whose label lookup returned
    -1 are filtered out.
    """
    labelled = (ds.flat_map(filenames_from_id)
                  .filter(file_exists)
                  .map(process_filename, num_parallel_calls=num_threads)
                  .filter(label_not_missing))
    return labelled.map(lambda img, lbl: (img, tf.one_hot(lbl, class_count)), num_parallel_calls=num_threads)
def configure_for_performance(ds):
    """Batch the dataset and prefetch one batch ahead of consumption."""
    return ds.batch(batch_size).prefetch(buffer_size=1)
def show_batch(ds):
    """Display the first eight images of the next batch in a 2x4 grid,
    titling each subplot with its decoded class index."""
    images_batch, label_batch = next(iter(ds))
    plt.figure(figsize=(10, 10))
    for i in range(8):
        ax = plt.subplot(2, 4, i + 1)
        label = label_batch[i]
        print("Image shape: ", images_batch[i].numpy().shape)
        print("label: ", label)
        plt.imshow(images_batch[i].numpy().astype("uint8"))
        plt.title(decode_one_hot(label))
    plt.show()
def create_model():
    """Build the single-image classifier: a 3-filter Conv2D front end feeding
    a pretrained ResNet50 (avg-pooled), followed by a dense head."""
    inp1 = Input(shape=(img_height,img_width,image_channels), name="img")
    # NOTE(review): new_input is handed to ResNet50 as input_tensor while the
    # Model below is wired from inp1 through conv1 — confirm this is intended.
    new_input = Input(shape=(img_height,img_width, image_channels), name="New Input")
    conv1 = Conv2D(3, kernel_size=3, padding ='same', activation='relu', name="conleft1")(inp1)
    i1 = tf.keras.applications.ResNet50(include_top=False,weights="imagenet",input_tensor=new_input,input_shape=None, pooling='avg')(conv1)
    class1 = Dense(1024, activation='relu')(i1)
    class1 = Dense(256, activation='relu')(class1)
    class1 = Dense(64, activation='relu')(class1)
    # per-class sigmoid (multi-label style) rather than softmax
    output = Dense(class_count, activation='sigmoid')(class1)
    model = Model(inputs=[inp1], outputs=output)
    return model
def train_model(model, training_data, validation_data, number_of_epochs):
    """Compile and fit the model, writing an architecture diagram to
    model.png and TensorBoard logs under logs/fit/. Returns the fitted model."""
    METRICS = [
        'accuracy',
        tf.keras.metrics.Precision(),
        tf.keras.metrics.Recall(),
    ]
    model.compile(
        optimizer='Adam',
        loss='binary_crossentropy',  # matches the sigmoid per-class output
        metrics=METRICS
    )
    # dump the network diagram for inspection
    tf.keras.utils.plot_model(
        model,
        to_file="model.png",
        show_shapes=True,
        show_dtype=False,
        show_layer_names=True,
        rankdir="TB",
        expand_nested=False,
        dpi=300,
        layer_range=None,
    )
    # timestamped log directory so successive runs do not collide
    log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=10)
    model.fit(
        training_data,
        validation_data=validation_data,
        epochs=number_of_epochs,
        callbacks=[tensorboard_callback])
    return model
def test(model, test_data):
    """Predict on test_data and print a per-class classification report."""
    yhat = model.predict(test_data)
    yhat = yhat.round()  # threshold the sigmoid outputs at 0.5
    # collect the ground-truth labels from the batched dataset
    y_test = np.concatenate([y for x, y in test_data], axis=0)
    report = classification_report(y_test, yhat,target_names=['N','D','G','C','A','H','M','O'],output_dict=True)
    df = pd.DataFrame(report).transpose()
    print(df)
def load_datasets():
    """Read the ODIR csv, (re)build the global label table, and return
    batched (training, validation, test) datasets."""
    global label_dict
    df = pd.read_csv('./input/ocular-disease-recognition-odir5k/full_df.csv')
    label_dict = build_label_dictionary(df)
    # 500 ids each for validation and test; the remainder is training
    # NOTE(review): locals train/val/test shadow the module-level test()
    train, val, test = load_sample_ids(df, 500)
    training_data = configure_for_performance(prepare_data(train))
    validation_data = configure_for_performance(prepare_data(val))
    test_data = configure_for_performance(prepare_data(test))
    return training_data, validation_data, test_data
def main():
    """CLI entry point: optionally visualize a batch, dump dataset stats,
    train and save a model, and/or load and evaluate a saved model."""
    parser = argparse.ArgumentParser(description='Optional app description')
    parser.add_argument('--show', action='store_true', help='Visualize a training batch')
    parser.add_argument('--train', action='store_true', help='Train model')
    parser.add_argument('--test', action='store_true', help='Test model')
    parser.add_argument('--dump', action='store_true', help='Dump data from first examples')
    parser.add_argument('--name', type=str, help='Name of the model', default="tmpModel")
    parser.add_argument('--epochs', type=int, help='Number of epochs to train', default=40)
    args = parser.parse_args()
    training_data, validation_data, test_data = load_datasets()
    if args.show:
        show_batch(training_data)
    if args.dump:
        print_dataset_stats(["training_data"],[training_data],5)
    if args.train:
        trained_model = train_model(create_model(), training_data, validation_data, args.epochs)
        trained_model.save('models/' + args.name)
    if args.test:
        # reload from disk so --test also works in a run without --train
        model = tf.keras.models.load_model('models/' + args.name)
        test(model, test_data)
if __name__ == '__main__':
    main()
| 32.514768
| 139
| 0.706982
| 1,077
| 7,706
| 4.82637
| 0.278552
| 0.020777
| 0.029242
| 0.028857
| 0.166603
| 0.075798
| 0.029627
| 0.029627
| 0.01693
| 0
| 0
| 0.012197
| 0.170127
| 7,706
| 237
| 140
| 32.514768
| 0.800625
| 0.007527
| 0
| 0.010929
| 0
| 0
| 0.080565
| 0.015041
| 0
| 0
| 0
| 0.004219
| 0
| 1
| 0.098361
| false
| 0
| 0.10929
| 0.016393
| 0.284153
| 0.032787
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85d2ee56a1605c4085ef6834b7da596c8770a900
| 17,167
|
py
|
Python
|
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | 1
|
2021-05-17T06:33:32.000Z
|
2021-05-17T06:33:32.000Z
|
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | null | null | null |
features/steps/prs_steps.py
|
spidezad/python-pptx
|
eab3f55b84b54906876d5486172d5d0c457d55f8
|
[
"BSD-2-Clause"
] | null | null | null |
import os
from datetime import datetime, timedelta
from behave import given, when, then
from hamcrest import (
assert_that, equal_to, has_item, is_, is_not, greater_than, less_than
)
from StringIO import StringIO
from pptx import packaging
from pptx import Presentation
from pptx.constants import MSO_AUTO_SHAPE_TYPE as MAST, MSO, PP
from pptx.util import Inches
def absjoin(*paths):
    """Join *paths* and return the result as an absolute path."""
    joined = os.path.join(*paths)
    return os.path.abspath(joined)
thisdir = os.path.split(__file__)[0]
# Locations used throughout the steps: a scratch area for generated output
# and the shared directory of test fixture files.
scratch_dir = absjoin(thisdir, '../_scratch')
test_file_dir = absjoin(thisdir, '../../test/test_files')
basic_pptx_path = absjoin(test_file_dir, 'test.pptx')
no_core_props_pptx_path = absjoin(test_file_dir, 'no-core-props.pptx')
saved_pptx_path = absjoin(scratch_dir, 'test_out.pptx')
test_image_path = absjoin(test_file_dir, 'python-powered.png')
test_text = "python-pptx was here!"
# logging.debug("saved_pptx_path is ==> '%s'\n", saved_pptx_path)
# given ===================================================
@given('a clean working directory')
def step_given_clean_working_dir(context):
    # remove output from a previous run so later save assertions start clean
    if os.path.isfile(saved_pptx_path):
        os.remove(saved_pptx_path)
@given('an initialized pptx environment')
def step_given_initialized_pptx_env(context):
    pass
@given('I have a reference to a blank slide')
def step_given_ref_to_blank_slide(context):
    context.prs = Presentation()
    # slidelayouts[6] — presumably the blank layout of the default template
    slidelayout = context.prs.slidelayouts[6]
    context.sld = context.prs.slides.add_slide(slidelayout)
@given('I have a reference to a bullet body placeholder')
def step_given_ref_to_bullet_body_placeholder(context):
    context.prs = Presentation()
    slidelayout = context.prs.slidelayouts[1]
    context.sld = context.prs.slides.add_slide(slidelayout)
    context.body = context.sld.shapes.placeholders[1]
@given('I have a reference to a chevron shape')
def step_given_ref_to_chevron_shape(context):
    context.prs = Presentation()
    blank_slidelayout = context.prs.slidelayouts[6]
    shapes = context.prs.slides.add_slide(blank_slidelayout).shapes
    # 914400 — presumably EMU for one inch (cf. pptx.util.Inches); confirm
    x = y = cx = cy = 914400
    context.chevron_shape = shapes.add_shape(MAST.CHEVRON, x, y, cx, cy)
@given('I have a reference to a paragraph')
def step_given_ref_to_paragraph(context):
    context.prs = Presentation()
    blank_slidelayout = context.prs.slidelayouts[6]
    slide = context.prs.slides.add_slide(blank_slidelayout)
    length = Inches(2.00)
    textbox = slide.shapes.add_textbox(length, length, length, length)
    context.p = textbox.textframe.paragraphs[0]
@given('I have a reference to a slide')
def step_given_ref_to_slide(context):
    context.prs = Presentation()
    slidelayout = context.prs.slidelayouts[0]
    context.sld = context.prs.slides.add_slide(slidelayout)
@given('I have a reference to a table')
def step_given_ref_to_table(context):
    context.prs = Presentation()
    slidelayout = context.prs.slidelayouts[6]
    sld = context.prs.slides.add_slide(slidelayout)
    shapes = sld.shapes
    x, y = (Inches(1.00), Inches(2.00))
    cx, cy = (Inches(3.00), Inches(1.00))
    context.tbl = shapes.add_table(2, 2, x, y, cx, cy)
@given('I have a reference to a table cell')
def step_given_ref_to_table_cell(context):
    context.prs = Presentation()
    slidelayout = context.prs.slidelayouts[6]
    sld = context.prs.slides.add_slide(slidelayout)
    length = 1000
    tbl = sld.shapes.add_table(2, 2, length, length, length, length)
    context.cell = tbl.cell(0, 0)
@given('I have a reference to the core properties of a presentation')
def step_given_ref_to_core_doc_props(context):
    context.prs = Presentation()
    context.core_properties = context.prs.core_properties
@given('I have an empty presentation open')
def step_given_empty_prs(context):
    context.prs = Presentation()
# when ====================================================
@when('I add a new slide')
def step_when_add_slide(context):
    slidelayout = context.prs.slidemasters[0].slidelayouts[0]
    context.prs.slides.add_slide(slidelayout)
@when("I add a picture stream to the slide's shape collection")
def step_when_add_picture_stream(context):
    shapes = context.sld.shapes
    x, y = (Inches(1.25), Inches(1.25))
    # load the image into an in-memory stream to exercise the stream code
    # path; text-mode open works here since this module targets py2 (StringIO)
    with open(test_image_path) as f:
        stream = StringIO(f.read())
    shapes.add_picture(stream, x, y)
@when("I add a picture to the slide's shape collection")
def step_when_add_picture(context):
    shapes = context.sld.shapes
    x, y = (Inches(1.25), Inches(1.25))
    shapes.add_picture(test_image_path, x, y)
@when("I add a table to the slide's shape collection")
def step_when_add_table(context):
    shapes = context.sld.shapes
    x, y = (Inches(1.00), Inches(2.00))
    cx, cy = (Inches(3.00), Inches(1.00))
    shapes.add_table(2, 2, x, y, cx, cy)
@when("I add a text box to the slide's shape collection")
def step_when_add_text_box(context):
    shapes = context.sld.shapes
    x, y = (Inches(1.00), Inches(2.00))
    cx, cy = (Inches(3.00), Inches(1.00))
    sp = shapes.add_textbox(x, y, cx, cy)
    sp.text = test_text
@when("I add an auto shape to the slide's shape collection")
def step_when_add_auto_shape(context):
    shapes = context.sld.shapes
    x, y = (Inches(1.00), Inches(2.00))
    cx, cy = (Inches(3.00), Inches(4.00))
    sp = shapes.add_shape(MAST.ROUNDED_RECTANGLE, x, y, cx, cy)
    sp.text = test_text
@when('I construct a Presentation instance with no path argument')
def step_when_construct_default_prs(context):
    context.prs = Presentation()
@when('I indent the first paragraph')
def step_when_indent_first_paragraph(context):
    p = context.body.textframe.paragraphs[0]
    p.level = 1
@when('I open a basic PowerPoint presentation')
def step_when_open_basic_pptx(context):
    context.prs = Presentation(basic_pptx_path)
@when('I open a presentation contained in a stream')
def step_when_open_presentation_stream(context):
    with open(basic_pptx_path) as f:
        stream = StringIO(f.read())
    context.prs = Presentation(stream)
    stream.close()
@when('I open a presentation having no core properties part')
def step_when_open_presentation_with_no_core_props_part(context):
    context.prs = Presentation(no_core_props_pptx_path)
@when('I save that stream to a file')
def step_when_save_stream_to_a_file(context):
    if os.path.isfile(saved_pptx_path):
        os.remove(saved_pptx_path)
    # rewind the in-memory package before copying it out to disk
    context.stream.seek(0)
    with open(saved_pptx_path, 'wb') as f:
        f.write(context.stream.read())
@when('I save the presentation')
def step_when_save_presentation(context):
    if os.path.isfile(saved_pptx_path):
        os.remove(saved_pptx_path)
    context.prs.save(saved_pptx_path)
@when('I save the presentation to a stream')
def step_when_save_presentation_to_stream(context):
    context.stream = StringIO()
    context.prs.save(context.stream)
@when("I set the cell margins")
def step_when_set_cell_margins(context):
    context.cell.margin_top = 1000
    context.cell.margin_right = 2000
    context.cell.margin_bottom = 3000
    context.cell.margin_left = 4000
@when("I set the cell vertical anchor to middle")
def step_when_set_cell_vertical_anchor_to_middle(context):
    context.cell.vertical_anchor = MSO.ANCHOR_MIDDLE
@when("I set the core properties to valid values")
def step_when_set_core_doc_props_to_valid_values(context):
    # (name, value) pairs are stored on the context so the matching 'then'
    # step can verify them after a save/reload round trip
    context.propvals = (
        ('author', 'Creator'),
        ('category', 'Category'),
        ('comments', 'Description'),
        ('content_status', 'Content Status'),
        ('created', datetime(2013, 6, 15, 12, 34, 56)),
        ('identifier', 'Identifier'),
        ('keywords', 'key; word; keyword'),
        ('language', 'Language'),
        ('last_modified_by', 'Last Modified By'),
        ('last_printed', datetime(2013, 6, 15, 12, 34, 56)),
        ('modified', datetime(2013, 6, 15, 12, 34, 56)),
        ('revision', 9),
        ('subject', 'Subject'),
        ('title', 'Title'),
        ('version', 'Version'),
    )
    for name, value in context.propvals:
        setattr(context.prs.core_properties, name, value)
@when("I set the first_col property to True")
def step_when_set_first_col_property_to_true(context):
    context.tbl.first_col = True
@when("I set the first_row property to True")
def step_when_set_first_row_property_to_true(context):
    context.tbl.first_row = True
@when("I set the first adjustment value to 0.15")
def step_when_set_first_adjustment_value(context):
    # 0.15 is asserted back by the corresponding 'then' step after reload
    context.chevron_shape.adjustments[0] = 0.15
@when("I set the horz_banding property to True")
def step_when_set_horz_banding_property_to_true(context):
    context.tbl.horz_banding = True
@when("I set the last_col property to True")
def step_when_set_last_col_property_to_true(context):
    context.tbl.last_col = True
@when("I set the last_row property to True")
def step_when_set_last_row_property_to_true(context):
    context.tbl.last_row = True
@when("I set the paragraph alignment to centered")
def step_when_set_paragraph_alignment_to_centered(context):
    context.p.alignment = PP.ALIGN_CENTER
@when("I set the text of the first cell")
def step_when_set_text_of_first_cell(context):
    context.tbl.cell(0, 0).text = 'test text'
@when("I set the title text of the slide")
def step_when_set_slide_title_text(context):
    context.sld.shapes.title.text = test_text
@when("I set the vert_banding property to True")
def step_when_set_vert_banding_property_to_true(context):
    context.tbl.vert_banding = True
@when("I set the width of the table's columns")
def step_when_set_table_column_widths(context):
    context.tbl.columns[0].width = Inches(1.50)
    context.tbl.columns[1].width = Inches(3.00)
# then ====================================================
@then('a core properties part with default values is added')
def step_then_a_core_props_part_with_def_vals_is_added(context):
core_props = context.prs.core_properties
assert_that(core_props.title, is_('PowerPoint Presentation'))
assert_that(core_props.last_modified_by, is_('python-pptx'))
assert_that(core_props.revision, is_(1))
# core_props.modified only stores time with seconds resolution, so
# comparison needs to be a little loose (within two seconds)
modified_timedelta = datetime.utcnow() - core_props.modified
max_expected_timedelta = timedelta(seconds=2)
assert_that(modified_timedelta, less_than(max_expected_timedelta))
@then('I receive a presentation based on the default template')
def step_then_receive_prs_based_on_def_tmpl(context):
prs = context.prs
assert_that(prs, is_not(None))
slidemasters = prs.slidemasters
assert_that(slidemasters, is_not(None))
assert_that(len(slidemasters), is_(1))
slidelayouts = slidemasters[0].slidelayouts
assert_that(slidelayouts, is_not(None))
assert_that(len(slidelayouts), is_(11))
@then('I see the pptx file in the working directory')
def step_then_see_pptx_file_in_working_dir(context):
assert_that(os.path.isfile(saved_pptx_path))
minimum = 30000
actual = os.path.getsize(saved_pptx_path)
assert_that(actual, is_(greater_than(minimum)))
@then('the auto shape appears in the slide')
def step_then_auto_shape_appears_in_slide(context):
prs = Presentation(saved_pptx_path)
sp = prs.slides[0].shapes[0]
sp_text = sp.textframe.paragraphs[0].runs[0].text
assert_that(sp.shape_type, is_(equal_to(MSO.AUTO_SHAPE)))
assert_that(sp.auto_shape_type, is_(equal_to(MAST.ROUNDED_RECTANGLE)))
assert_that(sp_text, is_(equal_to(test_text)))
@then('the cell contents are inset by the margins')
def step_then_cell_contents_are_inset_by_the_margins(context):
prs = Presentation(saved_pptx_path)
table = prs.slides[0].shapes[0]
cell = table.cell(0, 0)
assert_that(cell.margin_top, is_(equal_to(1000)))
assert_that(cell.margin_right, is_(equal_to(2000)))
assert_that(cell.margin_bottom, is_(equal_to(3000)))
assert_that(cell.margin_left, is_(equal_to(4000)))
@then('the cell contents are vertically centered')
def step_then_cell_contents_are_vertically_centered(context):
    """Cell (0, 0) anchors its text to the vertical middle."""
    table = Presentation(saved_pptx_path).slides[0].shapes[0]
    anchor = table.cell(0, 0).vertical_anchor
    assert_that(anchor, is_(equal_to(MSO.ANCHOR_MIDDLE)))
@then('the chevron shape appears with a less acute arrow head')
def step_then_chevron_shape_appears_with_less_acute_arrow_head(context):
    """The saved chevron keeps its widened first adjustment value."""
    slide_shapes = Presentation(saved_pptx_path).slides[0].shapes
    assert_that(slide_shapes[0].adjustments[0], is_(equal_to(0.15)))
@then('the columns of the table have alternating shading')
def step_then_columns_of_table_have_alternating_shading(context):
    """Vertical banding flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.vert_banding, is_(True))
@then('the core properties of the presentation have the values I set')
def step_then_core_props_have_values_previously_set(context):
    """Every (name, value) pair recorded in context round-trips through the file."""
    core_props = Presentation(saved_pptx_path).core_properties
    for prop_name, expected in context.propvals:
        message = "for core property '%s'" % prop_name
        assert_that(getattr(core_props, prop_name), is_(expected), message)
@then('the first column of the table has special formatting')
def step_then_first_column_of_table_has_special_formatting(context):
    """The first_col flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.first_col, is_(True))
@then('the first row of the table has special formatting')
def step_then_first_row_of_table_has_special_formatting(context):
    """The first_row flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.first_row, is_(True))
@then('the image is saved in the pptx file')
def step_then_img_saved_in_pptx_file(context):
    """The saved package contains the image part under /ppt/media/."""
    package = packaging.Package().open(saved_pptx_path)
    media_partnames = [part.partname for part in package.parts
                       if part.partname.startswith('/ppt/media/')]
    assert_that(media_partnames, has_item('/ppt/media/image1.png'))
@then('the last column of the table has special formatting')
def step_then_last_column_of_table_has_special_formatting(context):
    """The last_col flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.last_col, is_(True))
@then('the last row of the table has special formatting')
def step_then_last_row_of_table_has_special_formatting(context):
    """The last_row flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.last_row, is_(True))
@then('the paragraph is indented to the second level')
def step_then_paragraph_indented_to_second_level(context):
    """The body placeholder's first paragraph sits at indent level 1 (second level)."""
    slide = Presentation(saved_pptx_path).slides[0]
    body_placeholder = slide.shapes.placeholders[1]
    first_paragraph = body_placeholder.textframe.paragraphs[0]
    assert_that(first_paragraph.level, is_(equal_to(1)))
@then('the picture appears in the slide')
def step_then_picture_appears_in_slide(context):
    """At least one shape on the first slide is a _Picture."""
    slide = Presentation(saved_pptx_path).slides[0]
    shape_classnames = [shape.__class__.__name__ for shape in slide.shapes]
    assert_that(shape_classnames, has_item('_Picture'))
@then('the pptx file contains a single slide')
def step_then_pptx_file_contains_single_slide(context):
    """Exactly one slide exists in the saved file."""
    slide_count = len(Presentation(saved_pptx_path).slides)
    assert_that(slide_count, is_(equal_to(1)))
@then('the paragraph is aligned centered')
def step_then_paragraph_is_aligned_centered(context):
    """First paragraph of the first shape is center-aligned."""
    shape = Presentation(saved_pptx_path).slides[0].shapes[0]
    paragraph = shape.textframe.paragraphs[0]
    assert_that(paragraph.alignment, is_(equal_to(PP.ALIGN_CENTER)))
@then('the rows of the table have alternating shading')
def step_then_rows_of_table_have_alternating_shading(context):
    """Horizontal banding flag survives the save/reload round trip."""
    prs = Presentation(saved_pptx_path)
    table = prs.slides[0].shapes[0]
    assert_that(table.horz_banding, is_(True))
@then('the table appears in the slide')
def step_then_table_appears_in_slide(context):
    """At least one shape on the first slide is a _Table."""
    slide = Presentation(saved_pptx_path).slides[0]
    shape_classnames = [shape.__class__.__name__ for shape in slide.shapes]
    assert_that(shape_classnames, has_item('_Table'))
@then('the table appears with the new column widths')
def step_then_table_appears_with_new_col_widths(context):
    """Column widths set before saving are preserved in the file."""
    table = Presentation(saved_pptx_path).slides[0].shapes[0]
    assert_that(table.columns[0].width, is_(equal_to(Inches(1.50))))
    assert_that(table.columns[1].width, is_(equal_to(Inches(3.00))))
@then('the text appears in the first cell of the table')
def step_then_text_appears_in_first_cell_of_table(context):
    """Cell (0, 0) carries the text that was written before saving."""
    table = Presentation(saved_pptx_path).slides[0].shapes[0]
    cell_text = table.cell(0, 0).textframe.paragraphs[0].runs[0].text
    assert_that(cell_text, is_(equal_to('test text')))
@then('the text box appears in the slide')
def step_then_text_box_appears_in_slide(context):
    """The first shape is the text box holding the test text."""
    shape = Presentation(saved_pptx_path).slides[0].shapes[0]
    first_run_text = shape.textframe.paragraphs[0].runs[0].text
    assert_that(first_run_text, is_(equal_to(test_text)))
@then('the text appears in the title placeholder')
def step_then_text_appears_in_title_placeholder(context):
    """The title placeholder carries the test text after reload."""
    title = Presentation(saved_pptx_path).slides[0].shapes.title
    title_run_text = title.textframe.paragraphs[0].runs[0].text
    assert_that(title_run_text, is_(equal_to(test_text)))
| 33.926877
| 74
| 0.732044
| 2,609
| 17,167
| 4.551552
| 0.110387
| 0.037137
| 0.037221
| 0.042105
| 0.531368
| 0.449853
| 0.382316
| 0.312337
| 0.269137
| 0.233684
| 0
| 0.01776
| 0.147201
| 17,167
| 505
| 75
| 33.994059
| 0.793374
| 0.021029
| 0
| 0.220994
| 0
| 0
| 0.178284
| 0.0025
| 0
| 0
| 0
| 0
| 0.107735
| 1
| 0.176796
| false
| 0.002762
| 0.024862
| 0.002762
| 0.20442
| 0.002762
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85d71db4dff31a27689c64809381f6863f31ac08
| 3,177
|
py
|
Python
|
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
PomodoroTimer/Python/main2.py
|
zcribe/SmallProjectsCollection
|
fbd6bc9884468eba7519728e295b36b24043af27
|
[
"MIT"
] | null | null | null |
from time import time, sleep
from math import floor
import argparse
import csv
import datetime
# Constants
TIME_WORK = 25
TIME_REST = 5
TIME_REST_LONG = 30
ONE_MINUTE = 60
SESSIONS_WORK_MAX = 4
LOOP_LIMIT = 9999


def _parse_bool(value):
    """argparse 'type' converter for booleans.

    The original used type=bool, but bool('False') is True — every supplied
    value was treated as truthy. Accepts common true spellings; anything
    else is False.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() in ('1', 'true', 'yes', 'on')


# Console
parser = argparse.ArgumentParser(description='===== Pomodoro timer CLI =====')
parser.add_argument('-wt', '-worktime', type=int, help=f'Minutes of work in a work sessions (default {TIME_WORK})',
                    default=TIME_WORK, nargs='?')
parser.add_argument('-rt', '-resttime', type=int, help=f'Minutes of rest in a rest sessions (default {TIME_REST})',
                    default=TIME_REST, nargs='?')
parser.add_argument('-rtl', '-resttimelong', type=int,
                    help=f'Minutes of rest in a long rest sessions (default {TIME_REST_LONG})',
                    default=TIME_REST_LONG, nargs='?')
parser.add_argument('-mws', '-maxworksessions', type=int,
                    help=f'Number of work sessions cycles before long rest session (default {SESSIONS_WORK_MAX})',
                    default=SESSIONS_WORK_MAX, nargs='?')
parser.add_argument('-ll', '-looplimit', type=int,
                    help=f'Maximum number of total sessions (default 9999)', default=LOOP_LIMIT, nargs='?')
# const=True lets a bare '-log' enable logging (previously it yielded None).
parser.add_argument('-log', '-logsessions', type=_parse_bool, const=True,
                    help='Should sessions be logged (False)', default=False, nargs='?')
arguments = vars(parser.parse_args())
time_work = arguments['wt']
time_rest = arguments['rt']
time_rest_long = arguments['rtl']
sessions_work_max = arguments['mws']
loop_lim = arguments['ll']
# Core
def run():
    """Alternate work and rest sessions indefinitely (up to loop_lim sessions).

    After sessions_work_max completed work sessions the rest becomes a long
    rest. Bug fix: the original never reset work_sessions, so after the first
    long rest the timer alternated work/long-rest forever and short rests
    never resumed.
    """
    target_minutes = time_work
    work_sessions = 0
    started = False
    for _ in range(0, loop_lim):
        if target_minutes == time_work and work_sessions >= sessions_work_max and started:
            # Full cycle complete: take the long rest and start a new cycle.
            target_minutes = time_rest_long
            work_sessions = 0
        elif target_minutes == time_work and started:
            # A work session just finished: take a short rest.
            target_minutes = time_rest
            work_sessions += 1
        elif not started:
            # First iteration: run the initial work session as-is.
            started = True
        else:
            # A rest just finished: back to work.
            target_minutes = time_work
        timer(target_minutes)
        write_log(target_minutes)
def timer(target_minutes: int) -> int:
    """Block until target_minutes have elapsed, ticking once per second.

    Returns target_minutes unchanged so callers can chain the value.
    """
    deadline = create_target_time(target_minutes, time())
    while time() < deadline:
        tick(deadline)
        sleep(1)
    return target_minutes
def write_log(minutes: int, testing=False):
    """Append one 'timestamp minutes' row to session_log.csv.

    Bug fix: the file is now opened in append mode ('a'); the original used
    'w', which clobbered the log on every session so only the last entry
    survived. The `testing` parameter is unused but kept for interface
    compatibility.
    """
    with open('session_log.csv', 'a', newline='') as csvfile:
        log_writer = csv.writer(csvfile, delimiter=' ', quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        # Timezone-aware UTC timestamp for the finished session.
        today = datetime.datetime.now(datetime.timezone.utc)
        log_writer.writerow([today, minutes])
def tick(time_target: float, broadcast=True):
    """Emit one countdown tick for time_target; silent when broadcast is False."""
    if not broadcast:
        return
    print(create_message(time_target))
def create_message(time_target: float) -> str:
    """Render time remaining until time_target (epoch seconds) as 'M:SS'.

    Fixes over the original: removes a leftover debug print(time()),
    zero-pads the seconds field ('2:05' instead of '2:5'), and avoids
    artifacts like '0:60' that arose from rounding seconds independently
    of minutes. A deadline already in the past renders as '0:00'.
    """
    remaining_seconds = max(0, round(time_target - time()))
    # divmod by 60 seconds keeps minutes and seconds consistent.
    minutes, seconds = divmod(remaining_seconds, 60)
    return f"{minutes}:{seconds:02d}"
def create_target_time(target_minutes: int, current_time: float) -> float:
    """Return the absolute deadline target_minutes after current_time."""
    offset_seconds = target_minutes * ONE_MINUTE
    return current_time + offset_seconds
# Script entry point: start the pomodoro loop when run directly.
if __name__ == "__main__":
    run()
| 33.09375
| 115
| 0.664463
| 407
| 3,177
| 4.948403
| 0.297297
| 0.083913
| 0.059086
| 0.029791
| 0.146971
| 0.069017
| 0.027805
| 0.027805
| 0.027805
| 0
| 0
| 0.008052
| 0.21813
| 3,177
| 95
| 116
| 33.442105
| 0.802738
| 0.006925
| 0
| 0.027397
| 0
| 0
| 0.166931
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.068493
| 0.013699
| 0.191781
| 0.027397
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85db5b6d5b5a64186bb3b9c04d0a279e4a5f0c0a
| 998
|
py
|
Python
|
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
hw1/1.6/encrpyt_equals_decrypt.py
|
rocke97/crypto
|
89c4e595adf74558e12ceb1762025fd2f0275fec
|
[
"MIT"
] | null | null | null |
from itertools import count
from string import ascii_lowercase
# Find Caesar shifts i for which encrypting twice returns the plain text,
# i.e. shifting by i is its own inverse: 2*i ≡ 0 (mod 26) → i = 0 or 13.
plain_text = 'july'

# Bidirectional letter <-> 0-25 position maps.
letters_to_numbers = dict(zip(ascii_lowercase, count(0)))
numbers_to_letters = dict(zip(count(0), ascii_lowercase))
plain_text_numbers = [letters_to_numbers[letter] for letter in plain_text]

# Bug fix: results.txt was opened but never written to or closed (leaked
# handle, always-empty file). A 'with' block now writes and closes it.
with open('results.txt', 'w') as results_file:
    for i in range(0, 26):
        # encrypt the plain text by shifting by some number
        cipher_numbers = [(num + i) % 26 for num in plain_text_numbers]
        # try to decrypt the plain text by shifting forward by the same number (encrypt function = decrypt function)
        decrypted_cipher_numbers = [(num + i) % 26 for num in cipher_numbers]
        attempted_plain_text = [numbers_to_letters[num] for num in decrypted_cipher_numbers]
        if ''.join(attempted_plain_text) == plain_text:  # if we decrypt print which key values work
            for line in ('At shift = ' + str(i) + ':',
                         'Plain text: ' + plain_text,
                         'Attempted Plain Text Decrypt: ' + ''.join(attempted_plain_text)):
                print(line)
                results_file.write(line + '\n')
| 52.526316
| 111
| 0.728457
| 149
| 998
| 4.66443
| 0.355705
| 0.168345
| 0.103597
| 0.066187
| 0.141007
| 0.077698
| 0.077698
| 0.077698
| 0
| 0
| 0
| 0.010896
| 0.172345
| 998
| 19
| 112
| 52.526316
| 0.830508
| 0.196393
| 0
| 0
| 0
| 0
| 0.0875
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.133333
| 0
| 0.133333
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85db89656ff34bccb3df57eb36eff9c756872dce
| 2,663
|
py
|
Python
|
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
generator.py
|
mann1/DD_SIM_Template
|
84c7787b6b3c52f08e7031114894c98416c02fcf
|
[
"MIT"
] | null | null | null |
import os, pickle
import numpy as np
import tensorflow as tf
def read_pickle(file_name):
    """Return the LAST object pickled into file_name.

    The file is read until EOF; each pickle.load overwrites the previous
    result, so only the final object is returned (this matches the original
    behavior, despite the plural-looking name). Robustness fix: an empty
    file now returns None instead of raising UnboundLocalError.
    """
    objects = None
    with open(file_name, "rb") as openfile:
        while True:
            try:
                objects = pickle.load(openfile)
            except EOFError:
                break
    return objects
class Generator(tf.keras.utils.Sequence):
    """Keras Sequence yielding (data, target) batches from pickled datasets."""

    def __init__(self, DATASET_PATH, BATCH_SIZE=32):
        """ Initialize Generator object.
        Args
            DATASET_PATH : Path to folder containing individual folders named by their class names
            BATCH_SIZE : The size of the batches to generate.
        """
        self.batch_size = BATCH_SIZE
        self.load_data(DATASET_PATH)
        self.create_data_batches()

    def load_data(self, DATASET_PATH):
        """Load the pickled data/target pair for the train or val split.

        Raises ValueError for an unrecognized DATASET_PATH; the original
        crashed with UnboundLocalError on data_file instead.
        """
        cwd = os.getcwd()
        DATA_PATH = os.path.join(cwd, DATASET_PATH)
        if DATASET_PATH == 'datasets/train':
            data_file = os.path.join(DATA_PATH, "train_data.pickle")
            target_file = os.path.join(DATA_PATH, "train_target.pickle")
        elif DATASET_PATH == 'datasets/val':
            data_file = os.path.join(DATA_PATH, "val_data.pickle")
            target_file = os.path.join(DATA_PATH, "val_target.pickle")
        else:
            raise ValueError("Unknown DATASET_PATH: %r "
                             "(expected 'datasets/train' or 'datasets/val')" % (DATASET_PATH,))
        self.data = read_pickle(data_file)
        self.target = read_pickle(target_file)
        # Data and labels must stay aligned one-to-one.
        assert len(self.data) == len(self.target)

    def create_data_batches(self):
        # Divide data and target into groups of BATCH_SIZE; indexing with
        # x % len wraps around so the final batch is padded to full size.
        self.data_batchs = [[self.data[x % len(self.data)] for x in range(i, i + self.batch_size)]
                            for i in range(0, len(self.data), self.batch_size)]
        self.target_batchs = [[self.target[x % len(self.target)] for x in range(i, i + self.batch_size)]
                              for i in range(0, len(self.target), self.batch_size)]

    def __len__(self):
        """
        Number of batches for each Epoch.
        """
        return len(self.data_batchs)

    def __getitem__(self, index):
        """
        Keras sequence method for generating batches.
        """
        # Wrap out-of-range indices so any index yields a batch.
        if index >= len(self.data_batchs):
            index = index % len(self.data_batchs)
        data_batch = self.data_batchs[index]
        target_batch = self.target_batchs[index]
        return np.array(data_batch), np.array(target_batch)
if __name__ == "__main__":
    # Smoke test: build both generators and inspect one batch.
    train_generator = Generator('datasets/train')
    val_generator = Generator('datasets/val')
    print(len(train_generator))
    print(len(val_generator))
    # Idiom fix: index the Sequence instead of calling __getitem__ directly.
    data_batch, target_batch = train_generator[0]
    print(data_batch.shape)
    print(target_batch.shape)
| 32.876543
| 108
| 0.613969
| 341
| 2,663
| 4.542522
| 0.258065
| 0.051646
| 0.042608
| 0.036152
| 0.179471
| 0.151065
| 0.151065
| 0.107166
| 0.107166
| 0.058102
| 0
| 0.002626
| 0.285017
| 2,663
| 81
| 109
| 32.876543
| 0.810924
| 0.124296
| 0
| 0
| 0
| 0
| 0.057855
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 1
| 0.122449
| false
| 0
| 0.061224
| 0
| 0.265306
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85db99fa2aa9b948ffca4017b69512e862fe9571
| 5,096
|
py
|
Python
|
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
src/mlb/schedule/schedule_view.py
|
benbrandt22/MagTagMLB
|
1ec347743bc7df9339fb8e3de0f86ea037b7694f
|
[
"MIT"
] | null | null | null |
from mlb.models.game_detail import GameDetail
import time
import board
import displayio
from adafruit_display_text import label
from adafruit_display_shapes.roundrect import RoundRect
import fonts.fonts as FONTS
from mlb.schedule.schedule_view_model import ScheduleViewModel
from time_utils import day_of_week, month_name_short, relative_day, utc_to_local, month_name, hour_12, ampm
class ScheduleView:
    """Renders a three-game schedule screen on the MagTag e-ink display."""
    # (Display 296 x 128)
    def __init__(self, model: ScheduleViewModel):
        # model exposes game1/game2/game3 GameDetail slots (any may be None).
        self.model = model
    def render(self):
        """Draw the full screen: white background plus one card per game slot."""
        display = board.DISPLAY
        # wait until we can draw
        time.sleep(display.time_to_refresh)
        # main group to hold everything
        main_group = displayio.Group()
        # white background. Scaled to save RAM
        bg_bitmap = displayio.Bitmap(display.width // 8, display.height // 8, 1)
        bg_palette = displayio.Palette(1)
        bg_palette[0] = 0xFFFFFF
        bg_sprite = displayio.TileGrid(bg_bitmap, x=0, y=0, pixel_shader=bg_palette)
        bg_group = displayio.Group(scale=8)
        bg_group.append(bg_sprite)
        main_group.append(bg_group)
        # Three 99-px-wide columns, one card per game slot.
        game1_group = self._single_game_group(self.model.game1)
        game1_group.x = 0
        game1_group.y = 0
        game2_group = self._single_game_group(self.model.game2)
        game2_group.x = 99
        game2_group.y = 0
        game3_group = self._single_game_group(self.model.game3)
        game3_group.x = 198
        game3_group.y = 0
        main_group.append(game1_group)
        main_group.append(game2_group)
        main_group.append(game3_group)
        # show the main group and refresh.
        display.show(main_group)
        display.refresh()
    def _single_game_group(self, game: GameDetail):
        """Build one card (rounded rect + labels) for a game; empty group for None."""
        game_group = displayio.Group()
        if game is None:
            return game_group
        # Card outline.
        roundrect = RoundRect(5, 5, 88, 118, 10, fill=0xFFFFFF, outline=0x555555, stroke=3)
        game_group.append(roundrect)
        # Header: relative day (falls back to weekday), date, local start time.
        gametime_local = utc_to_local(game.dateTimeUtc)
        day_text = ( relative_day(gametime_local) or day_of_week(gametime_local) )
        date_text = f'{month_name(gametime_local)} {gametime_local.day}'
        time_text = f'{hour_12(gametime_local)}:{gametime_local.minute:02d} {ampm(gametime_local)}'
        day_label = label.Label(FONTS.OpenSans_12, text=day_text, color=0x000000)
        day_label.anchor_point = (0.5, 0)
        day_label.anchored_position = (49, 11)
        game_group.append(day_label)
        date_label = label.Label(FONTS.OpenSans_12, text=date_text, color=0x000000)
        date_label.anchor_point = (0.5, 0)
        date_label.anchored_position = (49, 25)
        game_group.append(date_label)
        time_label = label.Label(FONTS.OpenSans_12, text=time_text, color=0x000000)
        time_label.anchor_point = (0.5, 0)
        time_label.anchored_position = (49, 39)
        game_group.append(time_label)
        #Teams
        if game.isPreview: #(no score to show)
            # Pre-game layout: "AWAY @ HOME" stacked and centered.
            away_team = label.Label(FONTS.OpenSans_Bold_18, text=f"{game.away.teamAbbreviation}", color=0x000000)
            away_team.anchor_point = (0.5, 0)
            away_team.anchored_position = (49, 58)
            game_group.append(away_team)
            at_label = label.Label(FONTS.OpenSans_12, text='@', color=0x000000)
            at_label.anchor_point = (0.5, 0)
            at_label.anchored_position = (49, 75)
            game_group.append(at_label)
            home_team = label.Label(FONTS.OpenSans_Bold_18, text=f"{game.home.teamAbbreviation}", color=0x000000)
            home_team.anchor_point = (0.5, 0)
            home_team.anchored_position = (49, 90)
            game_group.append(home_team)
        else:
            # Live/final layout: one row per team, run total right-aligned.
            team_y = 58
            for team in [ game.away, game.home ]:
                team_abbrev = label.Label(FONTS.OpenSans_Bold_18, text=f"{team.teamAbbreviation}", color=0x000000)
                team_abbrev.anchor_point = (0, 0)
                team_abbrev.anchored_position = (15, team_y)
                game_group.append(team_abbrev)
                score = label.Label(FONTS.OpenSans_Bold_18, text=f"{team.runs}", color=0x000000)
                score.anchor_point = (1, 0)
                score.anchored_position = (84, team_y)
                game_group.append(score)
                team_y = team_y + 20
            if game.isLive or game.isFinal:
                # show status text at the bottom
                status_text = game.detailedState if game.isStatusExceptional else game.abstractGameState
                if game.isLive and not game.isStatusExceptional:
                    # e.g. "Top 7th"
                    status_text = f'{game.inningHalf} {game.currentInningOrdinal}'
                if game.isFinal and game.isExtraInnings:
                    # e.g. "Final / 11"
                    status_text = f'{game.abstractGameState} / {game.inningCount}'
                status_label = label.Label(FONTS.OpenSans_12, text=status_text, color=0x000000)
                status_label.anchor_point = (0.5, 0)
                status_label.anchored_position = (49, 105)
                game_group.append(status_label)
        return game_group
| 38.315789
| 114
| 0.649333
| 661
| 5,096
| 4.748865
| 0.229955
| 0.048742
| 0.047786
| 0.065945
| 0.191144
| 0.178401
| 0.136668
| 0.050972
| 0.050972
| 0.02676
| 0
| 0.054526
| 0.258634
| 5,096
| 133
| 115
| 38.315789
| 0.776337
| 0.038462
| 0
| 0.021053
| 0
| 0
| 0.062551
| 0.047629
| 0
| 0
| 0.019624
| 0
| 0
| 1
| 0.031579
| false
| 0
| 0.094737
| 0
| 0.157895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85dddc830d151d3b583e5d23116cb924afd1cfe8
| 2,106
|
py
|
Python
|
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
src/platform_controller/scripts/controlTiltMotors.py
|
ahmohamed1/activeStereoVisionPlatform
|
6c928ca242e4de68c7b15a8748bff1d9f7fa1382
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import rospy
import actionlib
from control_msgs.msg import *
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
PI = 3.14159265359
class TiltMotorController:
    """ROS node bridging joint-state and degree-based move topics to the
    radian command topics of the two tilt motors."""

    def __init__(self):
        # Radian command publishers for each tilt servo.
        self.leftMotor_publisher = rospy.Publisher('/left_motor_tilt/command', Float64, queue_size = 2)
        self.rightMotor_publisher = rospy.Publisher('/right_motor_tilt/command', Float64, queue_size = 2)
        # Degree angle state publishers.
        self.leftMotorState_publisher = rospy.Publisher('/left/tilt/angle', Float64, queue_size = 2)
        self.rightMotorState_publisher = rospy.Publisher('/right/tilt/angle', Float64, queue_size = 2)
        # Alternative command topics
        self.right_motor_subscriper = rospy.Subscriber('/right/tilt/move', Float64, self.right_motor_callback)
        self.left_motor_subscriper = rospy.Subscriber("/left/tilt/move", Float64, self.left_motor_callback)
        self.joint_command = rospy.Subscriber('/joint_states', JointState, self.jointCommandCb)

    def rad2Deg(self, val):
        """Convert radians to degrees."""
        return val * 180 / PI

    def deg2Rad(self, val):
        """Convert degrees to radians."""
        return val * PI / 180

    def jointCommandCb(self, msg):
        """Republish the two joint angles (radians) as degree state topics.

        Bug fix: the original bound the Float64 *class* instead of creating
        instances, so .data was set on the shared class object.
        """
        leftMotorState = Float64()
        rightMotorState = Float64()
        left = msg.position[0]
        right = msg.position[1]
        leftMotorState.data = self.rad2Deg(left)
        # NOTE(review): the -10 offset is applied in radians before the
        # conversion — looks like a calibration constant; confirm the units.
        rightMotorState.data = self.rad2Deg(right-10)
        self.leftMotorState_publisher.publish(leftMotorState)
        self.rightMotorState_publisher.publish(rightMotorState)

    def right_motor_callback(self, msg):
        """Convert a degree command to radians and forward it to the motor.

        Bug fix: Float64 is instantiated and .data assigned (the original
        set a bogus .date attribute on the Float64 class itself).
        """
        rad = Float64()
        rad.data = self.deg2Rad(msg.data)
        self.rightMotor_publisher.publish(rad)

    def left_motor_callback(self, msg):
        """Convert a degree command to radians and forward it to the motor.

        Same Float64()/.data fix as right_motor_callback; a leftover debug
        print was removed.
        """
        rad = Float64()
        rad.data = self.deg2Rad(msg.data)
        self.leftMotor_publisher.publish(rad)

    def controlLoop(self):
        """
        Runs the control loop
        """
        rate = rospy.Rate(15)  # 15 Hz (the original comment said 10hz)
        while not rospy.is_shutdown():
            rate.sleep()

    def start(self):
        """
        Starts the control loop and runs spin
        """
        self.controlLoop()
def main():
    """Register the ROS node and hand control to the controller's loop."""
    rospy.init_node('TiltMotorController')
    controller = TiltMotorController()
    controller.start()
# Script entry point: run the node, then exit when the loop returns.
if __name__=='__main__':
    main()
    exit()
| 23.931818
| 104
| 0.746439
| 266
| 2,106
| 5.733083
| 0.304511
| 0.036721
| 0.060328
| 0.04459
| 0.157377
| 0.154754
| 0.120656
| 0.120656
| 0.072131
| 0.072131
| 0
| 0.032009
| 0.139601
| 2,106
| 87
| 105
| 24.206897
| 0.809603
| 0.062678
| 0
| 0.08
| 0
| 0
| 0.078623
| 0.02518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.18
| false
| 0
| 0.1
| 0.04
| 0.34
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85dde154e71416994a5fa1e8b1afe91eea13927c
| 14,888
|
py
|
Python
|
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | 2
|
2021-12-05T12:38:26.000Z
|
2022-03-09T02:24:44.000Z
|
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | null | null | null |
py/Parser.py
|
Sqazine/ComputeDuck
|
d307d88a24601d433aa7507ea90000207a34e1f0
|
[
"Apache-2.0"
] | null | null | null |
from ast import Lambda
from enum import IntEnum
from typing import Any
from Ast import Stmt
from Ast import Expr
from Token import Token, TokenType
from Utils import Assert
from Ast import AstType, ArrayExpr, BoolExpr, ExprStmt, FunctionCallExpr, FunctionStmt, GroupExpr, IdentifierExpr, IfStmt, IndexExpr, InfixExpr, NilExpr, NumExpr, PrefixExpr, ReturnStmt, ScopeStmt, StrExpr, StructCallExpr, StructStmt, VarStmt, WhileStmt, RefExpr,LambdaExpr
class Precedence(IntEnum):
    """Operator binding strength for Pratt parsing; higher binds tighter.

    Bug fix: the original had a trailing comma after every value, making
    each member's value a 1-tuple like (0,). That only worked by accident
    (IntEnum passes tuple values as constructor args to int); the plain
    int values below are the intended, safe form.
    """
    LOWEST = 0     # ,
    ASSIGN = 1     # =
    OR = 2         # or
    AND = 3        # and
    EQUAL = 4      # == !=
    COMPARE = 5    # < <= > >=
    ADD_PLUS = 6   # + -
    MUL_DIV = 7    # * /
    PREFIX = 8     # !
    INFIX = 9      # [] () .
class Parser:
__curPos: int = 0
__tokens: list[Token] = []
__prefixFunctions: dict[TokenType, Any] = {}
__infixFunctions: dict[TokenType, Any] = {}
__precedence: dict[TokenType, Any] = {}
def __init__(self) -> None:
self.__curPos: int = 0
self.__tokens: list[Token] = []
self.__prefixFunctions: dict[TokenType, Any] = {}
self.__infixFunctions: dict[TokenType, Any] = {}
self.__precedence: dict[TokenType, Any] = {}
self.__prefixFunctions = {
TokenType.IDENTIFIER: self.ParseIdentifierExpr,
TokenType.NUMBER: self.ParseNumExpr,
TokenType.STRING: self.ParseStrExpr,
TokenType.NIL: self.ParseNilExpr,
TokenType.TRUE: self.ParseTrueExpr,
TokenType.FALSE: self.ParseFalseExpr,
TokenType.MINUS: self.ParsePrefixExpr,
TokenType.NOT: self.ParsePrefixExpr,
TokenType.LPAREN: self.ParseGroupExpr,
TokenType.LBRACKET: self.ParseArrayExpr,
TokenType.REF:self.ParseRefExpr,
TokenType.LAMBDA:self.ParseLambdaExpr,
}
self.__infixFunctions = {
TokenType.EQUAL: self.ParseInfixExpr,
TokenType.EQUAL_EQUAL: self.ParseInfixExpr,
TokenType.BANG_EQUAL: self.ParseInfixExpr,
TokenType.LESS: self.ParseInfixExpr,
TokenType.LESS_EQUAL: self.ParseInfixExpr,
TokenType.GREATER: self.ParseInfixExpr,
TokenType.GREATER_EQUAL: self.ParseInfixExpr,
TokenType.PLUS: self.ParseInfixExpr,
TokenType.MINUS: self.ParseInfixExpr,
TokenType.ASTERISK: self.ParseInfixExpr,
TokenType.SLASH: self.ParseInfixExpr,
TokenType.LPAREN: self.ParseFunctionCallExpr,
TokenType.LBRACKET: self.ParseIndexExpr,
TokenType.AND: self.ParseInfixExpr,
TokenType.OR: self.ParseInfixExpr,
TokenType.DOT: self.ParseStructCallExpr,
}
self.__precedence = {
TokenType.EQUAL: Precedence.ASSIGN,
TokenType.EQUAL_EQUAL: Precedence.EQUAL,
TokenType.BANG_EQUAL: Precedence.EQUAL,
TokenType.LESS: Precedence.COMPARE,
TokenType.LESS_EQUAL: Precedence.COMPARE,
TokenType.GREATER: Precedence.COMPARE,
TokenType.GREATER_EQUAL: Precedence.COMPARE,
TokenType.PLUS: Precedence.ADD_PLUS,
TokenType.MINUS: Precedence.ADD_PLUS,
TokenType.ASTERISK: Precedence.MUL_DIV,
TokenType.SLASH: Precedence.MUL_DIV,
TokenType.LBRACKET: Precedence.INFIX,
TokenType.LPAREN: Precedence.INFIX,
TokenType.AND: Precedence.AND,
TokenType.OR: Precedence.OR,
TokenType.DOT: Precedence.INFIX
}
def Parse(self, tokens: list[Token]) -> list[Stmt]:
self.__curPos = 0
self.__tokens = tokens
stmts: list[Stmt] = []
while (not self.IsMatchCurToken(TokenType.END)):
stmts.append(self.ParseStmt())
return stmts
def IsAtEnd(self) -> bool:
return self.__curPos >= len(self.__tokens)
def Consume(self, type, errMsg) -> Token:
if self.IsMatchCurToken(type):
return self.GetCurTokenAndStepOnce()
Assert("[line "+str(self.GetCurToken().line)+"]:"+errMsg)
return Token(TokenType.END, "", 0)
def GetCurToken(self) -> Token:
if not self.IsAtEnd():
return self.__tokens[self.__curPos]
return self.__tokens[-1]
def GetCurTokenAndStepOnce(self) -> Token:
if not self.IsAtEnd():
result = self.__tokens[self.__curPos]
self.__curPos += 1
return result
return self.__tokens[-1]
def GetCurTokenPrecedence(self) -> Token:
if self.__precedence.get(self.GetCurToken().type)==None:
return Precedence.LOWEST
return self.__precedence.get(self.GetCurToken().type)
def GetNextToken(self) -> Token:
if self.__curPos+1 < self.__tokens.count:
return self.__tokens[self.__curPos+1]
return self.__tokens[-1]
def GetNextTokenAndStepOnce(self) -> Token:
if self.__curPos+1 < self.__tokens.count:
self.__curPos += 1
return self.__tokens[self.__curPos]
return self.__tokens[-1]
def GetNextTokenPrecedence(self) -> Token:
return self.__precedence.get(self.GetNextToken().type, default=Precedence.LOWEST)
def IsMatchCurToken(self, type) -> bool:
return self.GetCurToken().type == type
def IsMatchCurTokenAndStepOnce(self, type) -> bool:
if self.IsMatchCurToken(type):
self.__curPos += 1
return True
return False
def IsMatchNextToken(self, type) -> bool:
return self.GetNextToken().type == type
def IsMatchNextTokenAndStepOnce(self, type) -> bool:
if self.IsMatchNextToken(type):
self.__curPos += 1
return True
return False
def ParseStmt(self) -> Stmt:
if self.IsMatchCurToken(TokenType.VAR):
return self.ParseVarStmt()
elif self.IsMatchCurToken(TokenType.RETURN):
return self.ParseReturnStmt()
elif self.IsMatchCurToken(TokenType.IF):
return self.ParseIfStmt()
elif self.IsMatchCurToken(TokenType.LBRACE):
return self.ParseScopeStmt()
elif self.IsMatchCurToken(TokenType.WHILE):
return self.ParseWhileStmt()
elif self.IsMatchCurToken(TokenType.FUNCTION):
return self.ParseFunctionStmt()
elif self.IsMatchCurToken(TokenType.STRUCT):
return self.ParseStructStmt()
else:
return self.ParseExprStmt()
def ParseExprStmt(self) -> Stmt:
exprStmt = ExprStmt(self.ParseExpr())
self.Consume(TokenType.SEMICOLON, "Expect ';' after expr stmt.")
return exprStmt
def ParseVarStmt(self) -> Stmt:
self.Consume(TokenType.VAR, "Expect 'var' key word")
name = (self.ParseIdentifierExpr())
value = NilExpr()
if self.IsMatchCurTokenAndStepOnce(TokenType.EQUAL):
value = self.ParseExpr()
self.Consume(TokenType.SEMICOLON, "Expect ';' after var stmt")
return VarStmt(name, value)
def ParseReturnStmt(self) -> Stmt:
self.Consume(TokenType.RETURN, "Expecr 'return' keyword")
expr = None
if not self.IsMatchCurToken(TokenType.SEMICOLON):
expr = self.ParseExpr()
self.Consume(TokenType.SEMICOLON, "Expect ';' after return stmt.")
return ReturnStmt(expr)
def ParseIfStmt(self) -> Stmt:
self.Consume(TokenType.IF, "Expect 'if' key word.")
self.Consume(TokenType.LPAREN, "Expect '(' after 'if'.")
condition = self.ParseExpr()
self.Consume(TokenType.RPAREN, "Expect ')' after if condition")
thenBranch = self.ParseStmt()
elseBranch = None
if self.IsMatchCurTokenAndStepOnce(TokenType.ELSE):
elseBranch = self.ParseStmt()
return IfStmt(condition, thenBranch, elseBranch)
def ParseScopeStmt(self) -> Stmt:
self.Consume(TokenType.LBRACE, "Expect '{'.")
scopeStmt = ScopeStmt([])
while (not self.IsMatchCurToken(TokenType.RBRACE)):
scopeStmt.stmts.append(self.ParseStmt())
self.Consume(TokenType.RBRACE, "Expect '}'.")
return scopeStmt
def ParseWhileStmt(self) -> Stmt:
self.Consume(TokenType.WHILE, "Expect 'while' keyword.")
self.Consume(TokenType.LPAREN, "Expect '(' after 'while'.")
condition = self.ParseExpr()
self.Consume(TokenType.RPAREN,
"Expect ')' after while stmt's condition")
body = self.ParseStmt()
return WhileStmt(condition, body)
def ParseFunctionStmt(self) -> Stmt:
self.Consume(TokenType.FUNCTION, "Expect 'fn' keyword")
funcStmt = FunctionStmt("", [], None)
funcStmt.name = self.ParseIdentifierExpr().Stringify()
self.Consume(TokenType.LPAREN, "Expect '(' after function name")
if (not self.IsMatchCurToken(TokenType.RPAREN)):
idenExpr = self.ParseIdentifierExpr()
funcStmt.parameters.append(idenExpr)
while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
idenExpr = self.ParseIdentifierExpr()
funcStmt.parameters.append(idenExpr)
self.Consume(TokenType.RPAREN, "Expect ')' after function expr's '('")
funcStmt.body = self.ParseScopeStmt()
return funcStmt
def ParseStructStmt(self) -> Stmt:
self.Consume(TokenType.STRUCT, "Expect 'struct keyword'")
structStmt = StructStmt("", [])
structStmt.name = self.ParseIdentifierExpr().Stringify()
self.Consume(TokenType.LBRACE, "Expect '{' after struct name")
while not self.IsMatchCurToken(TokenType.RBRACE):
structStmt.members.append(self.ParseVarStmt())
self.Consume(TokenType.RBRACE, "Expect '}' after struct's '{'")
return structStmt
def ParseExpr(self, precedence=Precedence.LOWEST) -> Expr:
if self.__prefixFunctions.get(self.GetCurToken().type) == None:
print("no prefix definition for:" +
self.GetCurTokenAndStepOnce().literal)
return NilExpr()
prefixFn = self.__prefixFunctions.get(self.GetCurToken().type)
leftExpr = prefixFn()
while (not self.IsMatchCurToken(TokenType.SEMICOLON) and precedence < self.GetCurTokenPrecedence()):
if self.__infixFunctions.get(self.GetCurToken().type) == None:
return leftExpr
infixFn = self.__infixFunctions[self.GetCurToken().type]
leftExpr = infixFn(leftExpr)
return leftExpr
def ParseIdentifierExpr(self) -> Expr:
literal=self.Consume(TokenType.IDENTIFIER, "Unexpect Identifier '"+self.GetCurToken().literal+".").literal
return IdentifierExpr(literal)
def ParseNumExpr(self) -> Expr:
numLiteral = self.Consume(
TokenType.NUMBER, "Expect a number literal.").literal
return NumExpr(float(numLiteral))
def ParseStrExpr(self) -> Expr:
return StrExpr(self.Consume(TokenType.STRING, "Expact a string literal.").literal)
def ParseNilExpr(self) -> Expr:
    """Consume the 'nil' keyword and return a NilExpr literal node."""
    self.Consume(TokenType.NIL, "Expect 'nil' keyword")
    return NilExpr()
def ParseTrueExpr(self) -> Expr:
    """Consume the 'true' keyword and return BoolExpr(True)."""
    self.Consume(TokenType.TRUE, "Expect 'true' keyword")
    return BoolExpr(True)
def ParseFalseExpr(self) -> Expr:
    """Consume the 'false' keyword and return BoolExpr(False)."""
    self.Consume(TokenType.FALSE, "Expect 'false' keyword")
    return BoolExpr(False)
def ParseGroupExpr(self) -> Expr:
    """Parse a parenthesized expression: '(' <expr> ')'."""
    self.Consume(TokenType.LPAREN, "Expect '('.")
    inner = self.ParseExpr()
    self.Consume(TokenType.RPAREN, "Expect ')'.")
    return GroupExpr(inner)
def ParseArrayExpr(self) -> Expr:
    """Parse an array literal: '[' (expr (',' expr)*)? ']'."""
    self.Consume(TokenType.LBRACKET, "Expect '['.")
    node = ArrayExpr([])
    if not self.IsMatchCurToken(TokenType.RBRACKET):
        # First element, then comma-separated tail.
        node.elements.append(self.ParseExpr())
        while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
            node.elements.append(self.ParseExpr())
    self.Consume(TokenType.RBRACKET, "Expect ']'.")
    return node
def ParsePrefixExpr(self) -> Expr:
    """Parse a prefix (unary) expression; the operand binds at PREFIX precedence."""
    node = PrefixExpr("", None)
    node.op = self.GetCurTokenAndStepOnce().literal  # operator token first
    node.right = self.ParseExpr(Precedence.PREFIX)   # then its operand
    return node
def ParseInfixExpr(self, prefixExpr: Expr) -> Expr:
    """Parse a binary expression; `prefixExpr` is the already-parsed left operand."""
    infixExpr = InfixExpr(None, "", None)
    infixExpr.left = prefixExpr
    # NOTE: the operator's precedence must be captured *before* stepping past
    # the operator token; it becomes the binding power for the right operand.
    opPrece = self.GetCurTokenPrecedence()
    infixExpr.op = self.GetCurTokenAndStepOnce().literal
    infixExpr.right = self.ParseExpr(opPrece)
    return infixExpr
def ParseIndexExpr(self, prefixExpr: Expr) -> Expr:
    """Parse an index access 'ds[index]'; prefixExpr is the indexed value."""
    self.Consume(TokenType.LBRACKET, "Expect '['.")
    node = IndexExpr(None, None)
    node.ds = prefixExpr
    node.index = self.ParseExpr(Precedence.INFIX)
    self.Consume(TokenType.RBRACKET, "Expect ']'.")
    return node
def ParseRefExpr(self) -> Expr:
    """Parse 'ref <expr>'; only identifier expressions may be referenced."""
    self.Consume(TokenType.REF, "Expect 'ref' keyword.")
    refExpr = self.ParseExpr(Precedence.LOWEST)
    # NOTE(review): Assert presumably reports/aborts on this violation — confirm;
    # if it returns, a RefExpr is still built around the non-identifier below.
    if refExpr.Type() != AstType.IDENTIFIER:
        Assert("Invalid reference type, only variable can be referenced.")
    return RefExpr(refExpr)
def ParseLambdaExpr(self) -> Expr:
    """Parse a lambda: 'lambda' '(' params? ')' <scope-body>."""
    self.Consume(TokenType.LAMBDA, "Expect 'lambda' keyword.")
    self.Consume(TokenType.LPAREN, "Expect '(' after keyword 'lambda'.")
    params: list[IdentifierExpr] = []
    if not self.IsMatchCurToken(TokenType.RPAREN):
        # First parameter, then comma-separated tail.
        params.append(self.ParseIdentifierExpr())
        while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
            params.append(self.ParseIdentifierExpr())
    self.Consume(TokenType.RPAREN, "Expect ')' after lambda expr's '('.")
    body = self.ParseScopeStmt()
    return LambdaExpr(params, body)
def ParseFunctionCallExpr(self, prefixExpr: Expr) -> Expr:
    """Parse a call 'callee(arg, ...)'; prefixExpr is the callee expression."""
    call = FunctionCallExpr("", [])
    call.name = prefixExpr
    self.Consume(TokenType.LPAREN, "Expect '('.")
    if not self.IsMatchCurToken(TokenType.RPAREN):
        # First argument, then comma-separated tail.
        call.arguments.append(self.ParseExpr())
        while self.IsMatchCurTokenAndStepOnce(TokenType.COMMA):
            call.arguments.append(self.ParseExpr())
    self.Consume(TokenType.RPAREN, "Expect ')'.")
    return call
def ParseStructCallExpr(self, prefixExpr: Expr) -> Expr:
    """Parse a member access 'callee.member'; prefixExpr is the callee."""
    self.Consume(TokenType.DOT, "Expect '.'.")
    node = StructCallExpr(None, None)
    node.callee = prefixExpr
    node.callMember = self.ParseExpr(Precedence.INFIX)
    return node
| 39.076115
| 273
| 0.637493
| 1,401
| 14,888
| 6.698787
| 0.146324
| 0.044539
| 0.08098
| 0.029728
| 0.324347
| 0.233138
| 0.16292
| 0.130954
| 0.088652
| 0.031966
| 0
| 0.002244
| 0.251612
| 14,888
| 380
| 274
| 39.178947
| 0.840065
| 0.002955
| 0
| 0.163522
| 0
| 0
| 0.06108
| 0
| 0
| 0
| 0
| 0
| 0.009434
| 1
| 0.122642
| false
| 0
| 0.025157
| 0.015723
| 0.374214
| 0.003145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85e03a75a96c393560650c8bb391a58fe00c64f1
| 302
|
py
|
Python
|
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
code/color.py
|
Archkitten/sleep
|
dd81d8fe379d8e37c58b101d78fe258588d6c1bc
|
[
"MIT"
] | null | null | null |
# COLORS — ANSI foreground escape codes (SGR 30–37).
black = "\033[30m"
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
magenta = "\033[35m"
cyan = "\033[36m"
white = "\033[37m"
# NOTE(review): `nc` is named like "no color" but holds a newline; an ANSI
# reset would be "\033[0m" — confirm intent before changing.
nc = "\n"
# COLOR TESTING
def test():
    """Print a few colored sample strings to eyeball the ANSI output."""
    for line in (red + "test",
                 blue + "test2",
                 green + "test3" + "\n" + cyan + "test4" + white):
        print(line)
| 17.764706
| 56
| 0.566225
| 44
| 302
| 3.886364
| 0.636364
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17623
| 0.192053
| 302
| 16
| 57
| 18.875
| 0.52459
| 0.066225
| 0
| 0
| 0
| 0
| 0.311828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.076923
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85e1dc6359b959fbe3bde169c1c1df0d7df72888
| 253
|
py
|
Python
|
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
database/urls.py
|
shrishtickling/train_coding
|
ba2918ce13379940f359e2ae253987691a00f3a9
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views

# Namespace used when reversing these routes, e.g. reverse('database:...').
app_name = 'database'

# Route table: each URL suffix maps to a view callable defined in views.py.
urlpatterns = [
    path('update/', views.update),
    path('update2/', views.update2),
    path('update3/', views.update3),
    path('upload-user/', views.create_user_dataset)
]
| 19.461538
| 51
| 0.675889
| 31
| 253
| 5.419355
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018957
| 0.166008
| 253
| 12
| 52
| 21.083333
| 0.777251
| 0
| 0
| 0
| 0
| 0
| 0.170635
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.222222
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85e31f8319151021136e63792aab66a8fe4825ad
| 421
|
py
|
Python
|
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 1
|
2020-04-22T05:41:08.000Z
|
2020-04-22T05:41:08.000Z
|
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | null | null | null |
scripts/read_radar.py
|
jdiasn/raincoat
|
b0249c88f1a5ca22a720285e87be4b06b67705b5
|
[
"MIT"
] | 4
|
2019-01-01T11:33:14.000Z
|
2021-01-04T20:34:43.000Z
|
from raincoat.radarFunctions import getVarTimeRange, getRadarVar
import pandas as pd

# Load the 'Ze' (radar reflectivity) variable from the sample netCDF file.
# NOTE(review): the reference date string below ('2001.01.01.') does not match
# the file date (2018-12-02) used for start/stop — presumably it is the time
# epoch expected by getRadarVar; confirm against raincoat's documentation.
data = getRadarVar('../samplefiles/radar/181202_000000_P09_ZEN_compact.nc',
                   '2001.01.01. 00:00:00',
                   'Ze')
# Select the first hour of 2018-12-02 between range gates 1 and 2000.
start = pd.to_datetime('2018-12-02 00:00:00', format='%Y-%m-%d %H:%M:%S')
stop = pd.to_datetime('2018-12-02 01:00:00', format='%Y-%m-%d %H:%M:%S')
data = getVarTimeRange(data, 1, 2000, start, stop)
| 35.083333
| 75
| 0.655582
| 68
| 421
| 3.970588
| 0.544118
| 0.074074
| 0.044444
| 0.118519
| 0.266667
| 0.266667
| 0.118519
| 0.118519
| 0.118519
| 0
| 0
| 0.171831
| 0.15677
| 421
| 11
| 76
| 38.272727
| 0.588732
| 0
| 0
| 0
| 0
| 0
| 0.35
| 0.12619
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85e79f4d2b450460c3e188d3ec311565e5eee0d2
| 30,714
|
py
|
Python
|
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
SoundServer.py
|
yoyoberenguer/SoundServer
|
3a824a8f519f205d5f4c277d314cb92732a157b1
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
__version__ = "1.0.1"

# pygame is a hard requirement; fail early with an installation hint.
try:
    import pygame
    from pygame import mixer
except ImportError:
    raise ImportError("\n<pygame> library is missing on your system."
                      "\nTry: \n C:\\pip install pygame on a window command prompt.")

from time import time  # timestamps for SoundObject creation
class SoundObject:
    """Record describing one sound registered with the SoundControl mixer pool."""

    def __init__(self, sound_, priority_: int, name_: str,
                 channel_: int, obj_id_: int, position_: int, loop_: int = False):
        """
        CREATE A SOUND OBJECT CONTAINING CERTAIN ATTRIBUTES (SEE THE
        COMPLETE LIST BELOW)
        :param sound_   : Sound object; Sound object to play
        :param priority_: integer; Sound priority 0..2 (sounds with higher
                          priority must be stopped with specific methods)
        :param name_    : string; Sound given name (if the object has no name -> str(id(sound_))
        :param channel_ : integer; Channel where the sound is being played by the mixer
        :param obj_id_  : python int (C long long int); Sound unique ID
        :param position_: integer | None; Sound position for stereo panning,
                          within range [0...Max display width]
        :param loop_    : int; -1 for looping the sound
        """
        self.sound = sound_
        self.length = sound_.get_length()  # length of this sound in seconds
        # BUG FIX: the original guard was `0 < priority_ < 2`, which accepts
        # only priority 1 and silently demoted the documented highest level
        # (2, see play()'s docstring) to 0. Accept the full 0..2 range.
        self.priority = priority_ if 0 <= priority_ <= 2 else 0
        self.time = time()                 # creation timestamp
        self.name = name_                  # sound name for identification
        self.active_channel = channel_     # channel used
        self.obj_id = obj_id_              # unique sound id number
        self.id = id(self)                 # class instance id
        # Sound position for the stereo panning methods (None = no panning).
        self.pos = position_
        self.loop = loop_
class SoundControl(object):
    # Manages a reserved block of pygame mixer channels and tracks the
    # SoundObject playing on each one (round-robin allocation via play()).
    def __init__(self, screen_size_, channels_: int = 8):
        """
        Reserve `channels_` mixer channels and set up per-channel bookkeeping.

        :param screen_size_: pygame.Rect; size of the active display (used for stereo panning)
        :param channels_   : integer; number of channels to reserve for this controller
        :raises ValueError : on wrong argument types or if the mixer is not initialised
        :return            : None
        """
        if not isinstance(screen_size_, pygame.Rect):
            raise ValueError("\n screen_size_ argument must be a pygame.Rect type, got %s " % type(screen_size_))
        if not isinstance(channels_, int):
            raise ValueError("\n channels_ argument must be a integer type, got %s " % type(channels_))
        assert channels_ >= 1, "\nArgument channel_num_ must be >=1"
        if pygame.mixer.get_init() is None:
            raise ValueError("\nMixer has not been initialized."
                             "\nUse pygame.mixer.init() before starting the Sound controller")
        self.channel_num = channels_                  # number of reserved channels
        self.start = mixer.get_num_channels()         # first reserved channel number
        self.end = self.channel_num + self.start      # one past the last reserved channel
        mixer.set_num_channels(self.end)              # grow the mixer to hold the reserved block
        mixer.set_reserved(self.end)                  # keep pygame from auto-assigning these channels
        self.channels = [mixer.Channel(j + self.start)
                         for j in range(self.channel_num)]  # Channel objects for playback control
        self.snd_obj = [None] * self.channel_num      # SoundObject (or None) per reserved channel
        self.channel = self.start                     # round-robin pointer: next channel to try
        self.all = list(range(self.start, self.end))  # every reserved channel number
        self.screen_size = screen_size_               # display rect, drives stereo_panning()
def update(self):
    """
    Call once per frame from the program's main loop: clears the
    SoundObject slot of every reserved channel that has finished mixing.
    """
    for slot, ch in enumerate(self.channels):
        # get_busy() is False once the channel has gone idle.
        if ch and not ch.get_busy():
            self.snd_obj[slot] = None
# SINGLE SOUND
def update_sound_panning(self, new_x_: int, volume_: float, name_=None, id_=None) -> None:
    """
    Move one sound's stereo panning to a new x position on the display.
    The sound is located by name_ or id_; at least one must be given and
    name_ takes precedence when both are set. Only sounds created with
    panning enabled (obj.pos is not None) are affected.
    :param new_x_ : integer; new x position, must be in [0, display width]
    :param volume_: float; volume applied on top of the pan split, in [0.0, 1.0]
    :param name_  : string | None; sound name given at SoundObject creation
    :param id_    : int | None; sound id such as id(sound_)
    :return       : None
    """
    assert 0 <= new_x_ <= self.screen_size.w, \
        "\nArgument new_x_ value must be in range (0, %s) got %s" % (self.screen_size.w, new_x_)
    # SET THE VOLUME IN CASE OF AN INPUT ERROR
    # NOTE(review): this chained comparison can never be true (it requires
    # volume_ <= 0.0 AND volume_ >= 1.0), so out-of-range volumes are NOT
    # reset; the intent was probably `not 0.0 <= volume_ <= 1.0`.
    if 0.0 >= volume_ >= 1.0:
        volume_ = 1.0
    if name_ is None and id_ is None:
        raise ValueError("\nInvalid function call, at least one argument must be set!")
    # Search by name takes precedence when both keys are supplied.
    if name_ is not None:
        id_ = None
    # Split the volume into left/right channel gains for the new position.
    left, right = self.stereo_panning(new_x_, self.screen_size.w)
    left *= volume_
    right *= volume_
    channels = self.channels
    for obj in self.snd_obj:  # iterate every registered SoundObject
        if obj:
            if hasattr(obj, "pos") and obj.pos is not None:
                # search by name
                if name_ is not None:
                    if hasattr(obj, 'name') and hasattr(obj, 'active_channel'):
                        if obj.name == name_:
                            c = obj.active_channel   # channel playing the sound
                            obj.pos = new_x_         # remember the new position
                            try:
                                channel = channels[c]
                                if hasattr(channel, 'set_volume'):
                                    channel.set_volume(left, right)  # apply the new panning
                                else:
                                    raise AttributeError("\nObject is missing attribute set_volume")
                            except IndexError as e:
                                raise IndexError("\n %s " % e)
                        else:
                            continue
                    else:
                        raise IndexError(
                            "\nSoundObject is missing attribute(s), "
                            "obj must be a SoundObject type got %s " % type(obj))
                # search by id
                elif id_ is not None:
                    if hasattr(obj, 'obj_id') and hasattr(obj, 'active_channel'):
                        if obj.obj_id == id_:
                            c = obj.active_channel   # channel playing the sound
                            obj.pos = new_x_         # remember the new position
                            try:
                                channel = channels[c]
                                if hasattr(channel, 'set_volume'):
                                    channel.set_volume(left, right)  # apply the new panning
                                else:
                                    raise AttributeError("\nObject is missing attribute set_volume")
                            except IndexError as e:
                                raise IndexError("\n %s " % e)
                        else:
                            continue
                else:
                    print('\nFunction call error, at least one search method must'
                          ' be set (search by name or search by id')
                    return
# ALL SOUNDS
def update_sounds_panning(self, new_x_: int, volume_: float) -> None:
    """
    Re-pan every currently playing sound to a new x position.
    Iterates all registered SoundObjects that were created with panning
    enabled and applies the left/right split for new_x_ at volume_.
    :param new_x_ : integer; new x position, must be in [0, display width]
    :param volume_: float; volume applied on top of the pan split, in [0.0, 1.0]
    :return       : None
    """
    assert 0 <= new_x_ <= self.screen_size.w, \
        "\nArgument new_x_ value must be in range (0, %s) got %s" % (self.screen_size.w, new_x_)
    # BUG FIX: the original guard `0.0 >= volume_ >= 1.0` could never be true
    # (it requires volume_ <= 0 AND volume_ >= 1), so an out-of-range volume
    # was used as-is. Clamp invalid values to full volume as intended.
    if not 0.0 <= volume_ <= 1.0:
        volume_ = 1.0
    # Split the volume into left/right channel gains for the new position.
    left, right = self.stereo_panning(new_x_, self.screen_size.w)
    left *= volume_
    right *= volume_
    channels = self.channels
    for obj in self.snd_obj:  # iterate every registered SoundObject
        if obj:
            # Only sounds created with panning enabled carry a position.
            if hasattr(obj, "pos") and obj.pos is not None:
                if hasattr(obj, 'active_channel'):
                    c = obj.active_channel  # channel playing the sound
                    obj.pos = new_x_        # remember the new position
                    try:
                        c = channels[c]
                        if hasattr(c, "set_volume"):
                            c.set_volume(left, right)  # apply the new panning
                        else:
                            raise AttributeError('\nObject is missing attributes set_volume')
                    except IndexError as e:
                        raise IndexError("\n %s " % e)
                else:
                    raise AttributeError(
                        "\nSoundObject is missing attribute(s), "
                        "obj must be a SoundObject type got %s " % type(obj))
def update_volume(self, volume_: float = 1.0) -> None:
    """
    Set the volume of every registered sound immediately (no fade).
    Sounds with a panning position keep their left/right split, scaled
    to the new volume; plain sounds get the volume set on the Sound itself.
    :param volume_: float; volume value in [0.0, 1.0], default 1.0
    :return       : None
    """
    # BUG FIX: the original guard `0.0 >= volume_ >= 1.0` is unsatisfiable,
    # so out-of-range volumes were never reset. Clamp them to 1.0 as intended.
    if not 0.0 <= volume_ <= 1.0:
        volume_ = 1.0
    objs = self.snd_obj
    for i, channel in enumerate(self.channels):
        try:
            single_obj = objs[i]
        except IndexError as e:
            raise IndexError("\n %s " % e)
        if single_obj is not None:
            # WITH PANNING: recompute the left/right split at the new volume.
            if hasattr(single_obj, "pos") and single_obj.pos is not None:
                if hasattr(channel, "set_volume"):
                    left, right = self.stereo_panning(single_obj.pos, self.screen_size.w)
                    channel.set_volume(left * volume_, right * volume_)
            # WITHOUT PANNING: set the volume on the Sound object itself.
            # (The original re-checked `single_obj is not None` here — dead code.)
            else:
                if hasattr(single_obj.sound, "set_volume"):
                    single_obj.sound.set_volume(volume_)
def pause_sound(self, name_: str = None, id_=None) -> None:
    """
    Pause a single sound, located by name_ or id_ (at least one required;
    name_ takes precedence when both are given). Takes effect immediately.
    :param name_: string | None; sound name given at SoundObject creation
    :param id_  : int | None; sound id such as id(sound_)
    :raises ValueError: if neither search key is provided
    :return     : None
    """
    if name_ is None and id_ is None:
        raise ValueError("\nInvalid function call, at least one argument must be set!")
    # Search by name takes precedence when both keys are supplied.
    if name_ is not None:
        id_ = None
    objs = self.snd_obj
    for i, channel in enumerate(self.channels):
        if hasattr(channel, "pause"):
            try:
                single_obj = objs[i]
            except IndexError as e:
                raise IndexError("\n %s " % e)
            if single_obj is not None:
                # search by name
                if name_ is not None:
                    if single_obj.name == name_:
                        channel.pause()
                # search by id_
                elif id_ is not None:
                    if single_obj.obj_id == id_:
                        channel.pause()
    # FIX: removed a stray `...` (Ellipsis) no-op statement left at the end
    # of the original method.
def pause_sounds(self) -> None:
    """Pause every channel that currently has an active sound (immediate)."""
    for idx, channel in enumerate(self.channels):
        try:
            active = self.snd_obj[idx]
        except IndexError as e:
            raise IndexError("\n %s " % e)
        if active is not None and hasattr(channel, "pause"):
            channel.pause()
def unpause_sounds(self) -> None:
    """Resume every channel that currently has an active sound (immediate)."""
    for idx, channel in enumerate(self.channels):
        try:
            active = self.snd_obj[idx]
        except IndexError as e:
            raise IndexError("\n %s " % e)
        if active is not None and hasattr(channel, "unpause"):
            channel.unpause()
def unpause_sound(self, name_: str = None, id_=None) -> None:
    """
    Resume a single paused sound, located by name_ or id_ (at least one
    required; name_ takes precedence when both are given).
    :param name_: string | None; sound name given at SoundObject creation
    :param id_  : int | None; sound id such as id(sound_)
    :raises ValueError: if neither search key is provided
    :return     : None
    """
    if name_ is None and id_ is None:
        raise ValueError("\nInvalid function call, at least one argument must be set!")
    # Search by name takes precedence when both keys are supplied.
    if name_ is not None:
        id_ = None
    objs = self.snd_obj
    i = 0
    for channel in self.channels:
        try:
            single_obj = objs[i]
        except IndexError as e:
            raise IndexError("\n %s " % e)
        if single_obj is not None:
            # search by name
            if name_ is not None:
                if single_obj.name == name_:
                    channel.unpause()
            # search by id_
            elif id_ is not None:
                if single_obj.obj_id == id_:
                    channel.unpause()
        i += 1
def show_free_channels(self) -> list:
    """Print and return the absolute numbers of all idle reserved channels."""
    base = self.start
    free_channels = [n + base
                     for n, ch in enumerate(self.channels)
                     if not ch.get_busy()]
    print("Free channels : %s " % free_channels)
    return free_channels
def show_sounds_playing(self):
    """
    Print name, priority, channel, length and remaining play time of
    every registered sound object.
    """
    # FIX: removed an unused counter `j` that the original incremented
    # but never read.
    for object_ in self.snd_obj:
        if object_:
            timeleft = round(object_.length - (time() - object_.time), 2)
            # A negative remainder usually means the sound loops (see
            # return_time_left); clamp the displayed value to zero.
            if timeleft < 0.0:
                timeleft = 0.0
            print('Name %s priority %s channel %s length(s) %s time left(s) %s' %
                  (object_.name, object_.priority, object_.active_channel, round(object_.length, 2),
                   timeleft))
def get_identical_sounds(self, sound_: pygame.mixer.Sound) -> list:
    """
    Return the channel numbers currently playing the given Sound object.
    :param sound_: pygame.mixer.Sound; sound to compare against
    :return      : list of channel numbers; empty when no match is found
    """
    assert isinstance(sound_, pygame.mixer.Sound), \
        "\nPositional argument sound_ must be a pygame.mixer.Sound type, got %s " % type(sound_)
    return [obj.active_channel
            for obj in self.snd_obj
            if obj and obj.sound == sound_]
def get_identical_id(self, id_: int) -> list:
    """
    Return every registered SoundObject whose obj_id equals id_.
    :param id_: python integer; unique id referencing a sound object
    :return   : list of matching SoundObject instances (may be empty)
    """
    assert isinstance(id_, int), \
        "\nPositional argument id_ must be an int type, got %s " % type(id_)
    return [obj for obj in self.snd_obj if obj and obj.obj_id == id_]
def stop(self, stop_list_: list):
    """
    Stop the sounds playing on the given channel numbers.
    Only sounds with priority 0 are stopped; higher priorities are kept.
    :param stop_list_: python list; absolute channel numbers to stop
    :return          : None
    """
    assert isinstance(stop_list_, list), \
        "\nPositional argument stop_list must be a python list type, got %s " % type(stop_list_)
    start = self.start
    snd_obj = self.snd_obj
    channels = self.channels
    for c in stop_list_:
        l = c - start  # absolute channel number -> local index
        if snd_obj[l]:
            if snd_obj[l].priority == 0:
                channels[l].set_volume(0.0, 0.0)
                channels[l].stop()
    self.update()  # clear the slots of the channels just stopped
def stop_all_except(self, exception_: list):
    """
    Stop every playing sound whose object id is NOT in exception_,
    regardless of its priority.
    :param exception_: list of pygame.Sound object id numbers to keep playing
    :return          : None
    """
    assert isinstance(exception_, list),\
        "\nPositional argument exception_ must be a python list type, got %s " % type(exception_)
    start = self.start
    snd_obj = self.snd_obj
    channels = self.channels
    for c in self.all:
        l = c - start  # absolute channel number -> local index
        snd_object = snd_obj[l]
        if snd_object:
            if snd_object.obj_id not in exception_:
                channels[l].set_volume(0.0)
                channels[l].stop()
    self.update()  # clear the slots of the channels just stopped
def stop_all(self):
    """Stop every reserved channel unconditionally, regardless of priority."""
    base = self.start
    for number in self.all:
        idx = number - base  # absolute channel number -> local index
        if self.snd_obj[idx]:
            ch = self.channels[idx]
            ch.set_volume(0.0)
            ch.stop()
    # Clear the slots of the channels just stopped.
    self.update()
def stop_name(self, name_: str = ""):
    """
    Stop every playing sound whose name matches name_.
    :param name_: string; sound name to stop (given at SoundObject creation)
    :return     : None
    """
    assert isinstance(name_, str),\
        "\nPositional argument name_ must be a python string type, got %s " % type(name_)
    channels = self.channels
    start = self.start
    for sound in self.snd_obj:
        if sound and sound.name == name_:
            try:
                channels[sound.active_channel - start].set_volume(0.0)
                channels[sound.active_channel - start].stop()
            except IndexError:
                # IGNORE ERROR: out-of-range channel index, skip silently
                ...
    self.update()  # clear the slots of the channels just stopped
def stop_object(self, object_id: int):
    """
    Stop the sound(s) whose SoundObject id matches object_id.
    :param object_id: integer; object unique identifier such as id(sound)
    :return         : None
    """
    # BUG FIX: the assert message wrongly said "python string type" while
    # the check is for an int.
    assert isinstance(object_id, int), \
        "\nPositional argument object_id must be a python int type, got %s " % type(object_id)
    channels = self.channels
    start = self.start
    for sound in self.snd_obj:
        if sound and sound.obj_id == object_id:
            try:
                channels[sound.active_channel - start].set_volume(0.0)
                channels[sound.active_channel - start].stop()
            except IndexError:
                # IGNORE ERROR: out-of-range channel index, skip silently
                ...
    self.update()  # clear the slots of the channels just stopped
def return_time_left(self, object_id) -> float:
    """
    Return the seconds left for the sound with the given object id:
    -1.0 when the sound is seamlessly looping, None when the id is not found.
    :param object_id: python integer; unique object id
    :return         : float | None
    """
    j = 0
    snd_obj = self.snd_obj
    for obj in snd_obj:
        if obj:
            if obj.obj_id == object_id:
                # remaining = total length - elapsed since creation
                timeleft = round(snd_obj[j].length - (time() - snd_obj[j].time), 2)
                # A negative remainder means the sound has out-lived its
                # length, i.e. it is most likely looping.
                if timeleft < 0.0:
                    if obj.loop:
                        return -1.0
                    else:
                        timeleft = 0.0
                return timeleft
        j += 1
    return None
def get_reserved_channels(self):
    """ RETURN THE NUMBER OF RESERVED CHANNELS """
    return self.channel_num

def get_reserved_start(self):
    """ RETURN THE FIRST RESERVED CHANNEL NUMBER """
    return self.start

def get_reserved_end(self):
    """ RETURN THE LAST RESERVED CHANNEL NUMBER """
    # NOTE: `end` is one past the last reserved channel (exclusive bound).
    return self.end

def get_channels(self):
    """
    RETURN A LIST OF ALL RESERVED PYGAME MIXER CHANNELS.
    """
    return self.channels
def get_sound(self, channel_):
    """
    Return the mixer Channel object at the given index in the reserved list.
    :param channel_: integer; index into the reserved channel list
    :raises IndexError: when channel_ is out of range
    """
    try:
        sound = self.channels[channel_]
    except IndexError as e:
        # BUG FIX: the original re-raised a bare `Exception`, discarding the
        # error type. Keep it an IndexError (callers catching Exception still
        # work) and chain the original cause.
        raise IndexError('\nIndexError: Channel number out of range ') from e
    else:
        return sound
def get_sound_object(self, channel_):
    """
    Return the SoundObject stored for channel_, or None when the index
    is out of range.
    """
    try:
        return self.snd_obj[channel_]
    except IndexError:
        return None
def get_all_sound_object(self):
    """Return the full list of per-channel SoundObject slots (None = idle)."""
    return self.snd_obj
def play(self, sound_, loop_=0, priority_=0, volume_=1.0,
         fade_in_ms=100, fade_out_ms=100, panning_=False, name_=None,
         x_=None, object_id_=None):
    """
    Play a sound on the current round-robin channel.
    Returns the channel pointer value minus one on success, None when the
    candidate channel is busy or an exception is raised.
    :param sound_      : pygame mixer sound
    :param loop_       : loop the sound indefinitely -1 (default = 0)
    :param priority_   : Set the sound priority (low : 0, med : 1, high : 2)
    :param volume_     : Set the sound volume 0.0 to 1.0 (100% full volume)
    :param fade_in_ms  : Fade in sound effect in ms
    :param fade_out_ms : float; Fade out sound effect in ms
    :param panning_    : boolean for using panning method (stereo mode)
    :param name_       : String representing the sound name (default -> str(id(sound_)))
    :param x_          : Sound position for stereo mode
    :param object_id_  : unique sound id (default -> id(sound_))
    """
    l = 0
    channels = self.channels
    channel = self.channel          # candidate channel (round-robin pointer)
    start = self.start
    end = self.end
    screen_width = self.screen_size.w
    left = 0
    right = 0
    try:
        if not sound_:
            raise AttributeError('\nIncorrect call argument, sound_ cannot be None')
        if panning_:
            # NOTE(review): `0 > x_ > screen_width` can never be true, so in
            # practice only `x_ is None` triggers the recentring — confirm
            # whether out-of-range x_ should also be recentred.
            if x_ is None or (0 > x_ > screen_width):
                x_ = screen_width >> 1  # default to the display centre
        # Panning disabled: position is meaningless, normalise to None.
        else:
            x_ = None
        # Default name / id are derived from the sound object itself.
        if name_ is None:
            name_ = str(id(sound_))
        if object_id_ is None:
            object_id_ = id(sound_)
        l = channel - start  # local index of the candidate channel
        # TODO OVERFLOW CHANNELS[l]
        # CHECK IF CURRENT CHANNEL IS BUSY
        if channels[l].get_busy() == 0:
            # Channel free: set stereo or mono volume, then start playback.
            if panning_:
                left, right = self.stereo_panning(x_, self.screen_size.w)
                channels[l].set_volume(left * volume_, right * volume_)
            else:
                channels[l].set_volume(volume_)
            channels[l].fadeout(fade_out_ms)
            channels[l].play(sound_, loops=loop_, maxtime=0, fade_ms=fade_in_ms)
            self.snd_obj[l] = SoundObject(sound_, priority_, name_, l, object_id_, position_ = x_, loop_ = loop_)
            # Advance the round-robin pointer, wrapping to the first channel.
            self.channel += 1
            if self.channel > end - 1:
                self.channel = start
            # NOTE(review): returns `channel - 1`, one less than the channel
            # just used — looks like an off-by-one; confirm against callers.
            return channel - 1
        # Candidate channel busy: stop identical sounds and advance anyway.
        else:
            self.stop(self.get_identical_sounds(sound_))
            # VERY IMPORTANT, GO TO NEXT CHANNEL.
            self.channel += 1
            if self.channel > end - 1:
                self.channel = start
            return None
    except IndexError as e:
        print('\n[-] SoundControl error : %s ' % e)
        print(self.channel, l)
        return None
def display_size_update(self, rect_):
    """
    Refresh the cached display rect after a display-mode change;
    the rect drives the stereo panning calculations.
    :param rect_: pygame.Rect; new display dimensions
    :return: None
    """
    self.screen_size = rect_
def stereo_panning(self, x_, screen_width):
    """
    Split a unit volume into (left, right) gains for an x position.
    :param x_          : integer; x position of the sound on screen
    :param screen_width: integer; display width in pixels
    :return            : tuple of floats (left, right) in [0.0, 1.0];
                         (0.0, 0.0) when x_ is off-screen (sound muted)
    """
    # BUG FIX: the original mute guard `0 > x_ > screen_width` can never be
    # true (x_ cannot be both negative and larger than the width), so
    # off-screen positions produced gains outside [0, 1] instead of muting.
    if not 0 <= x_ <= screen_width:
        return 0.0, 0.0
    right_volume = float(x_) / screen_width
    left_volume = 1.0 - right_volume
    return left_volume, right_volume
| 39.226054
| 119
| 0.513088
| 3,516
| 30,714
| 4.34215
| 0.092435
| 0.013362
| 0.013755
| 0.007926
| 0.449008
| 0.407873
| 0.399555
| 0.383245
| 0.373354
| 0.363726
| 0
| 0.008125
| 0.418929
| 30,714
| 782
| 120
| 39.276215
| 0.847313
| 0.297454
| 0
| 0.581948
| 0
| 0.002375
| 0.091607
| 0
| 0
| 0
| 0
| 0.001279
| 0.021378
| 1
| 0.071259
| false
| 0
| 0.011876
| 0
| 0.135392
| 0.011876
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85e90c8a65010ce9ecba5749d22457498fa4d999
| 2,931
|
py
|
Python
|
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | 1
|
2020-04-01T05:45:41.000Z
|
2020-04-01T05:45:41.000Z
|
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | null | null | null |
tests/extmethods/run.py
|
dariobig/pyangbind
|
db0808f719bb963dac85606fddd65a1930a84aef
|
[
"Apache-2.0"
] | 3
|
2016-11-01T23:51:35.000Z
|
2018-05-23T10:09:08.000Z
|
#!/usr/bin/env python

import os
import sys
import getopt

TESTNAME = "extmethods"  # base name of the YANG module under test (<dir>/extmethods.yang)
class extmethodcls(object):
    """Stub extension-method receiver: every hook returns a fixed marker
    string so the test can verify that pyangbind dispatched to it."""

    def commit(self, *args, **kwargs):
        """Marker for the commit hook."""
        return "COMMIT_CALLED"

    def presave(self, *args, **kwargs):
        """Marker for the presave hook."""
        return "PRESAVE_CALLED"

    def postsave(self, *args, **kwargs):
        """Marker for the postsave hook."""
        return "POSTSAVE_CALLED"

    def oam_check(self, *args, **kwargs):
        """Marker for the oam_check hook."""
        return "OAM_CHECK_CALLED"

    def echo(self, *args, **kwargs):
        """Echo positional and keyword arguments back for inspection."""
        return {'args': args, 'kwargs': kwargs}
# generate bindings in this folder
# generate bindings in this folder
def main():
    """Generate pyangbind bindings for the extmethods YANG module and
    verify that extmethods registered for /item/one are exposed (with a
    leading underscore) on the bound object and behave as expected.

    Environment: PYANGPATH and PYANGBINDPATH must be set; optionally
    PATH_TO_PYBIND_TEST_PYTHON selects the interpreter used to run
    pyang.  Pass -k/--keepfiles to keep the generated bindings.py.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "k", ["keepfiles"])
    except getopt.GetoptError:
        # Unknown argument: bail out with a distinctive exit code.
        sys.exit(127)

    k = False
    for o, a in opts:
        if o in ["-k", "--keepfiles"]:
            k = True

    # Resolve interpreter and tool locations from the environment.
    pythonpath = os.environ.get("PATH_TO_PYBIND_TEST_PYTHON") if \
        os.environ.get('PATH_TO_PYBIND_TEST_PYTHON') is not None \
        else sys.executable
    pyangpath = os.environ.get('PYANGPATH') if \
        os.environ.get('PYANGPATH') is not None else False
    pyangbindpath = os.environ.get('PYANGBINDPATH') if \
        os.environ.get('PYANGBINDPATH') is not None else False
    assert pyangpath is not False, "could not find path to pyang"
    assert pyangbindpath is not False, "could not resolve pyangbind directory"

    # Build and run the pyang command that generates bindings.py next to
    # this file.
    this_dir = os.path.dirname(os.path.realpath(__file__))
    cmd = "%s " % pythonpath
    cmd += "%s --plugindir %s/pyangbind/plugin" % (pyangpath, pyangbindpath)
    cmd += " -f pybind -o %s/bindings.py" % this_dir
    cmd += " -p %s" % this_dir
    cmd += " --use-extmethods"
    cmd += " %s/%s.yang" % (this_dir, TESTNAME)
    os.system(cmd)

    extdict = {
        '/item/one': extmethodcls()
    }

    from bindings import extmethods
    x = extmethods(extmethods=extdict)

    # Each tuple: (method name, should exist, expected return value).
    results = [
        ("commit", True, "COMMIT_CALLED"),
        ("presave", True, "PRESAVE_CALLED"),
        ("postsave", True, "POSTSAVE_CALLED"),
        ("oam_check", True, "OAM_CHECK_CALLED"),
        ("doesnotexist", False, "")
    ]

    for chk in results:
        method = getattr(x.item.one, "_" + chk[0], None)
        # BUG FIX: the original formatted a two-%s message with a single
        # object ("% method"), which raised TypeError instead of showing
        # the intended assertion message when a check failed.
        assert (method is not None) == chk[1], \
            "Method %s retrieved incorrectly, method was: %s" % (chk[0], method)
        if method is not None:
            result = method()
            assert result == chk[2], "Incorrect return from %s -> %s != %s" \
                % (chk[0], result, chk[2])

    # extmethod calls forward args/kwargs plus caller context.
    expected_return = {'args': ('one',), 'kwargs': {'caller': ['item', 'one'],
                       'two': 2, 'path_helper': False}}
    assert x.item.one._echo('one', two=2) == expected_return, \
        "args+kwargs not echoed correctly"

    # Setting an attribute that no extmethod defines must be rejected.
    try:
        x.item.two = False
        assert False, \
            "incorrectly set an attribute that did not exist in extmethods"
    except AttributeError:
        pass

    if not k:
        os.system("/bin/rm %s/bindings.py" % this_dir)
        os.system("/bin/rm %s/bindings.pyc" % this_dir)
# Run the extmethods test when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 29.019802
| 76
| 0.604572
| 375
| 2,931
| 4.610667
| 0.314667
| 0.040486
| 0.041643
| 0.057837
| 0.122036
| 0.064777
| 0.039329
| 0.039329
| 0
| 0
| 0
| 0.004986
| 0.247356
| 2,931
| 100
| 77
| 29.31
| 0.778785
| 0.018083
| 0
| 0.026316
| 0
| 0
| 0.258345
| 0.018081
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.078947
| false
| 0.013158
| 0.052632
| 0.065789
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85eb93c822a019fc750d57de9e82b6de5c0352f3
| 790
|
py
|
Python
|
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
scripts/solved/031_TRAN.py
|
akikuno/rosalind
|
7015dc63e493d870e5789e99f2ee523a9b1f5ab9
|
[
"MIT"
] | null | null | null |
# https://rosalind.info/problems/tran/
# Input FASTA with the two aligned sequences; the path is relative to
# the working directory the script is launched from.
file = "data/tran.txt"
def read_fasta(file: str):
    """Parse a (possibly multi-line) FASTA file.

    Args:
        file: path of the FASTA file.

    Returns:
        A pair ``(headers, seqs)``: ``headers[i]`` is the text after the
        ``>`` of record *i* and ``seqs[i]`` is that record's sequence
        with line breaks removed.  The two lists always have equal
        length (a header with no body yields an empty sequence).
    """
    headers = []
    seqs = []
    with open(file) as fh:
        for line in fh.read().splitlines():
            # Headers are identified by a leading ">" (the original used
            # a substring test, which could misfire on any line merely
            # containing ">", and could misalign the two lists when a
            # record had an empty body).
            if line.startswith(">"):
                headers.append(line[1:])
                seqs.append("")
            elif seqs:
                # Continuation line: append to the current record.
                seqs[-1] += line
    return headers, seqs
# Split the two aligned sequences out of the parsed FASTA records.
_, seq = read_fasta(file)
seq1, seq2 = seq
# Count point substitutions between the two sequences, split into
# transitions (A<->G, C<->T — the four pairs matched by the regex
# below) and transversions (every other mismatch).
transition = 0
transversion = 0
import re
for s1, s2 in zip(seq1, seq2):
    if s1 == s2:
        # Identical bases: not a substitution.
        continue
    s = s1 + s2
    if re.match(r"(AG)|(GA)|(CT)|(TC)", s):
        transition += 1
    else:
        transversion += 1
# Rosalind TRAN answer: the transition/transversion ratio.
# NOTE(review): raises ZeroDivisionError if there are no transversions.
print(transition / transversion)
| 16.458333
| 43
| 0.501266
| 104
| 790
| 3.778846
| 0.509615
| 0.068702
| 0.066158
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031683
| 0.360759
| 790
| 47
| 44
| 16.808511
| 0.746535
| 0.08481
| 0
| 0.129032
| 0
| 0
| 0.047009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.032258
| 0
| 0.096774
| 0.032258
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ef49b97d17705c81cdeeb0ece8add9c7768f1d
| 6,718
|
py
|
Python
|
extract_data_1.1.py
|
stanlee321/bolivia_power
|
4c86be2be8b81fead5ba9f1d50f32233cd54c1fc
|
[
"MIT"
] | null | null | null |
extract_data_1.1.py
|
stanlee321/bolivia_power
|
4c86be2be8b81fead5ba9f1d50f32233cd54c1fc
|
[
"MIT"
] | null | null | null |
extract_data_1.1.py
|
stanlee321/bolivia_power
|
4c86be2be8b81fead5ba9f1d50f32233cd54c1fc
|
[
"MIT"
] | null | null | null |
# Code for extracting information from the web using the <id> column of
# the bolivia_power_1.csv file.
# input: bolivia_power_1.id.csv
# output 6x.npy array file:
# <nodes_ids.lat,lon> <node.tags>
# <way.ids> <way.ref> <way.tags>
# ...
# v. 1.1
#import pandas as pd
import numpy as np
import pandas as pd
# Data from Bolivia_power
path_to_csv_power_data = '/notebooks/Power/data/bolivia_power_1.csv'
# NOTE(review): error_bad_lines is removed in modern pandas (use
# on_bad_lines='skip'), and delimiter/sep are aliases — passing both is
# redundant and rejected by newer versions; confirm the pinned pandas.
df_bolivia_power= pd.read_csv(path_to_csv_power_data,delimiter=',',sep=',', error_bad_lines=False)
df_bolivia_power.columns = ['type','id','name_1','name_2','name_3','name_4']
df_bolivia_power.head()
# Element type ('node'/'way'/'relation') and numeric id as arrays.
df2_type = np.asarray(df_bolivia_power['type'])
df2_id = np.asarray(df_bolivia_power['id'])
# Back to pandas DataFrames.
data_frame_type = pd.DataFrame(df2_type)
data_frame_id = pd.DataFrame(df2_id)
print(len(df2_type))
# Combine into a single two-column DataFrame: start from a placeholder
# array of ones, then overwrite both columns with the real data.
M = np.ones((len(df2_type),2))
data_frame = pd.DataFrame(M, columns=['type', 'id'])
data_frame['type'] = data_frame_type
data_frame['id'] = data_frame_id
data_frame.head()
## Extracting the data from the web
import urllib.request
from urllib.error import URLError, HTTPError
print("starting to download the files...")
# Helper for converting the XML response strings into DataFrames.
##################FOR NODES #####################
import xml.etree.ElementTree as ET
#################################################
def iter_docs(author):
    """Yield one dict per ``<node>`` element found under *author*.

    Each yielded dict merges the parent element's attributes with the
    node's own attributes and stores the node's text under ``'data'``.
    """
    parent_attrs = author.attrib
    for node in author.iterfind('.//node'):
        record = dict(parent_attrs)
        record.update(node.attrib)
        record['data'] = node.text
        yield record
def extract_data():
    """Download every OSM element listed in ``data_frame`` and dump the
    decoded XML responses to one .npy file per element kind.

    Each row's type ('node'/'way'/'relation') and numeric id form an
    api.openstreetmap.org URL.  After every successful download the
    whole accumulated list for that kind is re-dumped to disk, so
    partial progress survives a crash.  HTTP/URL errors are reported
    and the row is skipped.

    Fixes over the original:
      * the manual row index ``r`` could fall out of step with the
        iterated 'type' column (it was only incremented on some paths),
        silently pairing ids with the wrong types — ``enumerate`` keeps
        them aligned;
      * the dead ``if HTTPError == True: pass`` blocks are removed;
      * the three copy-pasted branches (whose progress counters were
        inconsistent: the node branch counted only nodes) are unified.
    """
    # Accumulated decoded XML per element kind, plus each dump target.
    buckets = {
        'node': ([], '/notebooks/Power/data/nodes.npy'),
        'way': ([], '/notebooks/Power/data/ways.npy'),
        'relation': ([], '/notebooks/Power/data/relations.npy'),
    }
    done = 0
    for r, x in enumerate(data_frame['type']):
        n = data_frame['id'][r]
        if x not in buckets:
            # Unknown element kind: nothing to fetch for this row.
            continue
        url = 'http://api.openstreetmap.org/api/0.6/' + x + '/%d' % n
        try:
            page = urllib.request.urlopen(url)
        except HTTPError:
            print('The server couldn\'t fulfill the request...node')
            continue
        except URLError:
            print('We failed to reach a server...node')
            continue
        items, out_path = buckets[x]
        items.append(page.read().decode())
        done += 1
        print(".....%s...: " % x + "%d" % n)
        print(url)
        print(done, '/', data_frame.shape[0])
        # Re-dump after every download so progress is kept on disk.
        np.array(items).dump(open(out_path, 'wb'))
        print(items[-1])
    print("sussessful ...!!!!!!!!!!!!")
    print("check your disk... :P")
# Kick off the download; .npy result files are written as it runs.
extract_data()
print('finished node,way,relation')
print('saving list arrays into disk....')
#node, ways, relations = extract_data()
"""""
xml_data = node[0]
etree = ET.fromstring(xml_data) #create an ElementTree object
d = pd.DataFrame(list(iter_docs(etree)))
data_list=[] # create list for append every dataframe
for i in range(1,len(node)):
xml_data = node[i]
etree = ET.fromstring(xml_data) #create an ElementTree object
doc_df = pd.DataFrame(list(iter_docs(etree)))
data_list.append(doc_df)
d = d.append(data_list[-1],ignore_index=True)
d.head()
d.to_csv('/notebooks/Power/data/power_node.csv', sep=',', encoding='utf-8',index = False)
#########################################################################################
##############################################FUR WAYS#####################################################################
def iter_docs_way(author):
author_attr = author.attrib
for doc in author.iterfind('.//way'):
doc_dict = author_attr.copy()
doc_dict.update(doc.attrib)
doc_dict['data'] = doc.text
yield doc_dict
xml_data = node[0]
etree = ET.fromstring(xml_data) #create an ElementTree object
w = pd.DataFrame(list(iter_docs(etree)))
data_list_way=[] # create list for append every dataframe
for i in range(1,len(way)):
xml_data = node[i]
etree = ET.fromstring(xml_data) #create an ElementTree object
doc_df = pd.DataFrame(list(iter_docs_way(etree)))
data_list.append(doc_df)
w = w.append(data_list[-1],ignore_index=True)
w.head()
w.to_csv('/notebooks/Power/data/power_way.csv', sep=',', encoding='utf-8',index = False)
#########################################################################################
########################################################## FUR Relation ##################################################
def iter_docs_rel(author):
author_attr = author.attrib
for doc in author.iterfind('.//way'):
doc_dict = author_attr.copy()
doc_dict.update(doc.attrib)
doc_dict['data'] = doc.text
yield doc_dict
xml_data = node[0]
etree = ET.fromstring(xml_data) #create an ElementTree object
r = pd.DataFrame(list(iter_docs_rel(etree)))
data_list_way=[] # create list for append every dataframe
for i in range(1,len(relation)):
xml_data = node[i]
etree = ET.fromstring(xml_data) #create an ElementTree object
doc_df = pd.DataFrame(list(iter_docs_rel(etree)))
data_list.append(doc_df)
r = r.append(data_list[-1],ignore_index=True)
r.head()
r.to_csv('/notebooks/Power/data/power_rel.csv', sep=',', encoding='utf-8',index = False) """
| 28.108787
| 124
| 0.539744
| 854
| 6,718
| 4.094848
| 0.181499
| 0.033457
| 0.036031
| 0.034315
| 0.556763
| 0.49471
| 0.457535
| 0.406634
| 0.368316
| 0.356305
| 0
| 0.010872
| 0.246949
| 6,718
| 239
| 125
| 28.108787
| 0.680372
| 0.084102
| 0
| 0.153846
| 0
| 0
| 0.190332
| 0.04139
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0.025641
| 0.064103
| 0
| 0.089744
| 0.25641
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85f402a990563be3704e3ce90f8e5fbc80ebcb6e
| 526
|
py
|
Python
|
Practice/Problem Solving/MaximizingXOR.py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | 1
|
2018-07-08T15:44:15.000Z
|
2018-07-08T15:44:15.000Z
|
Practice/Problem Solving/MaximizingXOR.py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | null | null | null |
Practice/Problem Solving/MaximizingXOR.py
|
avantikasharma/HackerRank-Solutions
|
a980859ac352688853fcbcf3c7ec6d95685f99ea
|
[
"MIT"
] | 2
|
2018-08-10T06:49:34.000Z
|
2020-10-01T04:50:59.000Z
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the maximizingXor function below.
# Complete the maximizingXor function below.
def maximizingXor(l, r):
    """Return max(a ^ b) over all pairs with l <= a, b <= r.

    The answer depends only on the most significant bit where l and r
    differ: below that bit, some pair in [l, r] realises every bit
    combination, so the maximum XOR is a run of ones from that bit
    down.  This replaces the original brute-force O((r - l)^2) double
    loop with an O(1) computation; results are identical.
    """
    return (1 << (l ^ r).bit_length()) - 1
# HackerRank harness: read l and r from stdin, write the answer to the
# file named by the OUTPUT_PATH environment variable.
if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    l = int(input())
    r = int(input())
    result = maximizingXor(l, r)
    fptr.write(str(result) + '\n')
    fptr.close()
| 17.533333
| 47
| 0.58365
| 72
| 526
| 4.138889
| 0.583333
| 0.026846
| 0.100671
| 0.060403
| 0.067114
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01847
| 0.279468
| 526
| 29
| 48
| 18.137931
| 0.76781
| 0.106464
| 0
| 0
| 0
| 0
| 0.047009
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.263158
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85f686d400d73419843a0643d08f81afb4fe05ef
| 4,417
|
py
|
Python
|
interface_report_interactive.py
|
hpreston/network_info_scripts
|
b25076eb6f55a7f7335f6cae1a4c3c00ce9aa191
|
[
"MIT"
] | 20
|
2019-05-11T03:08:52.000Z
|
2022-01-13T13:44:22.000Z
|
interface_report_interactive.py
|
hpreston/network_info_scripts
|
b25076eb6f55a7f7335f6cae1a4c3c00ce9aa191
|
[
"MIT"
] | 4
|
2020-02-26T23:25:59.000Z
|
2021-12-13T19:59:01.000Z
|
interface_report_interactive.py
|
hpreston/network_info_scripts
|
b25076eb6f55a7f7335f6cae1a4c3c00ce9aa191
|
[
"MIT"
] | 8
|
2019-05-20T02:27:40.000Z
|
2021-07-07T18:49:45.000Z
|
#! /usr/bin/env python
"""Exploring Genie's ability to gather details and write to CSV
This script is meant to be run line by line interactively in a Python
interpretor (such as iPython) to learn how the Genie and csv libraries work.
This script assumes you have a virl simulation running and a testbed file
created.
Example:
virl up --provision virlfiles/5_router_mesh
virl generate pyats -o testbed.yaml
Copyright (c) 2018 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Import the Genie library
# NOTE(review): genie is Cisco's third-party pyATS parsing library; this
# whole script assumes a running VIRL simulation (see module docstring).
from genie.conf import Genie
# Create a testbed object
testbed = Genie.init("testbed.yaml")
# Take a look at the devices that are in the testbed
print(testbed.devices)
# Create a "convenience" variable for one device
iosv1 = testbed.devices["iosv-1"]
# Connect to the router
iosv1.connect()
# Check that you are connected
iosv1.connected
# Run the "show interfaces" command and "parse" results to Python object
interfaces = iosv1.parse("show interfaces")
# Print the parsed data
print(interfaces)
# That's a lot of data, let's explore it some..
# Look at the first set of dictionary keys available
interfaces.keys()
# Now let's checkout one interface in a pretty printed way
from pprint import pprint
pprint(interfaces["GigabitEthernet0/0"])
# Much nicer... now let's just get the mac-address for one interface
interfaces["GigabitEthernet0/0"]["mac_address"]
# Suppose we wanted the IP address...
interfaces["GigabitEthernet0/0"]["ipv4"]
# Now let's create a CSV file of the MAC Addresses for each interface
# Import in the CSV library
import csv
# Name our CSV file
interface_file = "interfaces.csv"
# Let's setup the headers for our CSV file
report_fields = ["Interface", "MAC Address"]
# Now let's open up our file and create our report
# This whole block of text from `with` and everything
# indented under it will run at once. Copy or type it all in.
# DON'T FORGET TO SPACE OVER IF TYPING MANUALLY
with open(interface_file, "w") as f:
    # Create a DictWriter object
    writer = csv.DictWriter(f, report_fields)
    # Write the header row
    writer.writeheader()
    # Loop over each interface and write a row
    # (this version raises KeyError on purpose — see below)
    for interface, details in interfaces.items():
        writer.writerow({"Interface": interface, "MAC Address": details["mac_address"]})
# Uh oh.. did you get a "KeyError: 'mac_address'"?
# That's because Loopbacks do NOT have mac_addresses.
# See for yourself...
interfaces["Loopback0"].keys()
# So we need to create our code so we can handle interfaces without mac-addresses
# Several ways you COULD do it, here's one. A "try... except... " block
with open(interface_file, "w") as f:
    writer = csv.DictWriter(f, report_fields)
    writer.writeheader()
    for interface, details in interfaces.items():
        # Try to write a row with a mac-address
        try:
            writer.writerow(
                {
                    "Interface": interface,
                    "MAC Address": details["mac_address"],
                }
            )
        except KeyError:
            # If there isn't one... use "N/A"
            writer.writerow(
                {
                    "Interface": interface,
                    "MAC Address": "N/A"}
            )
# Great... let's see what was written.
# Open up the file again for "r"eading (also the default)
with open(interface_file, "r") as f:
    # Just print it out
    print(f.read())
# Great job!
| 33.462121
| 88
| 0.713607
| 664
| 4,417
| 4.725904
| 0.406627
| 0.031867
| 0.008923
| 0.020076
| 0.110261
| 0.110261
| 0.053537
| 0.037604
| 0.037604
| 0
| 0
| 0.005153
| 0.209192
| 4,417
| 131
| 89
| 33.717557
| 0.893215
| 0.66561
| 0
| 0.292683
| 0
| 0
| 0.162369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.073171
| 0
| 0.073171
| 0.121951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85f74ccca3d8f227ec09283215d9c1ace1b61121
| 1,159
|
py
|
Python
|
app/priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
app/priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
app/priu.py
|
robhaswell/powerstrip-restrict-image-user
|
d6a5dbb19330f1ee5b384095c1010636af12120d
|
[
"Apache-2.0"
] | null | null | null |
import os, sys
import json as _json
from flask import Flask, Response, request
app = Flask(__name__)
app.debug = True
import lib
@app.route("/", methods=["HEAD", "GET", "POST", "DELETE", "PUT"])
def adapter():
    """Powerstrip pre-hook endpoint restricting which image users may run.

    Parses the ClientRequest envelope posted by Powerstrip, extracts the
    image name from the embedded Docker request body, and returns 403
    when the image's user/namespace prefix is not the configured
    ALLOWED_USER.  Official images (no "/" in the name) are mapped to
    the sentinel user "_".
    """
    json = request.get_data()
    decoded = _json.loads(json)
    # The Docker request body is itself JSON, nested inside the envelope.
    docker_json = _json.loads(decoded['ClientRequest']['Body'])
    image = docker_json['Image']
    if "/" not in image:
        # Official images have no namespace; use the "_" sentinel.
        user = "_"
    else:
        user = image.split("/")[0]
    if user != app.config['ALLOWED_USER']:
        # Disallowed namespace: refuse the request outright.
        return '', 403
    # Allowed: build the pre-hook response from the original request
    # (presumably passing it through unmodified — confirm in lib).
    response = lib.pre_hook_response(
        decoded['ClientRequest']['Method'],
        decoded['ClientRequest']['Request'],
        decoded['ClientRequest']['Body'],
    )
    return Response(response, mimetype="application/json")
# Entry point: the allowed image user comes from the USER environment
# variable; without it the adapter cannot enforce anything, so exit.
if __name__ == "__main__":
    try:
        app.config['ALLOWED_USER'] = os.environ['USER']
    except KeyError:
        sys.stdout.write("""Error: Configuration environment variable USER not provided.
Specify an image username on the Docker command-line by using docker run -e USER=<user>.
Use the user "_" to only allow official Docker images.
""")
        sys.exit(1)
    # Port 80: intended to run inside a container.
    app.run(port=80)
| 26.340909
| 88
| 0.637619
| 143
| 1,159
| 5
| 0.552448
| 0.111888
| 0.067133
| 0.055944
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007675
| 0.213115
| 1,159
| 43
| 89
| 26.953488
| 0.776316
| 0
| 0
| 0
| 0
| 0
| 0.311475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.117647
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85f7c87317fb94af50f148e6f619929fe75f47af
| 1,316
|
py
|
Python
|
app/gather/api/serializers.py
|
eHealthAfrica/gather
|
88d96009c5f9832b564d13fa66d63841a7fbcd90
|
[
"Apache-2.0"
] | 2
|
2019-09-25T18:37:30.000Z
|
2019-09-25T18:37:39.000Z
|
app/gather/api/serializers.py
|
eHealthAfrica/gather
|
88d96009c5f9832b564d13fa66d63841a7fbcd90
|
[
"Apache-2.0"
] | 41
|
2015-07-29T14:10:05.000Z
|
2021-09-13T07:07:41.000Z
|
app/gather/api/serializers.py
|
eHealthAfrica/gather
|
88d96009c5f9832b564d13fa66d63841a7fbcd90
|
[
"Apache-2.0"
] | 2
|
2019-11-12T23:09:35.000Z
|
2020-03-11T16:39:35.000Z
|
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aether.sdk.multitenancy.serializers import (
DynamicFieldsModelSerializer,
MtPrimaryKeyRelatedField,
MtModelSerializer,
)
from .models import Survey, Mask
class MaskSerializer(DynamicFieldsModelSerializer):
    """Serializer exposing all Mask fields.

    The ``survey`` relation uses the multitenancy-aware PK field from
    aether.sdk (presumably restricting choices to the current tenant's
    surveys — confirm against MtPrimaryKeyRelatedField).
    """

    survey = MtPrimaryKeyRelatedField(
        required=True,
        queryset=Survey.objects.all(),
    )

    class Meta:
        model = Mask
        fields = '__all__'
class SurveySerializer(MtModelSerializer):
    """Serializer exposing all Survey fields plus nested read-only masks.

    Each nested mask omits its own ``survey`` field, which would be
    redundant inside the parent survey.
    """

    masks = MaskSerializer(omit=('survey', ), many=True, read_only=True)

    class Meta:
        model = Survey
        fields = '__all__'
| 28
| 75
| 0.729483
| 159
| 1,316
| 5.981132
| 0.616352
| 0.063091
| 0.02734
| 0.033649
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007547
| 0.194529
| 1,316
| 46
| 76
| 28.608696
| 0.889623
| 0.527356
| 0
| 0.210526
| 0
| 0
| 0.033113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.105263
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85fd9ccfe64a572bc3232cd253f5cd2894061049
| 1,514
|
py
|
Python
|
src/utils/jupyter_setup.py
|
paxtonedgar/MisInfo
|
81b32fa3e7d0d204feb83e10169093f45727a2ea
|
[
"MIT"
] | null | null | null |
src/utils/jupyter_setup.py
|
paxtonedgar/MisInfo
|
81b32fa3e7d0d204feb83e10169093f45727a2ea
|
[
"MIT"
] | null | null | null |
src/utils/jupyter_setup.py
|
paxtonedgar/MisInfo
|
81b32fa3e7d0d204feb83e10169093f45727a2ea
|
[
"MIT"
] | null | null | null |
# built-in
import os
import logging
# installed
import pandas as pd
import seaborn as sns
from matplotlib import pylab
# custom
import src.settings
from src.utils.log_utils import setup_logging, LogLevel
from src.utils.config_loader import ConfigLoader, Config
def setup_jupyter(
    root_dir: str, config_path: str = None,
    logging_level: LogLevel = logging.DEBUG
) -> Config:
    """Initialise settings, logging and plotting defaults for a notebook.

    :param root_dir: project root directory containing ``logging.json``
    :type root_dir: str
    :param config_path: path handed to ``ConfigLoader.load_config``,
        defaults to None
    :type config_path: str, optional
    :param logging_level: level passed to ``setup_logging``,
        defaults to logging.DEBUG
    :type logging_level: LogLevel, optional
    :return: the loaded configuration object
    :rtype: Config
    """
    src.settings.init()
    loaded_config = ConfigLoader.load_config(config_path)
    print('Config loaded.')
    setup_logging(
        os.path.join(root_dir, 'logging.json'), logging_level=logging_level
    )
    # Seaborn defaults: muted palette on a 12x8 figure.
    sns.set()
    sns.set_palette(sns.color_palette('muted'))
    sns.set(rc={'figure.figsize': (12, 8)})
    # Pandas display: 4 decimal places, wide columns, up to 200 rows.
    pd.options.display.float_format = '{:.4f}'.format
    pd.set_option('max_colwidth', 800)
    pd.set_option('display.max_rows', 200)
    # Matplotlib font and figure-size defaults.
    pylab.rcParams.update({
        'legend.fontsize': 16,
        'figure.figsize': (10, 8),
        'axes.labelsize': 16,
        'axes.titlesize': 16,
        'xtick.labelsize': 16,
        'ytick.labelsize': 16
    })
    print('Setup done')
    return loaded_config
| 26.103448
| 75
| 0.664465
| 191
| 1,514
| 5.13089
| 0.439791
| 0.061224
| 0.02449
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019426
| 0.217966
| 1,514
| 57
| 76
| 26.561404
| 0.808277
| 0.225892
| 0
| 0
| 0
| 0
| 0.157003
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.222222
| 0
| 0.277778
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85fdbccdde41392a6f2e6723a8450dd58d4c3c85
| 3,169
|
py
|
Python
|
EclipseOJ/contests/models.py
|
cs251-eclipse/eclipseOJ
|
ad93bf65014e87051278026f87b6b92afdaed349
|
[
"MIT"
] | null | null | null |
EclipseOJ/contests/models.py
|
cs251-eclipse/eclipseOJ
|
ad93bf65014e87051278026f87b6b92afdaed349
|
[
"MIT"
] | null | null | null |
EclipseOJ/contests/models.py
|
cs251-eclipse/eclipseOJ
|
ad93bf65014e87051278026f87b6b92afdaed349
|
[
"MIT"
] | 1
|
2020-06-06T21:05:09.000Z
|
2020-06-06T21:05:09.000Z
|
from django.db import models
from django.contrib.auth.models import User
from core.models import Profile
from array import *
from datetime import datetime
from django.utils import timezone
class Contest(models.Model):
    """
    A scheduled competition.  Contests contain problems; users register
    for a contest, and that is how they compete among one another.
    """
    start_time = models.DateTimeField(
        help_text="This is a DateTimeField used to store start time of contest"
    )
    end_time = models.DateTimeField(
        help_text="This is a DateTimeField used to store end time of contest"
    )
    registered_user = models.ManyToManyField(
        User, blank = True,
        help_text="This is a ManyToManyField field between :model:`auth.User` and contest. Multiple users will register any contests, this field anables direct access to list of users registered for contests. Also this stores in users which contests they registered for"
    )
    name = models.CharField(
        max_length=200,
        blank=True,
        help_text="This is the name of the contest"
    )
    completed = models.BooleanField(
        default=False,
        help_text="This is a boolean variable that automatically gets updated once the contests is completed."
    )

    def __str__(self):
        """Human-readable label, e.g. ``Contest 3: Weekly Round``."""
        return 'Contest {}: {}'.format(str(self.id), self.name)
class Score(models.Model):
    """
    The performance of one user in one contest: overall score, a solved
    flag per problem slot (A-F), and head-to-head wins.

    NOTE(review): the ForeignKey fields pass no ``on_delete``, which is
    only valid on Django < 2.0 — confirm the pinned Django version.
    """
    contest=models.ForeignKey(
        Contest,
        help_text="This is a ForeignKey relation betwen a Score object and a Contest object. The tells us that a Score model is linked to a particular contest"
    )
    user=models.ForeignKey(
        User,
        help_text="This is a ForeignKey relation betwen a Score object and a Contest object. The tells us that a Score model belongs to which user",
    )
    score=models.IntegerField(
        default=0,
        help_text="This is the score of user in a particular contest. This is calculated by checking number of problems he solved and duration it took him to solve the problems"
    )
    # One flag per fixed problem slot A-F (the schema assumes at most six
    # problems per contest).
    acceptedA=models.BooleanField(
        default=False,
        help_text="Boolean field whether A is solved or not",
    )
    acceptedB=models.BooleanField(
        default=False,
        help_text="Boolean field whether B is solved or not",
    )
    acceptedC=models.BooleanField(
        default=False,
        help_text="Boolean field whether C is solved or not",
    )
    acceptedD=models.BooleanField(
        default=False,
        help_text="Boolean field whether D is solved or not",
    )
    acceptedE=models.BooleanField(
        default=False,
        help_text="Boolean field whether E is solved or not",
    )
    acceptedF=models.BooleanField(
        default=False,
        help_text="Boolean field whether F is solved or not",
    )
    wins=models.IntegerField(
        default=0,
        help_text="It keeps track of the number of users he defeated"
    )

    def __str__(self):
        """Human-readable label combining contest id and username."""
        return 'Contest '+str(self.contest.id)+': User '+str(self.user.username)
| 39.123457
| 270
| 0.688545
| 431
| 3,169
| 5
| 0.303944
| 0.055684
| 0.044548
| 0.051972
| 0.444548
| 0.373086
| 0.302552
| 0.302552
| 0.302552
| 0.143852
| 0
| 0.002082
| 0.242032
| 3,169
| 80
| 271
| 39.6125
| 0.895087
| 0.081098
| 0
| 0.15493
| 0
| 0.056338
| 0.426588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.084507
| 0.028169
| 0.380282
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ff76b7f34f9abc8f910e03a1576bfe726a0de5
| 7,602
|
py
|
Python
|
mistletoe/renderers/base.py
|
executablebooks/mistletoe-ebp
|
229812436726fd9b1af85c6e66ff8c81b415758d
|
[
"MIT"
] | 2
|
2020-05-19T02:06:47.000Z
|
2020-06-27T10:01:59.000Z
|
mistletoe/renderers/base.py
|
executablebooks/mistletoe-ebp
|
229812436726fd9b1af85c6e66ff8c81b415758d
|
[
"MIT"
] | 5
|
2020-03-10T22:43:16.000Z
|
2020-03-21T22:09:09.000Z
|
mistletoe/renderers/base.py
|
ExecutableBookProject/mistletoe-ebp
|
229812436726fd9b1af85c6e66ff8c81b415758d
|
[
"MIT"
] | null | null | null |
"""
Base class for renderers.
"""
from itertools import chain
import re
import sys
from typing import Optional
from mistletoe import block_tokens, block_tokens_ext, span_tokens, span_tokens_ext
from mistletoe.parse_context import ParseContext, set_parse_context
class BaseRenderer:
"""
Base class for renderers.
All renderers should ...
* define all render functions specified in `self.render_map`;
* be a context manager (by inheriting `__enter__` and `__exit__`);
Custom renderers could ...
* set the default tokens searched for during parsing, by overriding
``default_block_tokens`` and/or ``default_span_tokens``
* add additional render functions by appending to self.render_map;
:Usage:
Suppose SomeRenderer inherits BaseRenderer, and ``fin`` is the input file.
The syntax looks something like this::
>>> from mistletoe import Document
>>> from some_renderer import SomeRenderer
>>> with SomeRenderer() as renderer:
... rendered = renderer.render(Document.read(fin))
See mistletoe.renderers.html for an implementation example.
:Naming conventions:
* The keys of `self.render_map` should exactly match the class
name of tokens;
* Render function names should be of form: `render_` + the
"snake-case" form of token's class name.
:param render_map: maps tokens to their corresponding render functions.
:type render_map: dict
"""
default_block_tokens = (
block_tokens.HTMLBlock,
block_tokens.BlockCode,
block_tokens.Heading,
block_tokens.Quote,
block_tokens.CodeFence,
block_tokens.ThematicBreak,
block_tokens.List,
block_tokens_ext.Table,
block_tokens_ext.Footnote,
block_tokens.LinkDefinition,
block_tokens.Paragraph,
)
default_span_tokens = (
span_tokens.EscapeSequence,
span_tokens.HTMLSpan,
span_tokens.AutoLink,
span_tokens.CoreTokens,
span_tokens_ext.FootReference,
span_tokens_ext.Strikethrough,
span_tokens.InlineCode,
span_tokens.LineBreak,
span_tokens.RawText,
)
_parse_name = re.compile(r"([A-Z][a-z]+|[A-Z]+(?![a-z]))")
def __init__(self, *, parse_context: Optional[ParseContext] = None):
"""Initialise the renderer.
:param parse_context: the parse context stores global parsing variables,
such as the block/span tokens to search for,
and link/footnote definitions that have been collected.
If None, a new context will be instatiated, with the default
block/span tokens for this renderer.
These will be re-instatiated on ``__enter__``.
:type parse_context: mistletoe.parse_context.ParseContext
"""
if parse_context is None:
parse_context = ParseContext(
self.default_block_tokens, self.default_span_tokens
)
self.parse_context = parse_context
set_parse_context(self.parse_context)
self.render_map = self.get_default_render_map()
for token in chain(
self.parse_context.block_tokens, self.parse_context.span_tokens
):
if token.__name__ not in self.render_map:
render_func = getattr(self, self._cls_to_func(token.__name__))
self.render_map[token.__name__] = render_func
def get_default_render_map(self):
"""Return the default map of token names to methods."""
return {
"Strong": self.render_strong,
"Emphasis": self.render_emphasis,
"InlineCode": self.render_inline_code,
"RawText": self.render_raw_text,
"Strikethrough": self.render_strikethrough,
"Image": self.render_image,
"Link": self.render_link,
"AutoLink": self.render_auto_link,
"EscapeSequence": self.render_escape_sequence,
"Heading": self.render_heading,
"SetextHeading": self.render_setext_heading,
"Quote": self.render_quote,
"Paragraph": self.render_paragraph,
"CodeFence": self.render_code_fence,
"BlockCode": self.render_block_code,
"List": self.render_list,
"ListItem": self.render_list_item,
"Table": self.render_table,
"TableRow": self.render_table_row,
"TableCell": self.render_table_cell,
"ThematicBreak": self.render_thematic_break,
"LineBreak": self.render_line_break,
"Document": self.render_document,
"LinkDefinition": self.render_link_definition,
"Footnote": self.render_footnote,
}
def render(self, token):
"""
Grabs the class name from input token and finds its corresponding
render function.
Basically a janky way to do polymorphism.
Arguments:
token: whose __class__.__name__ is in self.render_map.
"""
return self.render_map[token.__class__.__name__](token)
def render_inner(self, token):
"""
Recursively renders child tokens. Joins the rendered
strings with no space in between.
If newlines / spaces are needed between tokens, add them
in their respective templates, or override this function
in the renderer subclass, so that whitespace won't seem to
appear magically for anyone reading your program.
:param token: a branch node who has children attribute.
"""
return "".join(map(self.render, token.children or []))
    def __enter__(self):
        """
        Enter the renderer as a context manager.

        Reinstates the ``parse_context`` captured when this renderer was
        instantiated as the active global parse context, then returns the
        renderer itself.
        """
        set_parse_context(self.parse_context)
        return self
    def __exit__(self, exception_type, exception_val, traceback):
        """
        Exit the context manager.

        No cleanup is performed, and exceptions are not suppressed (the
        implicit ``None`` return is falsy).
        """
        pass
    @classmethod
    def _cls_to_func(cls, cls_name):
        # Convert a CamelCase token class name into its render-method name,
        # e.g. "InlineCode" -> "render_inline_code".  ``_parse_name`` is a
        # class-level regex (defined elsewhere on the class) that splits the
        # CamelCase words.
        snake = "_".join(map(str.lower, cls._parse_name.findall(cls_name)))
        return "render_{}".format(snake)
@staticmethod
def _tokens_from_module(module):
"""
Helper method; takes a module and returns a list of all token classes
specified in `module.__all__`. Useful when custom tokens are defined in a
separate module.
"""
return [getattr(module, name) for name in module.__all__]
    def render_raw_text(self, token):
        """
        Default render method for RawText: return ``token.content`` verbatim.
        """
        return token.content
    def render_setext_heading(self, token):
        """
        Default render method for SetextHeading: delegate to
        ``render_heading`` so both heading syntaxes render identically.
        """
        return self.render_heading(token)
    def render_code_fence(self, token):
        """
        Default render method for CodeFence: delegate to
        ``render_block_code`` so fenced and indented code render identically.
        """
        return self.render_block_code(token)
    def render_core_tokens(self, token):
        """Reject CoreTokens: they are an intermediate parsing artifact."""
        raise TypeError(
            "CoreTokens span tokens should not be present in the final syntax tree"
        )
    def unimplemented_renderer(self, token):
        """Fallback render method: always raises NotImplementedError."""
        raise NotImplementedError("no render method set for {}".format(token))
def __getattr__(self, name):
""""""
if name.startswith("render_"):
return self.unimplemented_renderer
raise AttributeError(name).with_traceback(sys.exc_info()[2])
| 34.089686
| 83
| 0.64654
| 874
| 7,602
| 5.366133
| 0.296339
| 0.076759
| 0.022175
| 0.009595
| 0.056077
| 0.056077
| 0.018337
| 0.018337
| 0
| 0
| 0
| 0.00018
| 0.270981
| 7,602
| 222
| 84
| 34.243243
| 0.846084
| 0.349776
| 0
| 0.018868
| 0
| 0
| 0.079561
| 0.006499
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0.009434
| 0.056604
| 0
| 0.320755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
85ff94648db8e42f7e087780f32ca9e870cb3118
| 2,123
|
py
|
Python
|
deep-scratch/steps/step50.py
|
jayChung0302/myml
|
6575706aec707186037607e49342f77cde34ff52
|
[
"MIT"
] | null | null | null |
deep-scratch/steps/step50.py
|
jayChung0302/myml
|
6575706aec707186037607e49342f77cde34ff52
|
[
"MIT"
] | null | null | null |
deep-scratch/steps/step50.py
|
jayChung0302/myml
|
6575706aec707186037607e49342f77cde34ff52
|
[
"MIT"
] | null | null | null |
if '__file__' in globals():
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import math
import numpy as np
import matplotlib.pyplot as plt
import dezero
from dezero import optimizers
import dezero.functions as F
import dezero.datasets as datasets
from dezero.models import MLP
from dezero.dataloaders import DataLoader as DataLoader
# Demonstrate the built-in iterator protocol: ``iter`` obtains an iterator
# from a list and ``next`` pulls its items one at a time.
t = [1, 2, 3]
x = iter(t)
print(next(x))
print(next(x))
print(next(x))
class MyIterator:
    """A hand-rolled iterator yielding 1..max_cnt, one value per ``next``."""

    def __init__(self, max_cnt):
        self.max_cnt = max_cnt
        self.cnt = 0

    def __iter__(self):
        # An iterator is its own iterable.
        return self

    def __next__(self):
        if self.max_cnt == self.cnt:
            raise StopIteration()
        self.cnt = self.cnt + 1
        return self.cnt
# Drive the custom iterator with a for loop (exercises __iter__/__next__).
obj = MyIterator(5)
for x in obj:
    print(x)
# Sanity-check dezero's accuracy metric: each row of ``y`` holds class
# scores, ``t`` the true class indices; 2 of the 3 argmaxes match -> 2/3.
y = np.array([[0.2, 0.8, 0], [0.1, 0.9, 0], [0.8, 0.1, 0.1]])
t = np.array([1, 2, 0])
acc = F.accuracy(y, t)
print(acc)
# Hyperparameters for training a small MLP on the Spiral toy dataset.
max_epoch = 300
batch_size = 30
hidden_size = 10
lr = 1.0
train_set = dezero.datasets.Spiral(train=True)
test_set = dezero.datasets.Spiral(train=False)
train_loader = DataLoader(train_set, batch_size)
test_loader = DataLoader(test_set, batch_size)
model = MLP((hidden_size, 3))
optimizer = optimizers.SGD(lr).setup(model)
for ep in range(max_epoch):
    sum_loss, sum_acc = 0, 0
    # Training pass: accumulate loss/accuracy weighted by batch size so the
    # per-epoch averages below are exact even for a ragged last batch.
    for x, t in train_loader:
        y = model(x)
        loss = F.softmax_cross_entropy(y, t)
        acc = F.accuracy(y, t)
        model.cleargrads()
        loss.backward()
        optimizer.update()
        sum_loss += float(loss.data) * len(t)
        sum_acc += float(acc.data) * len(t)
    print(f"epoch:{ep+1}")
    print(f"train loss:{sum_loss/len(train_set):.4f}, accuracy:{sum_acc/len(train_set):.4f}")
    sum_loss, sum_acc = 0, 0
    # Evaluation pass: gradients are not needed, so disable them.
    with dezero.no_grad():
        for x, t in test_loader:
            y = model(x)
            loss = F.softmax_cross_entropy(y, t)
            acc = F.accuracy(y, t)
            sum_loss += float(loss.data) * len(t)
            sum_acc += float(acc.data) * len(t)
    print(f"test loss: {sum_loss/len(test_set):.4f}, accuracy: {sum_acc/len(test_set):.4f}")
| 25.890244
| 93
| 0.621291
| 338
| 2,123
| 3.724852
| 0.272189
| 0.03336
| 0.025417
| 0.030977
| 0.305004
| 0.249404
| 0.166799
| 0.166799
| 0.166799
| 0.166799
| 0
| 0.027278
| 0.240226
| 2,123
| 81
| 94
| 26.209877
| 0.753255
| 0
| 0
| 0.238806
| 0
| 0.029851
| 0.084354
| 0.06032
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.149254
| 0.014925
| 0.238806
| 0.119403
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c804be87c5478ddfa9fadf38397429243edc770e
| 4,363
|
py
|
Python
|
play.py
|
cp1r8/metadungeon
|
e68a35c815d60bccb883436fde782868bff7f81f
|
[
"CC0-1.0"
] | null | null | null |
play.py
|
cp1r8/metadungeon
|
e68a35c815d60bccb883436fde782868bff7f81f
|
[
"CC0-1.0"
] | null | null | null |
play.py
|
cp1r8/metadungeon
|
e68a35c815d60bccb883436fde782868bff7f81f
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from game import World
from game.creatures import Humanoid, Unit
from game.creatures.adventurers import Adventurer, Party
from game.objects.containers import Container
from game.places.underground import Dungeon
from game.dice import d4
from pathlib import Path
import pickle
import sys
import ui
if __name__ == '__main__':
game_file = Path.home() / '.local' / 'metadungeon.pickle'
if game_file.exists() and '--reset' not in sys.argv:
with game_file.open('rb') as input:
world, party = pickle.load(input)
else:
world = World()
dungeon = Dungeon(world)
world.add(dungeon)
if '--shop' in sys.argv:
auto_equip = False
# TODO start in town (purchase equipment manually)
else:
auto_equip = True
location = dungeon.entrance
if '--basic' in sys.argv:
party = Party.basic(location, auto_equip)
elif '--expert' in sys.argv:
party = Party.expert(location, auto_equip)
elif '--funnel' in sys.argv:
party = Party.assemble(0, sum(4*d4) + 4, location, auto_equip)
elif '--hlc' in sys.argv:
party = Party.highLevelClient(location, auto_equip)
elif '--hlf' in sys.argv:
party = Party.highLevelFighter(location, auto_equip)
elif '--hlm' in sys.argv:
party = Party.highLevelMuser(location, auto_equip)
else:
party = Party.assemble(1, sum(2*d4) + 4, location, auto_equip)
location.add(party)
# for testing
if '--zap' in sys.argv:
damage = sys.argv.count('--zap')
for entity in party.location.entities:
if isinstance(entity, Unit):
for member in entity.members:
member.hit(damage)
actions = party.location.actions(party)
for arg in sys.argv:
if arg in actions:
actions[arg]()
world.age(minutes=10)
actions = party.location.actions(party)
break
with game_file.open('wb') as output:
pickle.dump((world, party), output)
print(f"{str(world):<19} {world.now}")
print('-' * 39)
print(str(party.location))
print()
print('[ ' + ' ] [ '.join(sorted(actions.keys())) + ' ]')
print('=' * 39)
print()
for entity in sorted(party.location.entities, key=lambda entity: entity.id):
if isinstance(entity, Unit):
continue
print(str(entity))
if isinstance(entity, Container):
for item in entity.contents:
ui.print_inventory_item(item)
print('-' * 39)
print()
for entity in sorted(party.location.entities, key=lambda entity: entity.id):
if not isinstance(entity, Unit):
continue
print(str(entity))
# TODO unit "health bar"
# TODO unit status (e.g., lost/flee)
if '--stats' in sys.argv:
print(ui.unitstats(entity))
print('-' * 39)
print()
for member in sorted(entity.members, key=lambda member: member.id):
print(str(member))
if member.hits_taken > member.hit_dice:
hit_points = f"{member.hit_dice - member.hits_taken:d}/{member.hit_dice:d}"
else:
hit_points = f"{member.hits_remaining - member.partial_hit:d}/{member.hit_dice:d}"
print(
f"[{ui.health_bar(member, 28)}]",
f"{hit_points:>5} hp",
)
if '--stats' in sys.argv:
print(ui.statblock(member))
if isinstance(member, Adventurer):
if '--abilities' in sys.argv:
print(ui.abilities(member))
if '--level' in sys.argv:
# TODO calculate "bounty"
print(
f"{member.profile}",
f"1UP:{member.silver_for_next_level:,.0f}$"
)
if isinstance(member, Humanoid):
if '--inventory' in sys.argv:
ui.print_inventory(member, True)
print('-' * 39)
elif '--arms' in sys.argv:
ui.print_inventory(member)
print()
print('=' * 39)
print()
| 28.89404
| 98
| 0.544579
| 501
| 4,363
| 4.662675
| 0.283433
| 0.050942
| 0.061644
| 0.035959
| 0.259418
| 0.146404
| 0.146404
| 0.064212
| 0.064212
| 0.064212
| 0
| 0.010712
| 0.336695
| 4,363
| 150
| 99
| 29.086667
| 0.796475
| 0.037589
| 0
| 0.280374
| 0
| 0
| 0.099451
| 0.03959
| 0
| 0
| 0
| 0.006667
| 0
| 1
| 0
| false
| 0
| 0.093458
| 0
| 0.093458
| 0.242991
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c80624c4bad650eb5277c12ff9ddd20884d61424
| 590
|
py
|
Python
|
freeze.py
|
eudemonia-research/hec
|
e65df8e4584746dcb2785327cfcffac10a66c689
|
[
"MIT"
] | 2
|
2015-11-05T16:24:31.000Z
|
2022-02-05T19:01:58.000Z
|
freeze.py
|
eudemonia-research/hec
|
e65df8e4584746dcb2785327cfcffac10a66c689
|
[
"MIT"
] | null | null | null |
freeze.py
|
eudemonia-research/hec
|
e65df8e4584746dcb2785327cfcffac10a66c689
|
[
"MIT"
] | null | null | null |
from cx_Freeze import setup, Executable
import requests.certs
# cx_Freeze detects dependencies automatically, but the options below may
# still need fine tuning.  The requests CA bundle must be shipped explicitly.
import sys

buildOptions = {
    "packages": [],
    "excludes": [],
    "include_msvcr": True,
    "include_files": [(requests.certs.where(), 'cacert.pem')],
}

# Windows GUI executables use the Win32GUI base to suppress the console.
base = 'Win32GUI' if sys.platform == 'win32' else None

executables = [
    Executable('scripts\\hecs.py', base=base, targetName='hecs.exe'),
]

setup(
    name='hecs',
    version='1.0',
    description='Hecs',
    options={"build_exe": buildOptions},
    executables=executables,
)
| 29.5
| 74
| 0.676271
| 68
| 590
| 5.808824
| 0.735294
| 0.065823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012579
| 0.191525
| 590
| 19
| 75
| 31.052632
| 0.815514
| 0.120339
| 0
| 0
| 0
| 0
| 0.112403
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c806d8b85faac4749d3297eee869e84a9a44277c
| 2,742
|
py
|
Python
|
Elasticsearch/elasticsearchconnector.py
|
krajai/testt
|
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
|
[
"BSD-3-Clause"
] | 1,114
|
2020-09-28T07:32:23.000Z
|
2022-03-31T22:35:50.000Z
|
Elasticsearch/elasticsearchconnector.py
|
krajai/testt
|
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
|
[
"BSD-3-Clause"
] | 298
|
2020-10-29T09:39:17.000Z
|
2022-03-31T15:24:44.000Z
|
Elasticsearch/elasticsearchconnector.py
|
krajai/testt
|
3aaf5fd7fe85e712c8c1615852b50f9ccb6737e5
|
[
"BSD-3-Clause"
] | 153
|
2020-09-29T06:07:39.000Z
|
2022-03-31T17:41:16.000Z
|
# Import elasticsearch module
from elasticsearch import Elasticsearch,ImproperlyConfigured,TransportError
import json
class ElasticsearchConnector:
    """Thin best-effort wrapper around the Elasticsearch client.

    All failures are reported via ``print`` rather than raised, matching the
    class's existing best-effort style.
    """

    def __init__(self,credobject=None):
        """
        Description:
            Accepts elasticsearch connection parameters and connects to elasticsearch cloud

        ``credobject`` may be either a path to a JSON credentials file or a
        dict-like object already holding the credential keys.
        """
        #Parameter check
        try:
            assert credobject is not None,"Found credentials object empty"
        except AssertionError:
            print("Empty Credentials")
        # NOTE(review): when credobject is None, execution continues past the
        # message above; ``open(None)`` then raises TypeError, which sets
        # ``credentials = None`` and makes the .get() calls below fail with
        # AttributeError.  Confirm whether a hard error is intended here.
        try:
            with open(credobject, "r") as f:
                credentials = json.load(f)
        except OSError:
            print("Unable to open file. Invalid path.")
            return
        except TypeError:
            # Not a path: assume it is already a credentials mapping.
            credentials = credobject
        #Initializing parameters
        self.user = credentials.get('user',None)
        self.password = credentials.get('password',None)
        self.endpoint = credentials.get('endpoint',None)
        self.port = credentials.get('port',None)
        self.protocol = credentials.get('protocol',None)
        self.connection = self.get_connection()

    def get_connection(self):
        """Connect to the cluster; returns the client, or None on failure."""
        print("Establishing connection to Elasticsearch")
        try:
            es = Elasticsearch([self.endpoint],http_auth=(self.user,self.password),scheme=self.protocol,port=self.port)
            print("Connection established")
            return es
        except ImproperlyConfigured as e:
            print("Unable to connect to Elasticsearch server : Invalid credentials")

    def save_data(self,parameters,data):
        """Index ``data`` using parameters['index'] / parameters['type'].

        Returns the indexing result set, or None if the transport fails.
        """
        print("Saving data to Elasticsearch")
        try:
            resultset = self.connection.index(index=parameters.get('index',None),doc_type=parameters.get('type',None),body=data)
            return resultset
        except TransportError as e:
            print("Unable to save data to elasticsearch. Please check your connection credentials")

    def search_data(self,parameters,query,search_type='search'):
        """Run a 'search' (single query) or 'msearch' (list of queries).

        For 'search' only ``query[0]`` is used; for 'msearch' each query is
        submitted individually and the responses are collected in a list.
        Returns None on failure or for an unknown ``search_type``.
        """
        # import pdb;pdb.set_trace()
        print("Fetching data from Elasticsearch server")
        if(search_type == 'search'):
            try:
                resultset = self.connection.search(index=parameters.get('index',None), body=query[0])
                return resultset
            except TransportError as e:
                print("Unable to search data. Please check your query and try again")
            except AttributeError as e:
                print("Please connect to Elasticsearch server and try again")
        elif(search_type == 'msearch'):
            response = []
            try:
                for each in query:
                    req_head = {'index': parameters.get('index',None), 'type': parameters.get('type',None)}
                    req_body = each
                    response.append(self.connection.msearch(body = [req_head,req_body]))
                return response
            except TransportError as e:
                print("Unable to search data. Please check your query and try again")
            except AttributeError as e:
                print("Please connect to Elasticsearch server and try again")
        else:
            print("Invalid Search type : Use 'search' or 'msearch' as valid search types")
| 35.153846
| 119
| 0.73523
| 345
| 2,742
| 5.791304
| 0.284058
| 0.052553
| 0.024024
| 0.028028
| 0.256757
| 0.183183
| 0.183183
| 0.183183
| 0.183183
| 0.15015
| 0
| 0.000434
| 0.160102
| 2,742
| 78
| 120
| 35.153846
| 0.86713
| 0.076951
| 0
| 0.278689
| 0
| 0
| 0.287293
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.065574
| false
| 0.032787
| 0.032787
| 0
| 0.196721
| 0.213115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8093b0fe4419003974199d64ec5c9a63aa70c9e
| 4,434
|
py
|
Python
|
pyvino_utils/models/recognition/gaze_estimation.py
|
venky4121994/openvinoface
|
a620138b94f865fb19e6165abde2237c85ca8764
|
[
"MIT"
] | 4
|
2020-08-31T17:19:57.000Z
|
2020-10-03T13:59:10.000Z
|
pyvino_utils/models/recognition/gaze_estimation.py
|
B0N0AI/pyvino_utils
|
0d42741eb446b038eae2917b621d9c1ffbc42452
|
[
"MIT"
] | 2
|
2020-09-13T08:04:36.000Z
|
2020-09-13T08:04:58.000Z
|
pyvino_utils/models/recognition/gaze_estimation.py
|
mmphego/pyvino_utils
|
0d42741eb446b038eae2917b621d9c1ffbc42452
|
[
"MIT"
] | null | null | null |
import time
import cv2
import numpy as np
from ..openvino_base.base_model import Base
class GazeEstimation(Base):
    """Class for the Gaze Estimation Recognition Model."""

    def __init__(
        self,
        model_name,
        source_width=None,
        source_height=None,
        device="CPU",
        threshold=0.60,
        extensions=None,
        **kwargs,
    ):
        # All construction is delegated to the OpenVINO Base model wrapper.
        super().__init__(
            model_name,
            source_width,
            source_height,
            device,
            threshold,
            extensions,
            **kwargs,
        )

    def preprocess_output(self, inference_results, image, show_bbox, **kwargs):
        """Turn raw inference output into a dict with keys
        "Gaze_Vector" (a dict of x/y/z floats) and "image"."""
        results = {}
        gaze_vector = dict(zip(["x", "y", "z"], np.vstack(inference_results).ravel()))
        # TODO: Figure out why I had to comment this code out?
        # roll_val = kwargs["head_pose_angles"]["roll"]
        # cos_theta = math.cos(roll_val * math.pi / 180)
        # sin_theta = math.sin(roll_val * math.pi / 180)
        # coords = {"x": None, "y": None}
        # coords["x"] = gaze_vector["x"] * cos_theta + gaze_vector["y"] * sin_theta
        # coords["y"] = gaze_vector["y"] * cos_theta - gaze_vector["x"] * sin_theta
        if show_bbox:
            self.draw_output(gaze_vector, image, **kwargs)
        results["Gaze_Vector"] = gaze_vector
        results["image"] = image
        return results

    @staticmethod
    def draw_output(coords, image, **kwargs):
        """Draw gaze-direction arrows from both eye points onto ``image``.

        The 500 factor scales the unit gaze vector into pixel coordinates;
        y is subtracted because image rows grow downward.
        """
        left_eye_point = kwargs["eyes_coords"]["left_eye_point"]
        right_eye_point = kwargs["eyes_coords"]["right_eye_point"]
        cv2.arrowedLine(
            image,
            (
                left_eye_point[0] + int(coords["x"] * 500),
                left_eye_point[1] - int(coords["y"] * 500),
            ),
            (left_eye_point[0], left_eye_point[1]),
            color=(0, 0, 255),
            thickness=2,
            tipLength=0.2,
        )
        cv2.arrowedLine(
            image,
            (
                right_eye_point[0] + int(coords["x"] * 500),
                right_eye_point[1] - int(coords["y"] * 500),
            ),
            (right_eye_point[0], right_eye_point[1]),
            color=(0, 0, 255),
            thickness=2,
            tipLength=0.2,
        )

    @staticmethod
    def show_text(
        image, coords, pos=550, font_scale=1.5, color=(255, 255, 255), thickness=1
    ):
        """Helper function for showing the text on frame."""
        height, _ = image.shape[:2]
        ypos = abs(height - pos)
        text = "Gaze Vector: " + ", ".join(f"{x}: {y:.2f}" for x, y in coords.items())
        cv2.putText(
            image,
            text,
            (15, ypos),
            fontFace=cv2.FONT_HERSHEY_PLAIN,
            fontScale=font_scale,
            color=color,
            thickness=thickness,
        )

    def preprocess_input(self, image, **kwargs):
        """Resize both eye crops to the network's expected input size."""
        width, height = self.model.inputs["left_eye_image"].shape[2:]
        # NOTE(review): Base.preprocess_input is invoked unbound with the
        # class itself as ``self`` -- unusual; presumably it only uses
        # ``self`` incidentally.  Confirm against Base's implementation.
        p_left_eye_image = Base.preprocess_input(
            Base, kwargs["eyes_coords"]["left_eye_image"], width, height
        )
        p_right_eye_image = Base.preprocess_input(
            Base, kwargs["eyes_coords"]["right_eye_image"], width, height
        )
        return p_left_eye_image, p_right_eye_image

    def predict(self, image, request_id=0, show_bbox=False, **kwargs):
        """Run async gaze inference; returns (elapsed_ms, gaze result)."""
        p_left_eye_image, p_right_eye_image = self.preprocess_input(image, **kwargs)
        head_pose_angles = list(kwargs.get("head_pose_angles").values())
        predict_start_time = time.time()
        # NOTE(review): this first ``status`` is never read -- it is
        # overwritten by the wait() below.
        status = self.exec_network.start_async(
            request_id=request_id,
            inputs={
                "left_eye_image": p_left_eye_image,
                "right_eye_image": p_right_eye_image,
                "head_pose_angles": head_pose_angles,
            },
        )
        status = self.exec_network.requests[request_id].wait(-1)
        if status == 0:
            pred_result = []
            for output_name, data_ptr in self.model.outputs.items():
                pred_result.append(
                    self.exec_network.requests[request_id].outputs[output_name]
                )
            predict_end_time = float(time.time() - predict_start_time) * 1000
            # NOTE(review): preprocess_output returns a dict; tuple-unpacking
            # it binds its KEYS, so ``gaze_vector`` becomes the string
            # "Gaze_Vector", not the vector itself.  Likely a bug -- confirm
            # what callers expect before changing.
            gaze_vector, _ = self.preprocess_output(
                pred_result, image, show_bbox=show_bbox, **kwargs
            )
            return (predict_end_time, gaze_vector)
| 32.844444
| 86
| 0.554804
| 519
| 4,434
| 4.447013
| 0.267823
| 0.039428
| 0.036395
| 0.02253
| 0.221837
| 0.174177
| 0.136915
| 0.098787
| 0.07539
| 0.034662
| 0
| 0.025718
| 0.324763
| 4,434
| 134
| 87
| 33.089552
| 0.745157
| 0.105774
| 0
| 0.196262
| 0
| 0
| 0.058257
| 0
| 0.009346
| 0
| 0
| 0.007463
| 0
| 1
| 0.056075
| false
| 0
| 0.037383
| 0
| 0.130841
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8095fa9e80674ff147ce29f4d9409ee896f3519
| 1,982
|
py
|
Python
|
src/testing/task_plot_share_of_educ_participants_with_rapid_test.py
|
covid-19-impact-lab/sid-germany
|
aef4bbfb326adaf9190c6d8880e15b3d6f150d28
|
[
"MIT"
] | 4
|
2021-04-24T14:43:47.000Z
|
2021-07-03T14:05:21.000Z
|
src/testing/task_plot_share_of_educ_participants_with_rapid_test.py
|
covid-19-impact-lab/sid-germany
|
aef4bbfb326adaf9190c6d8880e15b3d6f150d28
|
[
"MIT"
] | 4
|
2021-04-27T10:34:45.000Z
|
2021-08-31T16:40:28.000Z
|
src/testing/task_plot_share_of_educ_participants_with_rapid_test.py
|
covid-19-impact-lab/sid-germany
|
aef4bbfb326adaf9190c6d8880e15b3d6f150d28
|
[
"MIT"
] | null | null | null |
import warnings
import matplotlib.pyplot as plt
import pandas as pd
import pytask
import seaborn as sns
from src.config import BLD
from src.config import PLOT_END_DATE
from src.config import PLOT_SIZE
from src.config import PLOT_START_DATE
from src.config import SRC
from src.plotting.plotting import style_plot
from src.testing.shared import get_piecewise_linear_interpolation
@pytask.mark.depends_on(
    {
        "params": BLD / "params.pkl",
        "plotting.py": SRC / "plotting" / "plotting.py",
        "testing_shared.py": SRC / "testing" / "shared.py",
    }
)
@pytask.mark.produces(
    BLD
    / "figures"
    / "data"
    / "testing"
    / "share_of_educ_participants_with_rapid_test.pdf"
)
def task_plot_share_of_educ_participants_with_rapid_test(depends_on, produces):
    """Plot the share of teachers and students receiving rapid tests."""
    params = pd.read_pickle(depends_on["params"])

    # The params index is a MultiIndex; silence the lexsort performance
    # warning while slicing it.
    with warnings.catch_warnings():
        warnings.filterwarnings(
            "ignore", message="indexing past lexsort depth may impact performance."
        )
        teacher_params = params.loc[("rapid_test_demand", "educ_worker_shares")]
        student_params = params.loc[("rapid_test_demand", "student_shares")]

    # Interpolate the share time series and restrict to the plot window.
    teacher_shares = get_piecewise_linear_interpolation(teacher_params).loc[
        PLOT_START_DATE:PLOT_END_DATE
    ]
    student_shares = get_piecewise_linear_interpolation(student_params).loc[
        PLOT_START_DATE:PLOT_END_DATE
    ]

    fig, ax = plt.subplots(figsize=PLOT_SIZE)
    for shares, label in [
        (teacher_shares, "Teachers (School, Preschool, Nursery)"),
        (student_shares, "School Students"),
    ]:
        sns.lineplot(x=shares.index, y=shares, ax=ax, label=label)
    ax.set_title("Share of Students and Teachers Receiving Rapid Tests")
    fig, ax = style_plot(fig, ax)
    fig.tight_layout()
    fig.savefig(produces)
    plt.close()
| 30.492308
| 85
| 0.712916
| 260
| 1,982
| 5.146154
| 0.342308
| 0.036622
| 0.04858
| 0.071001
| 0.210762
| 0.139013
| 0.09417
| 0
| 0
| 0
| 0
| 0
| 0.19223
| 1,982
| 64
| 86
| 30.96875
| 0.835728
| 0
| 0
| 0.071429
| 0
| 0
| 0.189707
| 0.023209
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.214286
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c80966397626d332b933ed9036f4e46b5c441750
| 734
|
py
|
Python
|
app/models/brand.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
app/models/brand.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
app/models/brand.py
|
ertyurk/bugme
|
5a3ef3e089e0089055074c1c896c3fdc76600e93
|
[
"MIT"
] | null | null | null |
from typing import Optional
from pydantic import BaseModel, Field
class BrandModel(BaseModel):
    """Request schema for creating a brand; ``Field(...)`` marks required."""

    brand: str = Field(...)
    auth_key: Optional[str]
    user_id: str = Field(...)

    class Config:
        # Allow population via field names as well as aliases.
        allow_population_by_field_name = True
        schema_extra = {
            "example": {
                "brand": "Lean Scale Bugger",
                "user_id": "60a57e1d1201f43c9c51c044",
            }
        }
class UpdateBrandModel(BaseModel):
    """Partial-update schema for a brand; every field is optional."""

    brand: Optional[str]
    auth_key: Optional[str]
    user_id: Optional[str]

    class Config:
        schema_extra = {
            "example": {
                "brand": "Lean Scale Bugger",
                "user_id": "60a57e1d1201f43c9c51c044",
            }
        }
| 22.9375
| 54
| 0.553134
| 67
| 734
| 5.880597
| 0.432836
| 0.111675
| 0.076142
| 0.091371
| 0.467005
| 0.467005
| 0.345178
| 0.345178
| 0.345178
| 0.345178
| 0
| 0.070539
| 0.343324
| 734
| 31
| 55
| 23.677419
| 0.746888
| 0
| 0
| 0.48
| 0
| 0
| 0.163488
| 0.065395
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.08
| 0
| 0.48
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c80c247892056d339d30163cadca271c880389d5
| 443
|
py
|
Python
|
flaskapp/app.py
|
Chetan-Gahane/Detection-Of-Phishing-Websites
|
327c6bbd4fe77d465e290466f26a387760103ad7
|
[
"MIT"
] | null | null | null |
flaskapp/app.py
|
Chetan-Gahane/Detection-Of-Phishing-Websites
|
327c6bbd4fe77d465e290466f26a387760103ad7
|
[
"MIT"
] | null | null | null |
flaskapp/app.py
|
Chetan-Gahane/Detection-Of-Phishing-Websites
|
327c6bbd4fe77d465e290466f26a387760103ad7
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import Flask, flash, redirect, render_template, request, session, abort
import os
import newtrain
app = Flask(__name__)
@app.route('/')
def home(x=''):
    """Return *x* unchanged.

    Doubles as the '/' route handler and as a helper for rendering a login
    result.  The default value keeps the route callable: Flask invokes view
    functions for '/' with no arguments, so the previously required
    positional parameter made every GET / fail with a TypeError (HTTP 500).
    Existing callers that pass ``x`` are unaffected.
    """
    return x
@app.route('/login', methods=['POST'])
def do_admin_login():
    # Run the phishing-detection model (newtrain.main) on the submitted URL
    # and echo its result back through home().
    # NOTE(review): the form field is named 'username' but carries a URL --
    # confirm against the login template.
    url_new=request.form['username']
    x=newtrain.main(url_new)
    return home(x)
if __name__ == "__main__":
    # Dev-server entry point.  The secret key is regenerated on every start,
    # which invalidates any existing sessions across restarts.
    app.secret_key = os.urandom(12)
    app.run(debug=True)
| 20.136364
| 82
| 0.683973
| 64
| 443
| 4.453125
| 0.578125
| 0.063158
| 0.105263
| 0.140351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005495
| 0.17833
| 443
| 22
| 83
| 20.136364
| 0.777473
| 0
| 0
| 0
| 0
| 0
| 0.060948
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0.0625
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c8100632cb345df1cb4918dfaf696ed8e91b2f92
| 8,607
|
py
|
Python
|
training/anticausal_classifier_train.py
|
SANCHES-Pedro/Diff-SCM
|
a7e7e6ed3a2cd1c21e3bf7a3ed8ed8b29a22cb69
|
[
"Apache-2.0"
] | 6
|
2022-02-22T05:07:05.000Z
|
2022-03-29T09:48:03.000Z
|
training/anticausal_classifier_train.py
|
SANCHES-Pedro/Diff-SCM
|
a7e7e6ed3a2cd1c21e3bf7a3ed8ed8b29a22cb69
|
[
"Apache-2.0"
] | null | null | null |
training/anticausal_classifier_train.py
|
SANCHES-Pedro/Diff-SCM
|
a7e7e6ed3a2cd1c21e3bf7a3ed8ed8b29a22cb69
|
[
"Apache-2.0"
] | 2
|
2022-02-20T08:45:54.000Z
|
2022-03-09T09:51:13.000Z
|
"""
Train a noised image classifier on ImageNet.
"""
import os
import blobfile as bf
import torch as th
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel as DDP
from torch.optim import AdamW
import torch
from pathlib import Path
import sys
sys.path.append(str(Path.cwd()))
from configs import default_mnist_configs
from utils import logger, dist_util
from utils.script_util import create_anti_causal_predictor, create_gaussian_diffusion
from utils.fp16_util import MixedPrecisionTrainer
from models.resample import create_named_schedule_sampler
from training.train_util import parse_resume_step_from_filename, log_loss_dict
from datasets import loader
def main():
    """Train the anti-causal (noised-image) classifier end to end.

    Builds model + diffusion from the default MNIST config, optionally
    resumes from a checkpoint, and runs the distributed training loop with
    periodic validation, logging, and checkpointing.
    """
    config = default_mnist_configs.get_default_configs()
    dist_util.setup_dist()
    logger.configure(Path(config.experiment_name) / ("classifier_train_" + "_".join(config.classifier.label)),
                     format_strs=["log", "stdout", "csv", "tensorboard"])

    logger.log("creating model and diffusion...")
    diffusion = create_gaussian_diffusion(config)
    model = create_anti_causal_predictor(config)
    model.to(dist_util.dev())
    # When training on noised inputs, a schedule sampler picks the timesteps.
    if config.classifier.training.noised:
        schedule_sampler = create_named_schedule_sampler(
            config.classifier.training.schedule_sampler, diffusion
        )

    logger.log("creating data loader...")
    data = loader.get_data_loader(config.data.path, config.classifier.training.batch_size, split_set='train',
                                  which_label=config.classifier.label)
    val_data = loader.get_data_loader(config.data.path, config.classifier.training.batch_size, split_set='val',
                                      which_label=config.classifier.label)

    logger.log("training...")
    resume_step = 0
    if config.classifier.training.resume_checkpoint:
        resume_step = parse_resume_step_from_filename(config.classifier.training.resume_checkpoint)
        # Only rank 0 loads weights; sync_params below broadcasts them.
        if dist.get_rank() == 0:
            logger.log(
                f"loading model from checkpoint: {config.classifier.training.resume_checkpoint}... at {resume_step} step"
            )
            model.load_state_dict(
                dist_util.load_state_dict(
                    config.classifier.training.resume_checkpoint, map_location=dist_util.dev()
                )
            )

    # Needed for creating correct EMAs and fp16 parameters.
    dist_util.sync_params(model.parameters())

    mp_trainer = MixedPrecisionTrainer(
        model=model, use_fp16=config.classifier.training.classifier_use_fp16, initial_lg_loss_scale=16.0
    )

    model = DDP(
        model,
        device_ids=[dist_util.dev()],
        output_device=dist_util.dev(),
        broadcast_buffers=False,
        bucket_cap_mb=128,
        find_unused_parameters=False,
    )

    logger.log(f"creating optimizer...")
    opt = AdamW(mp_trainer.master_params, lr=config.classifier.training.lr,
                weight_decay=config.classifier.training.weight_decay)
    if config.classifier.training.resume_checkpoint:
        # Optimizer state is saved next to the model checkpoint as optNNNNNN.pt.
        opt_checkpoint = bf.join(
            bf.dirname(config.classifier.training.resume_checkpoint), f"opt{resume_step:06}.pt"
        )
        logger.log(f"loading optimizer state from checkpoint: {opt_checkpoint}")
        opt.load_state_dict(
            dist_util.load_state_dict(opt_checkpoint, map_location=dist_util.dev())
        )

    logger.log("training classifier model...")

    def forward_backward_log(data_loader, prefix="train"):
        # One forward/backward pass over the next batch; logs per-timestep
        # losses under ``prefix``.  Backward only runs when grads are enabled
        # (i.e. skipped during the no_grad validation pass).
        data_dict = next(data_loader)
        labels = {}
        for label_name in config.classifier.label:
            assert label_name in list(data_dict.keys()), f'label {label_name} are not in data_dict{data_dict.keys()}'
            labels[label_name] = data_dict[label_name].to(dist_util.dev())
        batch = data_dict["image"].to(dist_util.dev())
        # Noisy images
        if config.classifier.training.noised:
            t, _ = schedule_sampler.sample(batch.shape[0], dist_util.dev())
            batch = diffusion.q_sample(batch, t)
        else:
            t = th.zeros(batch.shape[0], dtype=th.long, device=dist_util.dev())

        loss_dict = get_predictor_loss(model, labels, batch, t)
        loss = torch.stack(list(loss_dict.values())).sum()

        losses = {f"{prefix}_{loss_name}": loss_value.detach() for loss_name, loss_value in loss_dict.items()}
        log_loss_dict(diffusion, t, losses)
        del losses
        loss = loss.mean()
        if loss.requires_grad:
            mp_trainer.zero_grad()
            mp_trainer.backward(loss)

    for step in range(config.classifier.training.iterations - resume_step):
        logger.logkv("step", step + resume_step)
        logger.logkv(
            "samples",
            (step + resume_step + 1) * config.classifier.training.batch_size * dist.get_world_size(),
        )
        if config.classifier.training.anneal_lr:
            set_annealed_lr(opt, config.classifier.training.lr,
                            (step + resume_step) / config.classifier.training.iterations)
        forward_backward_log(data)
        mp_trainer.optimize(opt)
        # Periodic validation without gradient sync across DDP ranks.
        if val_data is not None and not step % config.classifier.training.eval_interval:
            with th.no_grad():
                with model.no_sync():
                    model.eval()
                    forward_backward_log(val_data, prefix="val")
                    model.train()
        if not step % config.classifier.training.log_interval:
            logger.dumpkvs()
        if (
            step
            and dist.get_rank() == 0
            and not (step + resume_step) % config.classifier.training.save_interval
        ):
            logger.log("saving model...")
            save_model(mp_trainer, opt, step + resume_step)

    # Final checkpoint after the loop, then synchronize all ranks.
    if dist.get_rank() == 0:
        logger.log("saving model...")
        save_model(mp_trainer, opt, step + resume_step)
    dist.barrier()
def get_predictor_loss(model, labels, batch, t):
    """Compute the classifier loss dict for one (possibly noised) batch.

    Only the first entry of ``labels`` is used: the loss is the mean
    cross-entropy between the model's output and that label tensor.
    Returns a dict with a single "loss" key.
    """
    output = model(batch, timesteps=t)
    first_label = list(labels.values())[0]
    return {"loss": F.cross_entropy(output, first_label, reduction="mean")}
def set_annealed_lr(opt, base_lr, frac_done):
    """Linearly anneal the optimizer's learning rate toward zero.

    At ``frac_done == 0`` every param group's lr is ``base_lr``; at
    ``frac_done == 1`` it reaches 0.
    """
    annealed = base_lr * (1 - frac_done)
    for group in opt.param_groups:
        group["lr"] = annealed
def save_model(mp_trainer, opt, step):
    """Persist model and optimizer state for ``step``; only rank 0 writes.

    Files are named modelNNNNNN.pt / optNNNNNN.pt inside the logger dir,
    matching the pattern the resume logic expects.
    """
    if dist.get_rank() == 0:
        th.save(
            mp_trainer.master_params_to_state_dict(mp_trainer.master_params),
            os.path.join(logger.get_dir(), f"model{step:06d}.pt"),
        )
        th.save(opt.state_dict(), os.path.join(logger.get_dir(), f"opt{step:06d}.pt"))
def compute_top_k(logits, labels, k, reduction="mean"):
    """Return top-k accuracy of ``logits`` against integer ``labels``.

    Args:
        logits: (batch, classes) score tensor.
        labels: (batch,) tensor of true class indices.
        k: number of top predictions that count as a hit.
        reduction: "mean" for a float accuracy, "none" for a per-sample
            0/1 float tensor.

    Raises:
        ValueError: for an unknown ``reduction`` (the original silently
            fell through and returned None).
    """
    _, top_ks = th.topk(logits, k, dim=-1)
    hits = (top_ks == labels[:, None]).float().sum(dim=-1)
    if reduction == "mean":
        return hits.mean().item()
    elif reduction == "none":
        return hits
    raise ValueError(f"unknown reduction: {reduction!r}")
def split_microbatches(microbatch, *args):
    """Yield aligned micro-batch slices of every sequence in ``args``.

    If ``microbatch`` is -1 or at least as large as the batch (measured on
    ``args[0]``), the inputs are yielded unchanged as a single tuple.
    ``None`` entries in ``args`` pass through as ``None`` in every slice.
    """
    batch_len = len(args[0])
    if microbatch == -1 or microbatch >= batch_len:
        yield tuple(args)
        return
    for start in range(0, batch_len, microbatch):
        stop = start + microbatch
        yield tuple(None if a is None else a[start:stop] for a in args)
"""
for i, (sub_batch, sub_labels, sub_t) in enumerate(
split_microbatches(config.classifier.training.microbatch, batch, labels, t)
):
if not config.classifier.noise_conditioning:
sub_t = None
if prefix == "train" and config.classifier.training.adversarial_training:
sub_batch_perturbed = adversarial_attacker.perturb(model, sub_batch, sub_labels, sub_t)
logits_perturbed = model(sub_batch_perturbed, timesteps=sub_t)
loss += F.cross_entropy(logits_perturbed, sub_labels, reduction="none")
loss /= 2
adversarial_sub_labels = get_random_vector_excluding(sub_labels)
adversarial_sub_batch = fgsm_attack(sub_batch, sub_batch.grad.data)
adversarial_logits = model(adversarial_sub_batch, timesteps=sub_t)
"""
# FGSM attack code
def fgsm_attack(original_batch, data_grad, epsilon: float = 0.15):
    """Fast Gradient Sign Method perturbation of an image batch.

    Each element is nudged by ``epsilon`` in the direction of the sign of
    its loss gradient, then clipped back into the [-1, 1] image range.
    """
    eps = th.tensor(epsilon).to(data_grad.device)
    # Element-wise sign of the gradient scaled to the attack budget.
    perturbation = eps * data_grad.sign()
    return th.clamp(original_batch + perturbation, -1, 1)
if __name__ == "__main__":
    # Script entry point; main() is expected to be defined earlier in this
    # module (outside this view) — TODO confirm.
    main()
| 38.084071
| 121
| 0.668526
| 1,111
| 8,607
| 4.937894
| 0.227723
| 0.084579
| 0.104995
| 0.032811
| 0.233139
| 0.155669
| 0.100073
| 0.072913
| 0.060518
| 0.048487
| 0
| 0.006595
| 0.224817
| 8,607
| 225
| 122
| 38.253333
| 0.815647
| 0.036947
| 0
| 0.093168
| 0
| 0
| 0.071656
| 0.013028
| 0
| 0
| 0
| 0
| 0.006211
| 1
| 0.049689
| false
| 0
| 0.10559
| 0
| 0.180124
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c81323b7eda0896694f1dbe20031469d75f77fed
| 3,015
|
py
|
Python
|
pythonclient/karmen/karmen.py
|
jrcichra/karmen
|
4d25d635509ebffa295b085ae7fa3932e3a36344
|
[
"MIT"
] | 3
|
2020-03-02T13:09:07.000Z
|
2021-12-27T16:27:23.000Z
|
pythonclient/karmen/karmen.py
|
jrcichra/karmen
|
4d25d635509ebffa295b085ae7fa3932e3a36344
|
[
"MIT"
] | 5
|
2020-03-02T04:53:54.000Z
|
2021-12-17T23:57:12.000Z
|
pythonclient/karmen/karmen.py
|
jrcichra/karmen
|
4d25d635509ebffa295b085ae7fa3932e3a36344
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3 -u
import threading
import time
import queue
import socket
import grpc
import karmen.karmen_pb2 as pb
import karmen.karmen_pb2_grpc as pb_grpc
class Karmen:
    """gRPC client for the karmen service.

    Registers this host with the server, dispatches incoming actions to
    locally registered handler functions, and emits events either
    synchronously or on a background thread.
    """

    def __init__(self, name=socket.gethostname(), hostname="localhost", port=8080):
        super().__init__()
        self.name = name
        self.channel = grpc.insecure_channel(f"{hostname}:{port}")
        self.stub = pb_grpc.KarmenStub(self.channel)
        # Maps action name -> handler callable(parameters, result).
        self.actions = {}

    def Pass(self) -> int:
        """No-op action that always reports success (HTTP-style 200)."""
        return 200

    def ping(self) -> str:
        """Round-trip a ping through the server and return its reply text."""
        reply = self.stub.PingPong(pb.Ping(message="Python!"))
        return reply.message

    def runEvent(self, name, parameters=None, q=None):
        """Emit a named event synchronously; optionally push the result to ``q``."""
        evt = pb.Event(eventName=name, timestamp=int(time.time()))
        response = self.stub.EmitEvent(pb.EventRequest(
            requesterName=self.name, event=evt, parameters=parameters))
        if q is not None:
            # Called from runEventAsync: hand the result back via the queue.
            q.put(response)
        return response

    def runEventAsync(self, name, parameters=None):
        """Emit an event on a background thread; returns a queue holding the result."""
        result_queue = queue.Queue()
        threading.Thread(target=self.runEvent, args=(
            name, parameters, result_queue)).start()
        return result_queue

    def addAction(self, func, name):
        """Register ``func`` as the handler for actions named ``name``."""
        self.actions[name] = func

    def setupActions(self):
        """Open the bidirectional action stream and start the dispatcher thread."""
        send_queue = queue.SimpleQueue()
        # iter(get, None) turns the queue into a blocking request iterator.
        recv = self.stub.ActionDispatcher(
            iter(send_queue.get, None))
        # Identify ourselves to the server first.
        send_queue.put(pb.ActionResponse(hostname=self.name))
        threading.Thread(target=self.handleActions,
                         args=(recv, send_queue)).start()

    def handleActions(self, recv, send_queue):
        """Block on the action stream forever, spawning a thread per action."""
        while True:
            incoming = next(recv)
            threading.Thread(target=self.handleAction,
                             args=(incoming, send_queue)).start()

    def handleAction(self, msg, send_queue):
        """Invoke the registered handler for one action and send back its result."""
        print(f"Running action: {msg.action.actionName}")
        result = pb.ActionResponse()
        self.actions[msg.action.actionName](
            msg.action.parameters, result.result)
        print(f"Finished running action: {msg.action.actionName}")
        send_queue.put(result)

    def register(self) -> int:
        """Register this client with the server, start the action loop, and
        return the server's result code."""
        reply = self.stub.Register(pb.RegisterRequest(
            name=self.name, timestamp=int(time.time())))
        self.setupActions()
        return reply.result.code
if __name__ == "__main__":
    # Demo: register a "sleep" action and trigger it via the "pleaseSleep" event.
    def sleep(parameters, result):
        # Handler signature: (parameters, result); result.code reports status.
        print(f"Sleeping for {parameters['seconds']} seconds")
        time.sleep(int(parameters['seconds']))
        print(f"Done sleeping for {parameters['seconds']} seconds")
        result.code = 200
    k = Karmen(name="bob")
    print(k.ping())
    k.addAction(sleep, "sleep")
    k.register()
    print(k.runEvent("pleaseSleep"))
| 30.15
| 83
| 0.60995
| 355
| 3,015
| 5.095775
| 0.309859
| 0.039801
| 0.023217
| 0.041459
| 0.126036
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00592
| 0.271642
| 3,015
| 99
| 84
| 30.454545
| 0.817851
| 0.071973
| 0
| 0
| 0
| 0
| 0.088594
| 0.032999
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161765
| false
| 0.014706
| 0.102941
| 0.014706
| 0.352941
| 0.088235
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c817b460ee65b13241ef6e94463df88bf762261b
| 765
|
py
|
Python
|
legacy/legacy/recommenders/visual_gmf.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | null | null | null |
legacy/legacy/recommenders/visual_gmf.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | 6
|
2020-01-28T22:51:16.000Z
|
2022-02-10T00:11:19.000Z
|
legacy/legacy/recommenders/visual_gmf.py
|
csmithchicago/openrec
|
5a9cf03abe0db0636107985f9f19d6351e4afe68
|
[
"MIT"
] | null | null | null |
from openrec.legacy.recommenders import VisualPMF
from openrec.legacy.modules.interactions import PointwiseGeCE
class VisualGMF(VisualPMF):
    """VisualPMF variant whose interaction term is a PointwiseGeCE module."""

    def _build_default_interactions(self, train=True):
        """Wire user/item/bias vectors into a PointwiseGeCE interaction module."""
        user_vec = self._get_module("user_vec", train=train).get_outputs()[0]
        item_vec = self._get_module("item_vec", train=train).get_outputs()[0]
        item_bias = self._get_module("item_bias", train=train).get_outputs()[0]
        interaction = PointwiseGeCE(
            user=user_vec,
            item=item_vec,
            item_bias=item_bias,
            labels=self._get_input("labels"),
            l2_reg=self._l2_reg,
            train=train,
            scope="PointwiseGeCE",
            reuse=not train,
        )
        self._add_module("interaction", interaction, train=train)
| 34.772727
| 86
| 0.589542
| 81
| 765
| 5.283951
| 0.419753
| 0.140187
| 0.091122
| 0.140187
| 0.179907
| 0.130841
| 0.130841
| 0
| 0
| 0
| 0
| 0.009346
| 0.300654
| 765
| 21
| 87
| 36.428571
| 0.790654
| 0
| 0
| 0.111111
| 0
| 0
| 0.071895
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c818c2c94bfac62e873d6b6ae455389a5b8e8196
| 732
|
py
|
Python
|
tests/test_tag.py
|
danielwe/explore-courses-api
|
e08d219b154e7fdb16690e4cd02aa239366f6747
|
[
"MIT"
] | 7
|
2019-06-17T07:45:54.000Z
|
2022-01-31T01:09:22.000Z
|
tests/test_tag.py
|
illiteratecoder/Explore-Courses-API
|
b2dc41092882e4b2b7945609e4e85b8ac1702bc7
|
[
"MIT"
] | null | null | null |
tests/test_tag.py
|
illiteratecoder/Explore-Courses-API
|
b2dc41092882e4b2b7945609e4e85b8ac1702bc7
|
[
"MIT"
] | 1
|
2021-11-14T22:23:59.000Z
|
2021-11-14T22:23:59.000Z
|
from xml.etree import ElementTree as ET
from explorecourses import *
class TestTag(object):
@classmethod
def setup_class(cls):
text_tag = (
'<tag>'
'<organization>EARTHSYS</organization>'
'<name>energy_foundation</name>'
'</tag>'
)
cls.xml_tag = ET.fromstring(text_tag)
def test_create_tag(self):
tag = Tag(self.xml_tag)
assert tag != None
def test_tag_attributes(self):
tag = Tag(self.xml_tag)
assert tag.organization == "EARTHSYS"
assert tag.name == "energy_foundation"
def test_tag_string(self):
tag = Tag(self.xml_tag)
assert str(tag) == "EARTHSYS::energy_foundation"
| 20.914286
| 56
| 0.592896
| 84
| 732
| 4.97619
| 0.369048
| 0.057416
| 0.07177
| 0.100478
| 0.200957
| 0.200957
| 0.200957
| 0.138756
| 0
| 0
| 0
| 0
| 0.296448
| 732
| 34
| 57
| 21.529412
| 0.81165
| 0
| 0
| 0.136364
| 0
| 0
| 0.177596
| 0.128415
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c81a08103667814c6eb2d1d517a2b39db440ed7f
| 458
|
py
|
Python
|
SScriptCompiler/examples/thresholdcounter/states/init_s.py
|
alklasil/SScript
|
de4481bf96e79b9ee157e266ea9fe8b1bfb3701e
|
[
"MIT"
] | null | null | null |
SScriptCompiler/examples/thresholdcounter/states/init_s.py
|
alklasil/SScript
|
de4481bf96e79b9ee157e266ea9fe8b1bfb3701e
|
[
"MIT"
] | 8
|
2018-03-10T19:20:43.000Z
|
2018-04-30T18:11:17.000Z
|
SScriptCompiler/examples/thresholdcounter/states/init_s.py
|
alklasil/SScript
|
de4481bf96e79b9ee157e266ea9fe8b1bfb3701e
|
[
"MIT"
] | null | null | null |
def init_s(data):
    """Build the "init" state program for the threshold counter.

    Returns a ``(state_name, instruction_blocks)`` pair. The single
    instruction block records the configuration time, seeds the state
    below the lower threshold, prints the sensor identifier from
    ``data``, and installs the request-string generator.
    """
    program = [
        # Record configuration time.
        "$getTime", "configuration_millis",
        # Start in the below-lower-threshold state.
        "$=(const)=", "state", "@<t",
        "$printInt_ln", data['sensorIdentifier'],
        # Install the request-string generator.
        "$esp_setRequestStringGenerator", [
            "@requestStringGeneratorState"
        ],
    ]
    return ("init", [program])
| 26.941176
| 55
| 0.5
| 30
| 458
| 7.5
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.373362
| 458
| 16
| 56
| 28.625
| 0.783972
| 0.19869
| 0
| 0.181818
| 0
| 0
| 0.374656
| 0.15978
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0
| 0.090909
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|