hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cfc750727d6c4741228488819c79fe8825da6d65 | 15,095 | py | Python | data_loader.py | armyja/CHAOS_GCN | 685c84fdabf6db71cfc007cec41a72d900422920 | [
"MIT"
] | 4 | 2019-10-05T12:54:51.000Z | 2021-03-29T11:41:50.000Z | data_loader.py | armyja/CHAOS_GCN | 685c84fdabf6db71cfc007cec41a72d900422920 | [
"MIT"
] | null | null | null | data_loader.py | armyja/CHAOS_GCN | 685c84fdabf6db71cfc007cec41a72d900422920 | [
"MIT"
] | 3 | 2019-10-05T12:54:55.000Z | 2021-07-15T05:32:37.000Z | import math
import random
import torch
import numpy as np
from torch.utils.data.dataset import Dataset
from PIL import Image
import os
from torchvision import transforms
from utils import *
# 1 x n_class x height x width tensor
def decode_output_to_label(temp):
n, c, h, w = temp.size()
temp = temp.transpose(1, 2).transpose(2, 3).squeeze(0).view(h, w, c)
if torch.cuda.is_available():
temp = temp.cpu()
temp = temp.argmax(-1)
temp = torch.LongTensor(temp.view(1, 1, h, w))
return temp
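# Editorial usage sketch for the helper above (tensor sizes are illustrative
# assumptions, not values taken from this repository):
#   logits = torch.randn(1, 5, 256, 256)        # 1 x n_class x height x width
#   label_map = decode_output_to_label(logits)  # LongTensor of shape (1, 1, 256, 256)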
# height x width
class OrganSeg(Dataset):
def __init__(self, current_fold, list_path, n_class, organ_id, slice_threshold=0, transforms=True):
self.organ_ID = int(organ_id)
self.n_class = int(n_class)
self.transforms = transforms
self.augmentations = None
image_list = open(training_set_filename(list_path, current_fold), 'r').read().splitlines()
        self.training_image_set = np.zeros((len(image_list)), dtype=int)
for i in range(len(image_list)):
s = image_list[i].split(' ')
self.training_image_set[i] = int(s[0])
slice_list = open(list_training_all(list_path), 'r').read().splitlines()
self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.slice_ID = np.zeros(self.slices, dtype=int)
self.image_filename = ['' for l in range(self.slices)]
self.label_filename = ['' for l in range(self.slices)]
self.average = np.zeros(self.slices)
        self.pixels = np.zeros(self.slices, dtype=int)
for l in range(self.slices):
s = slice_list[l].split(' ')
self.image_ID[l] = s[0]
self.slice_ID[l] = s[1]
self.image_filename[l] = s[2] # important
self.label_filename[l] = s[3] # important
self.average[l] = float(s[4]) # pixel value avg
self.pixels[l] = int(s[organ_id + 5 - 1]) # sum of label
if 0 < slice_threshold < 1: # 0.98
pixels_index = sorted(range(self.slices), key=lambda l: self.pixels[l])
last_index = int(math.floor((self.pixels > 0).sum() * slice_threshold))
min_pixels = self.pixels[pixels_index[-last_index]]
else: # or set up directly
min_pixels = slice_threshold
# slice_threshold = min_pixels = 0 means all organ
self.active_index = [l for l, p in enumerate(self.pixels)
if p >= min_pixels and self.image_ID[l] in self.training_image_set] # true active
colors = [ #
[0, 0, 0],
[128, 64, 128],
[244, 35, 232],
[70, 70, 70],
[102, 102, 156],
[190, 153, 153],
[153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
]
self.label_colours = dict(zip(range(self.n_class), colors))
def __getitem__(self, index):
# stuff
self.index1 = self.active_index[index]
if '.dcm' in self.image_filename[self.index1]:
image1 = dcm2npy(self.image_filename[self.index1]).astype(np.float32)
elif '.npy' in self.image_filename[self.index1]:
image1 = npy2npy(self.image_filename[self.index1]).astype(np.float32)
if 'T1DUAL' in self.image_filename[self.index1]:
self.low_range = 0.0
self.high_range = 1200.0
elif 'T2SPIR' in self.image_filename[self.index1]:
self.low_range = 0.0
self.high_range = 1800.0
# set range
np.minimum(np.maximum(image1, self.low_range, image1), self.high_range, image1)
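        # Random intensity inversion (augmentation): maps each value v to
        # high_range + low_range - v, reversing intensities within the window.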
if random.randint(0, 1) == 1:
image1 = self.high_range + self.low_range - image1
# image1 -= self.low_range
# image1 /= (self.high_range - self.low_range)
if '.png' in self.label_filename[self.index1]:
label1 = png2npy(self.label_filename[self.index1])
elif '.npy' in self.label_filename[self.index1]:
label1 = npy2npy(self.label_filename[self.index1], mask=True)
width = label1.shape[0]
height = label1.shape[1]
lbl = label1.reshape(1, width, height)
img = image1.reshape(1, width, height)
if self.transforms is not None:
img, lbl = self.transform(img, lbl)
width, height = 256, 256
lbl = lbl.reshape(width, height)
img = img.reshape(width, height)
# set rotate
# rotate_time = random.randint(0, 3)
# lbl = np.rot90(lbl, rotate_time)
# img = np.rot90(img, rotate_time)
# set flip
# flip_time = random.randint(0, 1)
# if flip_time == 1:
# lbl = lbl.T
# img = img.T
# mix_rate = random.randint(0, 5)
# if mix_rate >= 8:
# length = len(self.active_index)
# self.random_index = (self.index1 + random.randint(0, length - 1)) % length
# if '.dcm' in self.image_filename[self.random_index]:
# image1 = dcm2npy(self.image_filename[self.random_index]).astype(np.float32)
# elif '.npy' in self.image_filename[self.random_index]:
# image1 = npy2npy(self.image_filename[self.random_index]).astype(np.float32)
# np.minimum(np.maximum(image1, self.low_range, image1), self.high_range, image1)
#
# width = image1.shape[0]
# height = image1.shape[1]
# image1 = image1.reshape(1, width, height)
# image1, image1 = self.transform(image1, image1)
#
# width, height = 256, 256
# image1 = image1.reshape(width, height)
# img = img * 0.6 + image1 * 0.4
img = np.repeat(img.reshape(1, width, height), 3, axis=0)
lbl = lbl.reshape(1, width, height)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
img = np.ascontiguousarray(img, dtype=np.float32)
lbl = np.ascontiguousarray(lbl, dtype=np.int64)
return img, lbl
def transform(self, img, lbl):
W = 256
H = 256
if lbl.shape[1] > H and lbl.shape[2] > W:
X = int((lbl.shape[1] - H) / 2)
Y = int((lbl.shape[2] - W) / 2)
lbl = lbl[:, X:X + H, Y:Y + W]
if img.shape[1] > H and img.shape[2] > W:
X = int((img.shape[1] - H) / 2)
Y = int((img.shape[2] - W) / 2)
img = img[:, X:X + H, Y:Y + W]
# transformations_train = transforms.Compose([transforms.RandomRotation(10),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor()])
# img = transformations_train(img)
# lbl = transformations_train(lbl)
return img, lbl
def decode_segmap(self, temp, bias=0):
n, c, h, w = temp.size()
temp = temp.view(h, w)
temp = temp.numpy()
temp = temp.astype(np.int8)
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_class):
r[temp == l] = self.label_colours[l][0 + bias * 3]
g[temp == l] = self.label_colours[l][1 + bias * 3]
b[temp == l] = self.label_colours[l][2 + bias * 3]
rgb = np.zeros((3, temp.shape[0], temp.shape[1]))
rgb[0, :, :] = r
rgb[1, :, :] = g
rgb[2, :, :] = b
return rgb
def __len__(self):
        return len(self.active_index)  # number of active training slices
class OrganTest(Dataset):
def __init__(self, current_fold, list_path, transforms=None):
self.augmentations = None
self.transforms = transforms
image_list = open(testing_set_filename(list_path, current_fold), 'r').read().splitlines()
        self.testing_image_set = np.zeros((len(image_list)), dtype=int)
for i in range(len(image_list)):
s = image_list[i].split(' ')
self.testing_image_set[i] = int(s[0])
slice_list = open(list_training_all(list_path), 'r').read().splitlines()
self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.pixels = np.zeros(self.slices, dtype=int)
self.image_filename = ['' for l in range(self.slices)]
self.label_filename = ['' for l in range(self.slices)]
for l in range(self.slices):
s = slice_list[l].split(' ')
self.image_ID[l] = s[0]
self.image_filename[l] = s[2] # important
self.label_filename[l] = s[3] # important
self.active_index = [l for l, p in enumerate(self.pixels)
if self.image_ID[l] in self.testing_image_set] # true active
def __getitem__(self, index):
# stuff
self.index1 = self.active_index[index]
image1 = dcm2npy(self.image_filename[self.index1]).astype(np.float32)
if 'T1DUAL' in self.image_filename[self.index1]:
self.low_range = 0.0
self.high_range = 1200.0
elif 'T2SPIR' in self.image_filename[self.index1]:
self.low_range = 0.0
self.high_range = 1800.0
np.minimum(np.maximum(image1, self.low_range, image1), self.high_range, image1)
# image1 -= self.low_range
# image1 /= (self.high_range - self.low_range)
label1 = png2npy(self.label_filename[self.index1])
width = label1.shape[0]
height = label1.shape[1]
img = np.repeat(image1.reshape(1, width, height), 3, axis=0)
lbl = label1.reshape(1, width, height)
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.transforms is not None:
img = self.transforms(img)
lbl = self.transforms(lbl)
return img, lbl
def __len__(self):
return len(self.active_index)
class OrganVolTest(Dataset):
def __init__(self, current_fold, list_path, transforms=None):
self.augmentations = None
self.n_class = 5
self.transforms = transforms
image_list = open(testing_set_filename(list_path, current_fold), 'r').read().splitlines()
        self.testing_image_set = np.zeros((len(image_list)), dtype=int)
for i in range(len(image_list)):
s = image_list[i].split(' ')
self.testing_image_set[i] = int(s[0])
slice_list = open(list_training_all(list_path), 'r').read().splitlines()
self.slices = len(slice_list)
        self.image_ID = np.zeros(self.slices, dtype=int)
        self.pixels = np.zeros(self.slices, dtype=int)
self.image_filename = ['' for l in range(self.slices)]
self.label_filename = ['' for l in range(self.slices)]
for l in range(self.slices):
s = slice_list[l].split(' ')
self.image_ID[l] = s[0]
self.image_filename[l] = s[2] # important
self.label_filename[l] = s[3] # important
colors = [ #
[0, 0, 0],
[63, 63, 63],
[126, 126, 126],
[189, 189, 189],
[252, 252, 252],
[128, 64, 128],
# [70, 70, 70],
[102, 102, 156],
[190, 153, 153],
# [153, 153, 153],
[250, 170, 30],
[220, 220, 0],
[107, 142, 35],
[244, 35, 32],
[152, 251, 52],
[0, 130, 80],
[244, 35, 232],
[152, 251, 152],
[0, 130, 180],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[0, 80, 100],
[0, 0, 230],
[119, 11, 32],
]
self.label_colours = colors
def __getitem__(self, index):
# stuff
self.index1 = self.testing_image_set[index]
self.active_index = [l for l, p in enumerate(self.pixels)
if self.image_ID[l] == self.index1] # true active
if '.dcm' in self.image_filename[self.active_index[0]]:
tmp = dcm2npy(self.image_filename[self.active_index[0]]).astype(np.float32)
elif '.npy' in self.image_filename[self.active_index[0]]:
tmp = npy2npy(self.image_filename[self.active_index[0]]).astype(np.float32)
# tmp = dcm2npy(self.image_filename[self.active_index[0]]).astype(np.float32)
width = tmp.shape[0]
height = tmp.shape[1]
print(width, height)
W = 384
H = 384
img_vol = np.zeros((len(self.active_index), 3, H, W), dtype=np.float32)
lbl_vol = np.zeros((len(self.active_index), height, width), dtype=np.int64)
for idx, id in enumerate(self.active_index):
if '.dcm' in self.image_filename[id]:
image1 = dcm2npy(self.image_filename[id]).astype(np.float32)
elif '.npy' in self.image_filename[id]:
image1 = npy2npy(self.image_filename[id]).astype(np.float32)
# image1 = dcm2npy(self.image_filename[id]).astype(np.float32)
if '.png' in self.label_filename[id]:
label1 = png2npy(self.label_filename[id])
elif '.npy' in self.label_filename[id]:
label1 = npy2npy(self.label_filename[id], mask=True)
# label1 = png2npy(self.label_filename[id])
img = np.repeat(image1.reshape(1, width, height), 3, axis=0)
# lbl = label1.reshape(1, width, height)
lbl = img[0]
W = 384
H = 384
if height > H and width > W:
X = int((height - H) / 2)
Y = int((width - W) / 2)
img = img[:, X:X + H, Y:Y + W]
img_vol[idx, :] = img
lbl_vol[idx, :] = lbl
if self.augmentations is not None:
img, lbl = self.augmentations(img, lbl)
if self.transforms is not None:
img = self.transforms(img)
lbl = self.transforms(lbl)
return img_vol, lbl_vol, self.index1, width
def __len__(self):
return len(self.testing_image_set)
def decode_segmap(self, temp, bias=0):
n, c, h, w = temp.size()
temp = temp.view(c, h, w)
temp = temp.numpy()
temp = temp.astype(np.uint8)
r = temp.copy()
g = temp.copy()
b = temp.copy()
for l in range(0, self.n_class):
r[temp == l] = self.label_colours[l + bias * self.n_class][0]
g[temp == l] = self.label_colours[l + bias * self.n_class][1]
b[temp == l] = self.label_colours[l + bias * self.n_class][2]
l = 0
r[temp == l] = self.label_colours[l][0]
g[temp == l] = self.label_colours[l][1]
b[temp == l] = self.label_colours[l][2]
rgb = np.zeros((c, 3, h, w)).astype(np.uint8)
rgb[:, 0, :, :] = r
rgb[:, 1, :, :] = g
rgb[:, 2, :, :] = b
return rgb
| 37.7375 | 111 | 0.549718 | 2,021 | 15,095 | 3.97526 | 0.109847 | 0.042569 | 0.061364 | 0.04705 | 0.699776 | 0.676002 | 0.639781 | 0.592482 | 0.541573 | 0.491785 | 0 | 0.055588 | 0.31474 | 15,095 | 399 | 112 | 37.83208 | 0.721094 | 0.131567 | 0 | 0.517361 | 0 | 0 | 0.005827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045139 | false | 0 | 0.03125 | 0.010417 | 0.121528 | 0.003472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfcb567563b3d4494c056cf9b6b3cc3c9bd24ae3 | 12,191 | py | Python | core/models/bev_speed_model.py | timothijoe/DI-drive | 3cddefc85bbbca6bcdd8a4d796decacaf8d81778 | [
"Apache-2.0"
] | null | null | null | core/models/bev_speed_model.py | timothijoe/DI-drive | 3cddefc85bbbca6bcdd8a4d796decacaf8d81778 | [
"Apache-2.0"
] | null | null | null | core/models/bev_speed_model.py | timothijoe/DI-drive | 3cddefc85bbbca6bcdd8a4d796decacaf8d81778 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from typing import Dict, Optional, Tuple, List, Union
from ding.torch_utils import MLP
class BEVSpeedConvEncoder(nn.Module):
"""
    Convolutional encoder of Bird-eye View image and speed input. It takes a BeV image
    and a speed scalar as input. The BeV image is encoded by a convolutional encoder into
    an embedding feature of half the embedding length; the speed value is then repeated
    to fill the other half and concatenated with the image feature to form the final feature.
:Arguments:
- obs_shape (Tuple): BeV image shape.
- hidden_dim_list (List): Conv encoder hidden layer dimension list.
- embedding_size (int): Embedding feature dimensions.
- kernel_size (List, optional): Conv kernel size for each layer. Defaults to [8, 4, 3].
- stride (List, optional): Conv stride for each layer. Defaults to [4, 2, 1].
"""
def __init__(
self,
obs_shape: Tuple,
hidden_dim_list: List,
embedding_size: int,
kernel_size: List = [8, 4, 3],
stride: List = [4, 2, 1],
) -> None:
super().__init__()
assert len(kernel_size) == len(stride), (kernel_size, stride)
self._obs_shape = obs_shape
self._embedding_size = embedding_size
self._relu = nn.ReLU()
layers = []
input_dim = obs_shape[0]
for i in range(len(hidden_dim_list)):
layers.append(nn.Conv2d(input_dim, hidden_dim_list[i], kernel_size[i], stride[i]))
layers.append(self._relu)
input_dim = hidden_dim_list[i]
layers.append(nn.Flatten())
self._model = nn.Sequential(*layers)
flatten_size = self._get_flatten_size()
self._mid = nn.Linear(flatten_size, self._embedding_size // 2)
def _get_flatten_size(self) -> int:
test_data = torch.randn(1, *self._obs_shape)
with torch.no_grad():
output = self._model(test_data)
return output.shape[1]
def forward(self, data: Dict) -> torch.tensor:
"""
Forward computation of encoder
:Arguments:
- data (Dict): Input data, must contain 'birdview' and 'speed'
:Returns:
torch.tensor: Embedding feature.
"""
image = data['birdview'].permute(0, 3, 1, 2)
speed = data['speed']
x = self._model(image)
x = self._mid(x)
speed_embedding_size = self._embedding_size - self._embedding_size // 2
speed_vec = torch.unsqueeze(speed, 1).repeat(1, speed_embedding_size)
h = torch.cat((x, speed_vec), dim=1)
return h
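# Editorial shape sketch (all sizes below are illustrative assumptions):
#   enc = BEVSpeedConvEncoder((5, 32, 32), [64, 128, 256], 512, [3, 3, 3], [2, 2, 2])
#   h = enc({'birdview': torch.randn(4, 32, 32, 5), 'speed': torch.randn(4)})
#   # h.shape == (4, 512): 256 conv features plus the speed scalar repeated 256 times.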
class FCContinuousNet(nn.Module):
"""
Overview:
FC continuous network which is used in ``QAC``.
        A main feature is that it uses ``_final_tanh`` to control whether to add
        a tanh layer that scales the output to (-1, 1).
Interface:
__init__, forward
"""
def __init__(
self,
input_size: int,
output_size: int,
embedding_size: int = 64,
final_tanh: bool = False,
layer_num: int = 1,
) -> None:
super(FCContinuousNet, self).__init__()
self._act = nn.ReLU()
self._main = nn.Sequential(
MLP(input_size, embedding_size, embedding_size, layer_num + 1, activation=self._act),
nn.Linear(embedding_size, output_size)
)
self._final_tanh = final_tanh
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self._main(x)
if self._final_tanh:
x = torch.tanh(x)
if x.shape[1] == 1:
x = x.squeeze(1)
return x
class BEVSpeedDeterminateNet(nn.Module):
"""
    Actor neural network that takes a Bird-eye View image and speed and outputs actions
    deterministically. It uses a ``BEVSpeedConvEncoder`` to get an embedding feature, and
    a fully-connected layer to get the final output.
It can be used as actor or critic network depending on forward arguments.
:Arguments:
- obs_shape (Tuple, optional): BeV image shape. Defaults to [5, 32, 32].
- action_shape (Union[int, tuple], optional): Action shape. Defaults to 2.
- encoder_hidden_dim_list (List, optional): Conv encoder hidden layer dimension list.
Defaults to [64, 128, 256].
- encoder_embedding_size (int, optional): Encoder output embedding size. Defaults to 512.
- head_embedding_dim (int, optional): FC hidden layer dimension. Defaults to 512.
- is_critic (bool, optional): Whether used as critic. Defaults to False.
"""
def __init__(
self,
obs_shape: Tuple = [5, 32, 32],
action_shape: Union[int, tuple] = 2,
encoder_hidden_dim_list: List = [64, 128, 256],
encoder_embedding_size: int = 512,
head_embedding_dim: int = 512,
is_critic: bool = False,
) -> None:
super().__init__()
self._obs_shape = obs_shape
self._act_shape = action_shape
self._is_critic = is_critic
self._encoder = BEVSpeedConvEncoder(
self._obs_shape, encoder_hidden_dim_list, encoder_embedding_size, [3, 3, 3], [2, 2, 2]
)
if is_critic:
self._head = FCContinuousNet(encoder_embedding_size + self._act_shape, 1, head_embedding_dim)
else:
self._head = FCContinuousNet(encoder_embedding_size, self._act_shape, head_embedding_dim, final_tanh=True)
def forward(self, obs: Dict, action: Optional[Dict] = None) -> torch.tensor:
"""
        Forward computation of the network. If used as a critic, ``action`` must not be ``None``.
:Arguments:
- obs (Dict): Observation dict.
- action (Dict, optional): Action dict. Defaults to None.
:Returns:
torch.tensor: Actions or critic value.
"""
embedding = self._encoder(obs)
if self._is_critic:
assert action is not None
obs_action_input = torch.cat([embedding, action], dim=1)
q = self._head(obs_action_input)
return q
output = self._head(embedding)
return output
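# Editorial usage note (tensors are hypothetical): as an actor, `net(obs)` returns
# tanh-squashed actions; constructed with is_critic=True, `net(obs, action)` returns
# a scalar Q-value, since the action is concatenated with the observation embedding
# before the head.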
class BEVSpeedStochasticNet(nn.Module):
"""
    Actor neural network that takes a Bird-eye View image and speed and outputs actions
    stochastically. It uses a ``BEVSpeedConvEncoder`` to get an embedding feature, and
    fully-connected layers to get mean and std values.
:Arguments:
- obs_shape (Tuple, optional): BeV image shape. Defaults to [5, 32, 32].
- action_shape (Union[int, tuple], optional): Action shape. Defaults to 2.
- encoder_hidden_dim_list (List, optional): Conv encoder hidden layer dimension list.
Defaults to [64, 128, 256].
- policy_hideen_size (int, optional): Encoder output embedding size. Defaults to 512.
- log_std_min (int, optional): Log std min value. Defaults to -20.
- log_std_max (int, optional): Log std max value. Defaults to 2.
- init_w (float, optional): Clip value of mean and std layer weights. Defaults to 3e-3.
"""
def __init__(
self,
obs_shape: Tuple = [5, 32, 32],
action_shape: Union[int, tuple] = 2,
encoder_hidden_dim_list: List = [64, 128, 256],
policy_hideen_size: int = 512,
log_std_min: int = -20,
log_std_max: int = 2,
init_w: float = 3e-3,
) -> None:
super().__init__()
self._obs_shape = obs_shape
self._act_shape = action_shape
self._log_std_min = log_std_min
self._log_std_max = log_std_max
self._encoder = BEVSpeedConvEncoder(
self._obs_shape, encoder_hidden_dim_list, policy_hideen_size, [3, 3, 3], [2, 2, 2]
)
self._mean_layer = nn.Linear(policy_hideen_size, action_shape)
self._mean_layer.weight.data.uniform_(-init_w, init_w)
self._mean_layer.bias.data.uniform_(-init_w, init_w)
self._log_std_layer = nn.Linear(policy_hideen_size, action_shape)
self._log_std_layer.weight.data.uniform_(-init_w, init_w)
self._log_std_layer.bias.data.uniform_(-init_w, init_w)
def forward(self, obs: Dict) -> Tuple[torch.tensor, torch.tensor]:
"""
Forward computation of network.
:Arguments:
- obs (Dict): Observation dict.
:Returns:
Tuple[torch.tensor, torch.tensor]: Mean and std value for actions.
"""
embedding = self._encoder(obs)
mean = self._mean_layer(embedding)
log_std = self._log_std_layer(embedding)
log_std = torch.clamp(log_std, self._log_std_min, self._log_std_max)
return mean, log_std
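# Editorial sampling sketch (an assumption mirroring the usual squashed-Gaussian
# recipe; not code taken from this repository):
#   mean, log_std = policy(obs)
#   action = torch.tanh(mean + log_std.exp() * torch.randn_like(mean))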
class BEVSpeedSoftQNet(nn.Module):
def __init__(
self,
obs_shape: Tuple = [5, 32, 32],
action_shape: Union[int, tuple] = 2,
encoder_hidden_dim_list: List = [64, 128, 256],
soft_q_hidden_size: int = 512,
init_w: float = 3e-3,
) -> None:
super().__init__()
self._obs_shape = obs_shape
self._act_shape = action_shape
self._encoder = BEVSpeedConvEncoder(
self._obs_shape, encoder_hidden_dim_list, soft_q_hidden_size, [3, 3, 3], [2, 2, 2]
)
self._output_layer = nn.Linear(soft_q_hidden_size + self._act_shape, 1)
self._output_layer.weight.data.uniform_(-init_w, init_w)
self._output_layer.bias.data.uniform_(-init_w, init_w)
def forward(self, obs, action):
embedding = self._encoder(obs)
obs_action_input = torch.cat([embedding, action], dim=1)
output = self._output_layer(obs_action_input)
return output
class BEVSpeedProximalNet(nn.Module):
def __init__(
self,
obs_shape: Tuple = [5, 32, 32],
action_shape: Union[int, tuple] = 2,
encoder_embedding_size: int = 512,
encoder_hidden_dim_list: List = [64, 128, 256],
head_hidden_size=128,
head_layer_num=2,
is_critic=False,
) -> None:
super().__init__()
self._obs_shape = obs_shape
self._act_shape = action_shape
self._encoder_embedding_size = encoder_embedding_size
self._head_hidden_size = head_hidden_size
self._head_layer_num = head_layer_num
self._encoder = BEVSpeedConvEncoder(
self._obs_shape, encoder_hidden_dim_list, encoder_embedding_size, [3, 3, 3], [2, 2, 2]
)
self._is_critic = is_critic
if self._is_critic:
self._head = self._setup_critic()
else:
self._head = self._setup_actor()
def _setup_actor(self):
if isinstance(self._act_shape, tuple):
return nn.ModuleList([self._setup_1dim_actor(a) for a in self._act_shape])
else:
return self._setup_1dim_actor(self._act_shape)
def _setup_critic(self):
input_size = self._encoder_embedding_size
layers = []
for _ in range(self._head_layer_num):
layers.append(nn.Linear(input_size, self._head_hidden_size))
layers.append(nn.ReLU())
input_size = self._head_hidden_size
layers.append(nn.Linear(input_size, 1))
output = nn.Sequential(*layers)
return output
def _setup_1dim_actor(self, act_shape: int) -> torch.nn.Module:
input_size = self._encoder_embedding_size
layers = []
for _ in range(self._head_layer_num):
layers.append(nn.Linear(input_size, self._head_hidden_size))
layers.append(nn.ReLU())
input_size = self._head_hidden_size
layers.append(nn.Linear(input_size, act_shape))
output = nn.Sequential(*layers)
return output
def forward(self, obs):
embedding = self._encoder(obs)
        # Because we use a value-based actor-critic, the actor and critic heads
        # take input of the same form, so both cases reduce to the same call.
        output = self._head(embedding)
return output
| 37.860248 | 118 | 0.618817 | 1,579 | 12,191 | 4.48955 | 0.124763 | 0.04768 | 0.027507 | 0.028213 | 0.531669 | 0.454366 | 0.405135 | 0.378051 | 0.367753 | 0.317252 | 0 | 0.021782 | 0.284472 | 12,191 | 321 | 119 | 37.978193 | 0.790898 | 0.26618 | 0 | 0.446602 | 0 | 0 | 0.001517 | 0 | 0 | 0 | 0 | 0 | 0.009709 | 1 | 0.07767 | false | 0 | 0.019417 | 0 | 0.184466 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfcf530f45cfc31304305ccd0a5618dda6f958ac | 1,266 | py | Python | perception/manage.py | ramaneswaran/perception | 045b85634412355d66b2db6a102a97796c9aa11f | [
"Apache-2.0"
] | 1 | 2021-04-14T10:58:13.000Z | 2021-04-14T10:58:13.000Z | perception/manage.py | shivamsaraswat8/perception | 045b85634412355d66b2db6a102a97796c9aa11f | [
"Apache-2.0"
] | null | null | null | perception/manage.py | shivamsaraswat8/perception | 045b85634412355d66b2db6a102a97796c9aa11f | [
"Apache-2.0"
] | 1 | 2021-04-10T18:02:45.000Z | 2021-04-10T18:02:45.000Z | import os
from sqlalchemy.orm import Session
from perception.database import SessionLocal, engine
from perception import models, schemas
from perception.core.faiss_helper import FaissCore
models.Base.metadata.create_all(bind=engine)
# Dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
def get_food_by_index_id(db: Session, index_id: int):
try:
return db.query(models.Food).filter(models.Food.index_id == index_id).first()
except Exception as error:
print(repr(error))
def check_file_id(db: Session, file_id: int):
try:
result = db.query(models.Food).filter(models.Food.file_id == file_id).first()
return result
except Exception as error:
print(repr(error))
if __name__ == "__main__":
    db = SessionLocal()
indexes = [0,1]
result = get_food_by_index_id(db, 0)
# result = db.query(models.Food).all()
# for obj in result:
# print(schemas.Food.from_orm(obj))
print(result.index_id)
# base_dir = os.path.dirname(os.path.realpath(__file__))
# index_store = os.path.join(base_dir, 'index_store')
# index = FaissCore('vector.index',index_store, dimension=6)
# print(index.size) | 23.018182 | 86 | 0.666667 | 174 | 1,266 | 4.632184 | 0.37931 | 0.052109 | 0.048387 | 0.063275 | 0.251861 | 0.215881 | 0.171216 | 0 | 0 | 0 | 0 | 0.004032 | 0.21643 | 1,266 | 55 | 87 | 23.018182 | 0.808468 | 0.227488 | 0 | 0.25 | 0 | 0 | 0.008247 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.178571 | 0 | 0.357143 | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfd03e2de858927a123e0392f69647b19f5245c3 | 3,462 | py | Python | lamblayer/init.py | YU-SUKETAKAHASHI/lamblayer | 5650235d16a40c41e8395a1fae7484c8c297e2ef | [
"MIT"
] | 7 | 2021-12-24T03:51:28.000Z | 2022-01-31T02:50:46.000Z | lamblayer/init.py | YU-SUKETAKAHASHI/lamblayer | 5650235d16a40c41e8395a1fae7484c8c297e2ef | [
"MIT"
] | null | null | null | lamblayer/init.py | YU-SUKETAKAHASHI/lamblayer | 5650235d16a40c41e8395a1fae7484c8c297e2ef | [
"MIT"
] | null | null | null | import os
import json
import requests
import click
from .lamblayer import Lamblayer
class Init(Lamblayer):
def __init__(self, profile, region, log_level):
super().__init__(profile, region, log_level)
def __call__(self, function_name, download):
self.init(function_name, download)
def init(self, function_name, download):
"""
        Initialize the function config file, and download layer zip contents.
Params
======
function_name: str
            the name of the function to initialize
download: bool
            whether to download all layer zip contents.
"""
self.logger.info(f"starting init {function_name}")
response = self.session.client("lambda").get_function(
FunctionName=function_name
)
try:
layers = response["Configuration"]["Layers"]
layer_version_arns = [layer["Arn"] for layer in layers]
except KeyError:
layer_version_arns = []
self.logger.info("createing function.json")
self.logger.debug(f"function_name: {function_name}")
self.logger.debug(f"layers: {layer_version_arns}")
self._gen_function_json(function_name, layer_version_arns)
if download:
self.logger.info("starging download layers")
for layer_version_arn in layer_version_arns:
self.logger.info(f"downloading {layer_version_arn}")
layer_content_url = self._get_layer_url(layer_version_arn)
self._download_layer(layer_content_url)
def _gen_function_json(self, function_name, layer_version_arns):
"""
Generate a function config file.
Params
======
function_name: str
the name of the function
layer_version_arns: str
the ARN of the layer version
"""
FUNCTION = "function.json"
config = {
"FunctionName": function_name,
"Layers": layer_version_arns,
}
if os.path.exists(FUNCTION):
if not click.confirm(f"Overwrite existing file {FUNCTION}?"):
self.logger.info("chanceled")
return 0
with open(FUNCTION, "w") as f:
json.dump(config, f, indent=4)
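    # Editorial example of a generated function.json (values are illustrative
    # assumptions, not outputs from a real deployment):
    # {
    #     "FunctionName": "my-function",
    #     "Layers": ["arn:aws:lambda:us-east-1:123456789012:layer:my-layer:1"]
    # }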
def _get_layer_url(self, layer_version_arn):
"""
Return a layer zip content url.
Params
======
layer_version_arn: str
the ARN of the layer version
Returns
=======
content_url: str
a url of layer zip content
"""
version = int(layer_version_arn.split(":")[-1])
layer_arn = layer_version_arn.rsplit(":", 1)[0]
response = self.session.client("lambda").get_layer_version(
LayerName=layer_arn,
VersionNumber=version,
)
content_url = response["Content"]["Location"]
return content_url
def _download_layer(self, layer_content_url):
"""
Download layer zip contents.
save path format : ./{layer name}-xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.zip
Params
======
layer_content_url: str
a url of layer zip content
"""
save_path = layer_content_url.split("/")[-1].split("?")[0] + ".zip"
response = requests.get(layer_content_url)
with open(save_path, "wb") as f:
f.write(response.content)
| 28.85 | 82 | 0.590699 | 389 | 3,462 | 5.015424 | 0.244216 | 0.110712 | 0.065607 | 0.033829 | 0.186571 | 0.157868 | 0.09226 | 0.034854 | 0.034854 | 0 | 0 | 0.00293 | 0.309936 | 3,462 | 119 | 83 | 29.092437 | 0.81373 | 0.200173 | 0 | 0 | 0 | 0 | 0.12131 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0.089286 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfd0af6f971f3ce7bf05347862e1f5a1232a4e5f | 3,135 | py | Python | deadunits/model_load.py | google-research/deadunits | 5f4c7d9dc0201cefeb3dc970bcaee66a78cfa423 | [
"Apache-2.0"
] | 3 | 2021-04-01T02:52:04.000Z | 2021-11-05T15:48:43.000Z | deadunits/model_load.py | google-research/deadunits | 5f4c7d9dc0201cefeb3dc970bcaee66a78cfa423 | [
"Apache-2.0"
] | null | null | null | deadunits/model_load.py | google-research/deadunits | 5f4c7d9dc0201cefeb3dc970bcaee66a78cfa423 | [
"Apache-2.0"
] | 2 | 2021-11-05T15:45:30.000Z | 2022-01-16T11:50:00.000Z | # coding=utf-8
# Copyright 2021 The Deadunits Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Implements various utility functions for loading and transforming models.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deadunits import data
from deadunits import generic_convnet
from deadunits import model_defs
import gin
from six.moves import zip
import tensorflow.compat.v2 as tf
INPUT_SHAPES = {'cub200': (2, 224, 224, 3),
'cifar10': (2, 32, 32, 3),
'imagenet': (2, 224, 224, 3)}
@gin.configurable
def get_model(model_arch_name=gin.REQUIRED,
dataset_name=gin.REQUIRED,
load_path=None,
prepare_for_pruning=False):
"""Creates or loads the model and returns it.
  If the model does not match the saved version, usually no error or warning is
  raised, so be careful: CHECK YOUR VARIABLES.
Args:
model_arch_name: str, definition from .model_defs.py file.
dataset_name: str, either 'cifar10' or 'imagenet'.
load_path: str, checkpoint name/path to be load.
prepare_for_pruning: bool, if True the loaded model is copied in-to one with
TaylorScorer layer and layers are wrapped with MaskedLayer.
Returns:
generic_convnet.GenericConvnet, initialized or loaded model.
Raises:
ValueError: when the args doesn't match the specs.
IOError: when there is no checkpoint found at the path given.
"""
if dataset_name not in INPUT_SHAPES:
raise ValueError('Dataset_name: %s is not one of %s' %
(dataset_name, list(INPUT_SHAPES.keys())))
if not hasattr(model_defs, model_arch_name):
raise ValueError('Model name: %s...not in model_defs.py' % model_arch_name)
n_classes = data.N_CLASSES_BY_DATASET[dataset_name]
model_arch = (
getattr(model_defs, model_arch_name) + [['O', n_classes]])
model = generic_convnet.GenericConvnet(
model_arch=model_arch, name=model_arch_name)
dummy_var = tf.zeros(INPUT_SHAPES[dataset_name])
# Initializing model.
model(dummy_var)
if load_path is not None:
checkpoint = tf.train.Checkpoint(model=model)
checkpoint.restore(load_path)
if prepare_for_pruning:
old_model = model
model = generic_convnet.GenericConvnet(
model_arch=model_arch, name=model_arch_name,
use_taylor_scorer=True,
use_masked_layers=True)
model(dummy_var)
for v1, v2 in zip(old_model.trainable_variables,
model.trainable_variables):
v2.assign(v1)
return model
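# Editorial usage sketch (argument values are hypothetical; in practice they are
# bound through gin configuration):
#   model = get_model(model_arch_name='vgg_16', dataset_name='cifar10',
#                     load_path=None, prepare_for_pruning=True)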
| 37.321429 | 80 | 0.717384 | 448 | 3,135 | 4.841518 | 0.415179 | 0.049793 | 0.053942 | 0.014753 | 0.082988 | 0.062702 | 0.062702 | 0.062702 | 0.062702 | 0.062702 | 0 | 0.018065 | 0.205423 | 3,135 | 83 | 81 | 37.771084 | 0.85267 | 0.439872 | 0 | 0.093023 | 0 | 0 | 0.054118 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0 | 0.209302 | 0 | 0.255814 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfd4fb606ff1d4d7e97383391e3aac4284986abd | 4,012 | py | Python | scripts/generate_topk.py | sarapapi/FBK-fairseq-ST | 33f381937c1589602944da8cf39e533802d283ca | [
"MIT"
] | 11 | 2021-02-28T23:33:18.000Z | 2022-02-11T20:42:18.000Z | scripts/generate_topk.py | sarapapi/FBK-fairseq-ST | 33f381937c1589602944da8cf39e533802d283ca | [
"MIT"
] | 1 | 2021-05-21T08:08:19.000Z | 2021-06-30T12:28:55.000Z | scripts/generate_topk.py | sarapapi/FBK-fairseq-ST | 33f381937c1589602944da8cf39e533802d283ca | [
"MIT"
] | 5 | 2021-03-15T02:05:38.000Z | 2022-02-14T09:20:20.000Z | import logging
import os
import torch
import numpy as np
from fairseq import utils, options, tasks, progress_bar, checkpoint_utils
from fairseq.data.knowledge_distillation import TeacherOutputDataset
logger = logging.getLogger(__name__)
def gen_outputs(args):
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
logger.info('{} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Load ensemble
logger.info('loading model(s) from {}'.format(args.path))
models, _ = checkpoint_utils.load_model_ensemble(
args.path.split(':'), task=task, arg_overrides=eval(args.model_overrides))
assert len(models) == 1
model = models[0]
# Optimize ensemble for generation
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
model.max_positions()
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=8,
num_shards=args.num_shards,
shard_id=args.shard_id,
).next_epoch_itr(shuffle=False)
outputs = [None for _ in range(len(dataset))]
with progress_bar.build_progress_bar(args, itr) as t:
for sample in t:
s = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in s:
continue
# We assume the target is already present and known
assert s['target'] is not None
targets = s['target']
with torch.no_grad():
net_output = model(**s['net_input'])
topk_outs, topk_idx = torch.topk(net_output[0], args.distill_topk, dim=-1) # B, T, k
non_padding_mask = targets.ne(task.target_dictionary.pad()).cpu().numpy().astype(bool)
topk_idx = topk_idx.cpu().numpy()
topk_outs = topk_outs.cpu().numpy()
for i, id_s in enumerate(s['id'].data):
outputs[id_s] = [
topk_idx[i, non_padding_mask[i]].tolist(),
topk_outs[i, non_padding_mask[i]].tolist()]
return outputs
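# Editorial note: each entry of `outputs` holds [top-k token ids, top-k logits]
# for the non-padding target positions of one sentence, i.e. the teacher
# distribution later saved to disk for knowledge distillation.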
def save_expert_outputs(args, expert_outputs):
logger.info("Start saving expert outputs..")
src_lang = args.source_lang
tgt_lang = args.target_lang
file_prefix = '{}.{}-{}.{}'.format(args.gen_subset, src_lang, tgt_lang, tgt_lang)
path = os.path.join(args.data, file_prefix + '.top{}_idx'.format(args.distill_topk))
TeacherOutputDataset.save_bin(path, [o[0] for o in expert_outputs], np.int32)
logger.info("Written {}".format(path))
path = os.path.join(args.data, file_prefix + '.top{}_out'.format(args.distill_topk))
TeacherOutputDataset.save_bin(path, [o[1] for o in expert_outputs], np.float32)
logger.info("Written {}".format(path))
if __name__ == '__main__':
parser = options.get_generation_parser()
parser.add_argument('--distill-topk', default=8, type=int)
args = options.parse_args_and_arch(parser)
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
logger.info(args)
expert_outputs = gen_outputs(args)
save_expert_outputs(args, expert_outputs)
| 38.209524 | 102 | 0.662512 | 548 | 4,012 | 4.604015 | 0.332117 | 0.041221 | 0.02061 | 0.015854 | 0.1522 | 0.130797 | 0.069758 | 0.069758 | 0.069758 | 0 | 0 | 0.006094 | 0.222832 | 4,012 | 104 | 103 | 38.576923 | 0.803079 | 0.038883 | 0 | 0.024096 | 0 | 0 | 0.08054 | 0 | 0 | 0 | 0 | 0 | 0.060241 | 1 | 0.024096 | false | 0 | 0.072289 | 0 | 0.108434 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfdb04f375f82469afd2de9898e6269f06588e28 | 3,928 | py | Python | DTL/db/models/tablemodel.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 1 | 2015-03-23T18:52:12.000Z | 2015-03-23T18:52:12.000Z | DTL/db/models/tablemodel.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | null | null | null | DTL/db/models/tablemodel.py | rocktavious/DevToolsLib | 117200c91a3361e04f7c8e07d2ed4999bbcfc469 | [
"MIT"
] | 2 | 2017-05-21T12:50:41.000Z | 2021-10-17T03:32:45.000Z | from DTL.qt import QtCore, QtGui
from DTL.qt.QtCore import Qt
#------------------------------------------------------------
#------------------------------------------------------------
class TableModel(QtCore.QAbstractTableModel):
#------------------------------------------------------------
    def __init__(self, data=None, headers=None, parent=None):
        super(TableModel, self).__init__(parent)
        # Use None defaults to avoid mutable default arguments shared across instances.
        self.__data = data if data is not None else [[]]
        self.__headers = headers if headers is not None else []
#------------------------------------------------------------
def rowCount(self, parent):
return len(self.__data)
#------------------------------------------------------------
def columnCount(self, parent):
return len(self.__data[0])
#------------------------------------------------------------
def flags(self, index):
return Qt.ItemIsEditable | Qt.ItemIsEnabled | Qt.ItemIsSelectable
#------------------------------------------------------------
def headerData(self, section, orientation, role):
if role == Qt.DisplayRole:
if orientation == Qt.Horizontal :
if section < len(self.__headers):
return self.__headers[section]
else:
return 'NONE'
else:
return section
#------------------------------------------------------------
def data(self, index, role):
row = index.row()
column = index.column()
value = self.__data[row][column]
if role == Qt.EditRole :
return value
if role == Qt.DisplayRole :
return value
if role == Qt.ToolTipRole :
return value
#if role == Qt.DecorationRole:
#pixmap = QtGui.QPixmap(26, 26)
#pixmap.fill(QtGui.QColor(0,0,0))
#icon = QtGui.QIcon(pixmap)
#return icon
#------------------------------------------------------------
def setData(self, index, value, role=Qt.EditRole):
if index.isValid():
if role == Qt.EditRole:
self.__data[index.row()][index.column()] = value
self.dataChanged.emit(index, index)
return True
return False
#------------------------------------------------------------
def insertRows(self, position, rows, parent=QtCore.QModelIndex()):
self.beginInsertRows(parent, position, position + rows - 1)
for i in range(rows):
default_values = ['' for i in range(self.columnCount(None))]
self.__data.insert(position, default_values)
self.endInsertRows()
return True
#------------------------------------------------------------
def removeRows(self, position, rows, parent=QtCore.QModelIndex()):
self.beginRemoveRows(parent, position, position + rows - 1)
for i in range(rows):
value = self.__data[position]
self.__data.remove(value)
self.endRemoveRows()
return True
#------------------------------------------------------------
def insertColumns(self, position, columns, parent=QtCore.QModelIndex()):
self.beginInsertColumns(parent, position, position + columns - 1)
rowCount = len(self.__data)
for i in range(columns):
for j in range(rowCount):
self.__data[j].insert(position, '')
self.endInsertColumns()
return True
#------------------------------------------------------------
def removeColumns(self, position, columns, parent=QtCore.QModelIndex()):
self.beginRemoveRows(parent, position, position + columns - 1)
rowCount = len(self.__data)
for i in range(columns):
for j in range(rowCount):
value = self.__data[j][position]
self.__data[j].remove(value)
self.endRemoveRows()
return True | 33.862069 | 76 | 0.455448 | 330 | 3,928 | 5.293939 | 0.236364 | 0.06411 | 0.027476 | 0.031483 | 0.372639 | 0.340011 | 0.265598 | 0.194619 | 0.146537 | 0.146537 | 0 | 0.004129 | 0.260183 | 3,928 | 116 | 77 | 33.862069 | 0.597041 | 0.231161 | 0 | 0.342857 | 0 | 0 | 0.001332 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157143 | false | 0 | 0.028571 | 0.042857 | 0.414286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfe0414c8157e5f726b4a1a487bb20c7b5854909 | 706 | py | Python | wendy/models/seeds/chair.py | AIFI-INC/wendy-framework | e752748428dad550eb9fa1833571c721c089bbc6 | [
"Apache-2.0"
] | null | null | null | wendy/models/seeds/chair.py | AIFI-INC/wendy-framework | e752748428dad550eb9fa1833571c721c089bbc6 | [
"Apache-2.0"
] | 5 | 2021-12-11T18:39:59.000Z | 2021-12-12T02:34:25.000Z | wendy/models/seeds/chair.py | AIFI-INC/wendy-framework | e752748428dad550eb9fa1833571c721c089bbc6 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import asyncio
from faker import Faker
faker = Faker()
sys.path.insert(0, os.path.abspath(os.curdir))
from config import init_db
from wendy.models import *
__all__ = [
'ChairFaker',
'seed_chair'
]
class ChairFaker(object):
async def generate(self, **kwargs):
await init_db()
fake = Chair(**kwargs)
await fake.save()
return fake
def seed_chair():
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.wait([
ChairFaker().generate(
position="Leader",
room_id=1
),
ChairFaker().generate(
position="Dev",
room_id=1
)
]))
loop.close()
| 19.081081 | 46 | 0.594901 | 83 | 706 | 4.891566 | 0.542169 | 0.049261 | 0.128079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005988 | 0.290368 | 706 | 36 | 47 | 19.611111 | 0.804391 | 0 | 0 | 0.129032 | 0 | 0 | 0.041076 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.193548 | 0 | 0.290323 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfe103929d40cc1fd5b17a3bd2751486f8d91d6a | 3,814 | py | Python | tasks/outlooksend.py | cmu-sei/usersim | 0a90e1c2f32ce27bbb564c7196050c50409989dd | [
"BSL-1.0"
] | 10 | 2018-05-07T07:52:51.000Z | 2021-09-04T05:34:46.000Z | tasks/outlooksend.py | cmu-sei/usersim | 0a90e1c2f32ce27bbb564c7196050c50409989dd | [
"BSL-1.0"
] | null | null | null | tasks/outlooksend.py | cmu-sei/usersim | 0a90e1c2f32ce27bbb564c7196050c50409989dd | [
"BSL-1.0"
] | 4 | 2018-04-09T17:59:13.000Z | 2019-11-17T01:33:35.000Z | # Copyright 2017 Carnegie Mellon University. See LICENSE.md file for terms.
import platform
try:
import win32com.client
except ImportError:
# Tasks must be importable on any platform.
pass
import api
from tasks import outlook
class OutlookSend(outlook.Outlook):
""" Interact with Outlook to send emails. Requires Outlook and OutlookRedemption to be installed. Windows-only.
"""
def __init__(self, config):
if not platform.system() == 'Windows':
raise OSError('This task is only compatible with Windows.')
self._config = config
self._outlook = outlook.SharedOutlook()
def __call__(self):
self._send_message()
def _send_message(self):
subject, body = self._get_content()
# Attempted workaround for emails sitting in Outbox. May not actually work correctly.
if self._outlook.outlook_application.Explorers.Count == 0:
folder = self._outlook.mapi_namespace.GetDefaultFolder(win32com.client.constants.olFolderOutbox)
folder.Display()
self._exchange_check()
# TODO: Make sure new order works.
outbox = self._outlook.mapi_namespace.GetDefaultFolder(win32com.client.constants.olFolderOutbox)
outlook_mail_item = self._outlook.outlook_application.CreateItem(win32com.client.constants.olMailItem)
outlook_mail_item = outlook_mail_item.Move(outbox)
outlook_mail_item.Subject = subject
outlook_mail_item.Body = body
outlook_mail_item.Save()
for file_ in self._config['attachments']:
outlook_mail_item.Attachments.Add(file_)
# Need to use Redemption to actually get it to send correctly.
new_email = win32com.client.Dispatch('Redemption.SafeMailItem')
new_email.Item = outlook_mail_item
new_email.Recipients.Add(self._config['destination'])
new_email.Recipients.ResolveAll()
new_email.Send()
def _get_content(self):
""" Get subject and body.
Returns:
str, str: First return value is email subject and second value is email body.
"""
if self._config['dynamic']:
subject = 'DYNAMIC OPTION NOT YET IMPLEMENTED'
body = 'DYNAMIC OPTION NOT YET IMPLEMENTED'
else:
subject = self._config['subject']
body = self._config['body']
return subject, body
@classmethod
def parameters(cls):
""" Information about this task's configuration.
Returns:
dict: With keys 'required' and 'optional', whose values are dicts with the task's required and optional
config keys, and whose values are human-readable strings giving information about that key.
"""
config = {}
required = {'username': 'str| The "From" address.',
'destination': 'str| The "To" address.',
'subject': 'str| Subject line. Specify empty string if optional parameter "dynamic" is used.',
'body': 'str| Message body. Specify empty string if optional parameter "dynamic" is used.'}
optional = {'attachments': '[str]| A list of paths to files that should be attached.',
'dynamic': 'bool| Generate subject and body. Default False.'}
config['required'] = required
config['optional'] = optional
return config
@classmethod
def validate(cls, config):
""" Validate the task configuration.
Raises:
KeyError: If a required key is missing.
ValueError: If a key's value is not valid.
"""
defaults = {'attachments': [],
'dynamic': False}
config = api.check_config(config, cls.parameters(), defaults)
return config
| 34.990826 | 115 | 0.640535 | 427 | 3,814 | 5.587822 | 0.388759 | 0.036882 | 0.050293 | 0.024308 | 0.131601 | 0.106454 | 0.106454 | 0.106454 | 0.106454 | 0 | 0 | 0.005398 | 0.271369 | 3,814 | 108 | 116 | 35.314815 | 0.853185 | 0.24043 | 0 | 0.066667 | 0 | 0 | 0.205618 | 0.008282 | 0 | 0 | 0 | 0.009259 | 0 | 1 | 0.1 | false | 0.016667 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfe3d87776cd3825a075db83aacd350f318e2832 | 2,146 | py | Python | monoweb/mono/cache.py | ragnraok/MonoReader | 4672f5f0ca48f69e9180b33b62e773ab323c2cbc | [
"MIT"
] | 1 | 2019-06-12T01:46:22.000Z | 2019-06-12T01:46:22.000Z | monoweb/mono/cache.py | ragnraok/MonoReader | 4672f5f0ca48f69e9180b33b62e773ab323c2cbc | [
"MIT"
] | null | null | null | monoweb/mono/cache.py | ragnraok/MonoReader | 4672f5f0ca48f69e9180b33b62e773ab323c2cbc | [
"MIT"
] | null | null | null | from flask import current_app
import pickle
import os
import time
import fcntl
class FileLock(object):
def __init__(self, filename, *args, **kwargs):
self.filename = filename
self.open_args = args
self.open_kwargs = kwargs
self.fileobj = None
def __enter__(self):
f = open(self.filename, *self.open_args, **self.open_kwargs)
while True:
fcntl.flock(f, fcntl.LOCK_EX)
fnew = open(self.filename, *self.open_args, **self.open_kwargs)
if os.path.sameopenfile(f.fileno(), fnew.fileno()):
fnew.close()
break
else:
f.close()
f = fnew
self.fileobj = f
return f
def __exit__(self, _exc_type, _exc_value, _trackback):
self.fileobj.close()
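# Editorial note on FileLock.__enter__: re-opening the file and comparing handles
# with os.path.sameopenfile guards against the race where the lock file is replaced
# between open() and flock(); the lock only counts once both handles refer to the
# same on-disk file.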
CACHE_FILE = "disk_cache"
class SimpleCache(object):
"""
    a simple disk cache with a file lock
"""
def __init__(self):
if not os.path.exists(CACHE_FILE):
f = open(CACHE_FILE, "w")
f.write(pickle.dumps({"testCache": "testCache"}))
f.close()
@classmethod
def create_instance(cls):
        # Lazily create and cache a single shared instance on the class; a
        # single-underscore attribute avoids name-mangling pitfalls.
        if not hasattr(cls, '_instance'):
            cls._instance = cls()
        return cls._instance
def __setitem__(self, key, value):
#self.__cache[key] = value
with FileLock(CACHE_FILE, "r+") as f:
cache = ''.join(f.readlines())
cache = pickle.loads(cache)
cache[key] = value
dumps_result = pickle.dumps(cache)
f.seek(0)
f.write(dumps_result)
f.flush()
current_app.logger.info('set key: %s, value: %s' % (key, value))
def __getitem__(self, key):
with FileLock(CACHE_FILE, "r") as f:
cache = ''.join(f.readlines())
cache = pickle.loads(cache)
current_app.logger.info("get key: %s, value: %s" % (key, cache.get(key)))
return cache.get(key)
def __len__(self):
        # Count the entries currently stored in the on-disk cache.
        with FileLock(CACHE_FILE, "r") as f:
            return len(pickle.loads(''.join(f.readlines())))
cache = SimpleCache.create_instance()
| 28.613333 | 85 | 0.55918 | 254 | 2,146 | 4.468504 | 0.311024 | 0.042291 | 0.042291 | 0.052863 | 0.211454 | 0.188546 | 0.188546 | 0.188546 | 0.188546 | 0.114537 | 0 | 0.000686 | 0.321062 | 2,146 | 74 | 86 | 29 | 0.778312 | 0.027959 | 0 | 0.169492 | 0 | 0 | 0.041546 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0.084746 | 0.016949 | 0.338983 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfed61c9b170b49c4d5f7fdd3356dd67308b7946 | 5,901 | py | Python | fnn.py | Yanjing-PENG/feedforward-neural-network | 0ef94c172fdd4773beb6e7ca26e5c944a97f84de | [
"MIT"
] | null | null | null | fnn.py | Yanjing-PENG/feedforward-neural-network | 0ef94c172fdd4773beb6e7ca26e5c944a97f84de | [
"MIT"
] | null | null | null | fnn.py | Yanjing-PENG/feedforward-neural-network | 0ef94c172fdd4773beb6e7ca26e5c944a97f84de | [
"MIT"
] | null | null | null | """
This program builds a two-layer neural network for the Iris dataset.
The first layer is a relu layer with 10 units, and the second one is
a softmax layer. The network structure is specified in the "train" function.
The parameters are learned using SGD. The forward propagation and backward
propagation are carried out in the "compute_neural_net_loss" function.
"""
import numpy as np
import os, sys
import math
# Data sets
IRIS_TRAINING = os.getcwd() + "/data/iris_training.csv"
IRIS_TEST = os.getcwd() + "/data/iris_test.csv"
def get_data():
# Load datasets.
train_data = np.genfromtxt(IRIS_TRAINING, skip_header=1,
dtype=float, delimiter=',')
test_data = np.genfromtxt(IRIS_TEST, skip_header=1,
dtype=float, delimiter=',')
train_x = train_data[:, :4]
train_y = train_data[:, 4].astype(np.int64)
test_x = test_data[:, :4]
test_y = test_data[:, 4].astype(np.int64)
return train_x, train_y, test_x, test_y
def compute_neural_net_loss(params, X, y, reg=0.0):
"""
Neural network loss function.
Inputs:
- params: dictionary of parameters, including "W1", "b1", "W2", "b2"
- X: N x D array of training data. Each row is a D-dimensional point.
- y: 1-d array of shape (N, ) for the training labels.
Returns:
- loss: the softmax loss with regularization
- grads: dictionary of gradients for the parameters in params
"""
# Unpack variables from the params dictionary
W1, b1 = params['W1'], params['b1']
W2, b2 = params['W2'], params['b2']
N, D = X.shape
loss = 0.0
grads = {}
# forward propagation
    relu = lambda x: x * (x > 0)
    z1 = X.dot(W1) + b1
    u1 = relu(z1)  # relu already works elementwise; np.vectorize is unneeded
    z2 = u1.dot(W2) + b2
    # shift by the row max before exponentiating to avoid overflow; the
    # softmax probabilities (and hence the loss) are unchanged by the shift
    u2 = np.exp(z2 - z2.max(axis=1, keepdims=True))
    NLL = -np.log(np.array([u2[i][y[i]] / u2[i].sum() for i in range(N)])).sum()
loss = NLL / N + 0.5 * reg * ((W1 ** 2).sum() + (W2 ** 2).sum())
# backward propagation
d_relu = lambda x: 1 * (x >= 0)
delta2 = np.zeros(z2.shape)
for i in range(delta2.shape[0]):
for k in range(delta2.shape[1]):
delta2[i][k] = u2[i][k] / u2[i].sum() - (y[i] == k)
dW2 = np.zeros(W2.shape)
for i in range(N):
dW2 += (u1[i].reshape(-1, 1)).dot(delta2[i].reshape(1, -1))
dW2 = dW2 / N + reg * W2
db2 = np.zeros(len(b2))
for i in range(N):
db2 += delta2[i]
db2 = db2 / N
delta1 = np.zeros(z1.shape)
for i in range(delta1.shape[0]):
for j in range(delta1.shape[1]):
delta1[i][j] = d_relu(z1[i][j]) * (delta2[i].dot(W2[j].T))
dW1 = np.zeros(W1.shape)
for i in range(N):
dW1 += (X[i].reshape(-1, 1)).dot(delta1[i].reshape(1, -1))
dW1 = dW1 / N + reg * W1
db1 = np.zeros(len(b1))
for i in range(N):
db1 += delta1[i]
db1 = db1 / N
grads['W1']=dW1
grads['W2']=dW2
grads['b1']=db1
grads['b2']=db2
return loss, grads
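# A numeric gradient check (an illustrative addition, not part of the original
# program): compares the analytic gradients above against centred finite
# differences for one parameter tensor. The step h and the relative-error
# formula are conventional choices; entries whose pre-activation sits exactly
# at the relu kink may show inflated error.
def gradient_check(params, X, y, key='W1', reg=0.0, h=1e-5):
    _, grads = compute_neural_net_loss(params, X, y, reg)
    numeric = np.zeros_like(params[key])
    it = np.nditer(params[key], flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        old = params[key][ix]
        params[key][ix] = old + h
        loss_plus, _ = compute_neural_net_loss(params, X, y, reg)
        params[key][ix] = old - h
        loss_minus, _ = compute_neural_net_loss(params, X, y, reg)
        params[key][ix] = old
        numeric[ix] = (loss_plus - loss_minus) / (2.0 * h)
        it.iternext()
    denom = np.maximum(np.abs(numeric) + np.abs(grads[key]), 1e-8)
    return np.max(np.abs(numeric - grads[key]) / denom)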
def predict(params, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- params: dictionary of parameters, including "W1", "b1", "W2", "b2"
- X: N x D array of training data. Each row is a D-dimensional point.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
# Unpack variables from the params dictionary
W1, b1 = params['W1'], params['b1']
W2, b2 = params['W2'], params['b2']
    y_pred = np.zeros(X.shape[0])  # one entry per example (the original used X.shape[1])
relu = lambda x: x * (x > 0)
z1 = np.dot(X,W1)+b1
u1 = relu(z1)
z2 = np.dot(u1,W2)+b2
y_pred = np.argmax(z2, axis=1)
return y_pred
def acc(ylabel, y_pred):
return np.mean(ylabel == y_pred)
def sgd_update(params, grads, learning_rate):
"""
Perform sgd update for parameters in params.
"""
for key in params:
params[key] += -learning_rate * grads[key]
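# An SGD-with-momentum variant (illustrative alternative only; train() below
# still calls the plain sgd_update). `velocities` must be a dict that the
# caller keeps across updates, initially empty.
def sgd_momentum_update(params, grads, velocities, learning_rate, momentum=0.9):
    for key in params:
        velocities[key] = momentum * velocities.get(key, 0.0) \
            - learning_rate * grads[key]
        params[key] += velocities[key]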
def train(X, y, Xtest, ytest, learning_rate=1e-3, reg=1e-5, epochs=100, batch_size=20):
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
num_iters_per_epoch = int(math.floor(1.0*num_train/batch_size))
params = {}
std = 0.001
params['W1'] = std * np.random.randn(dim, 10)
params['b1'] = np.zeros(10)
params['W2'] = std * np.random.randn(10, num_classes)
params['b2'] = np.zeros(num_classes)
    for epoch in range(epochs):  # the original used the global max_epochs, ignoring this argument
perm_idx = np.random.permutation(num_train)
# perform mini-batch SGD update
for it in range(num_iters_per_epoch):
idx = perm_idx[it*batch_size:(it+1)*batch_size]
batch_x = X[idx]
batch_y = y[idx]
# evaluate loss and gradient
loss, grads = compute_neural_net_loss(params, batch_x, batch_y, reg)
# update parameters
sgd_update(params, grads, learning_rate)
# evaluate and print every 10 steps
if epoch % 10 == 0:
train_acc = acc(y, predict(params, X))
test_acc = acc(ytest, predict(params, Xtest))
print('Epoch %4d: loss = %.2f, train_acc = %.4f, test_acc = %.4f' \
% (epoch, loss, train_acc, test_acc))
return params
max_epochs = 200
batch_size = 20
learning_rate = 0.1
reg = 0.001
# get training and testing data
train_x, train_y, test_x, test_y = get_data()
params = train(train_x, train_y, test_x, test_y, learning_rate, reg, max_epochs, batch_size)
# Classify two new flower samples.
def new_samples():
return np.array(
[[6.4, 3.2, 4.5, 1.5],
[5.8, 3.1, 5.0, 1.7]], dtype=np.float32)
new_x = new_samples()
predictions = predict(params, new_x)
print("New Samples, Class Predictions: {}\n".format(predictions))
| 30.261538 | 101 | 0.604304 | 934 | 5,901 | 3.716274 | 0.217345 | 0.022184 | 0.0121 | 0.022184 | 0.229329 | 0.176894 | 0.131374 | 0.122155 | 0.10314 | 0.10314 | 0 | 0.046808 | 0.254194 | 5,901 | 194 | 102 | 30.417526 | 0.741877 | 0.261142 | 0 | 0.109091 | 0 | 0 | 0.040633 | 0.005434 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063636 | false | 0 | 0.027273 | 0.018182 | 0.145455 | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cfeff2979c260def21d3497db63dd0e2915c929e | 3,021 | py | Python | src/commands/context_create_override.py | EatBreatheCode/sublime_override_audit | 4170bccc88e5442e18f337e291b771769b00d3c4 | [
"MIT"
] | 31 | 2017-01-28T10:08:12.000Z | 2021-06-01T06:57:27.000Z | src/commands/context_create_override.py | EatBreatheCode/sublime_override_audit | 4170bccc88e5442e18f337e291b771769b00d3c4 | [
"MIT"
] | 34 | 2017-02-03T14:47:00.000Z | 2020-05-27T05:48:09.000Z | src/commands/context_create_override.py | EatBreatheCode/sublime_override_audit | 4170bccc88e5442e18f337e291b771769b00d3c4 | [
"MIT"
] | 8 | 2017-02-03T08:31:36.000Z | 2022-02-26T20:13:43.000Z | import sublime
import sublime_plugin
from os.path import isfile
from ..core import oa_setting, setup_new_override_view
from ..core import PackageListCollectionThread, ContextHelper
###----------------------------------------------------------------------------
class OverrideAuditContextCreateOverrideCommand(ContextHelper, sublime_plugin.TextCommand):
"""
When invoked on a read-only view that represents a package resource that
does not yet exist on disk (e.g. as opened by 'View Package Resource' in
the command palette), promote that view to be a potential new override.
"""
def run(self, edit, **kwargs):
target = self.view_target(self.view, **kwargs)
if self.package is not None:
target.window().run_command("override_audit_create_override", {
"package": self.package
})
else:
setup_new_override_view(target, reposition=False)
def description(self, **kwargs):
if self.package is not None:
return self.caption("Create Override in '%s'" % (self.package), **kwargs)
return self.caption("Override this resource", **kwargs)
def _ctx_package(self, **kwargs):
"""
Check the context of the command to see if it's being triggered on the
name of a package (only) which can contain overrides. If so, store the
name in the tracking variable and return it. Otherwise, reset the
tracking variable and return None.
"""
target = self.view_target(self.view, **kwargs)
ctx = self.view_context(target, False, **kwargs)
self.package = ctx.package if self.package_overrides_possible(target, ctx) else None
return self.package
def is_visible(self, **kwargs):
if self.always_visible(**kwargs):
return True
return self.package is not None or self.is_enabled(**kwargs)
def is_enabled(self, **kwargs):
# Always enabled if we're invoked via a context action on a package
# that can contain overrides.
if self._ctx_package(**kwargs) is not None:
return True
        # The current buffer needs to be eligible to promote to an override.
spp = sublime.packages_path()
view = self.view_target(self.view, **kwargs)
name = view.file_name()
# Unnamed or editable buffers can't represent new overrides, and neither
# can files not in the packages folder or files that already exist.
if (name is None or not view.is_read_only() or
not name.startswith(spp) or isfile(name)):
return False
# We can only enable the command if this file represents a resource
# that actually exists in the package.
res = name[len(spp) + 1:].replace("\\", "/")
if "Packages/" + res not in sublime.find_resources(res.split('/')[-1]):
return False
return True
###----------------------------------------------------------------------------
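# A context-menu entry invoking this command would look roughly like the
# following JSON in a Context.sublime-menu file (illustrative only; the
# package ships its own menu definitions):
#
# [
#     {"command": "override_audit_context_create_override"}
# ]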
| 37.7625 | 92 | 0.617014 | 377 | 3,021 | 4.859416 | 0.328912 | 0.048035 | 0.03821 | 0.029476 | 0.121179 | 0.079694 | 0.06441 | 0 | 0 | 0 | 0 | 0.000882 | 0.249586 | 3,021 | 79 | 93 | 38.240506 | 0.807234 | 0.336312 | 0 | 0.225 | 0 | 0 | 0.049428 | 0.015609 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff1461f14e1b3e15b0d50f0e2b63bd501d06025 | 2,557 | py | Python | nicos_mlz/pgaa/devices/sampledevices.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/pgaa/devices/sampledevices.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_mlz/pgaa/devices/sampledevices.py | jkrueger1/nicos | 5f4ce66c312dedd78995f9d91e8a6e3c891b262b | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Johannes Schwarz <johannes.schwarz@frm2.tum.de>
#
# *****************************************************************************
"""Auxiliary classes for the sample changer."""
from nicos.core import Attach, Moveable, Override, Readable, oneof, status
class SamplePusher(Moveable):
"""Move the sample up/down inside the sample changer device."""
valuetype = oneof('down', 'up')
attached_devices = {
'actuator': Attach('Actuator to perform the switch', Moveable),
        'sensort': Attach('Sensor at the top of the tube', Readable),
        'sensorl': Attach('Sensor at the bottom of the tube', Readable),
}
parameter_overrides = {
'unit': Override(default=''),
'fmtstr': Override(default='%s'),
}
def doInit(self, mode):
self._target_sens = None
def doStart(self, target):
self._attached_actuator.move(target)
if target == 'up':
self._target_sens = self._attached_sensort
elif target == 'down':
self._target_sens = self._attached_sensorl
def doStatus(self, maxage=0):
        # self._target_sens is local state, so the poller may report a stale
        # status here, but maw() works
if self._target_sens:
if self._target_sens.read(maxage) == 0:
return status.BUSY, 'moving'
elif self._target_sens.read(maxage) == 1:
self._target_sens = None
return status.OK, 'idle'
def doRead(self, maxage=0):
if self._attached_sensort.read(maxage):
return 'up'
elif self._attached_sensorl.read(maxage):
return 'down'
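# Sketch of how such a device might be wired up in a NICOS setup file
# (device names here are hypothetical):
#
# devices = dict(
#     sample_pusher = device('nicos_mlz.pgaa.devices.sampledevices.SamplePusher',
#         description = 'Moves the sample up/down in the sample changer',
#         actuator = 'push_actuator',
#         sensort = 'sensor_top',
#         sensorl = 'sensor_bottom',
#     ),
# )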
| 37.057971 | 79 | 0.625342 | 321 | 2,557 | 4.900312 | 0.504673 | 0.050858 | 0.062301 | 0.036236 | 0.115702 | 0.035601 | 0 | 0 | 0 | 0 | 0 | 0.014624 | 0.224482 | 2,557 | 68 | 80 | 37.602941 | 0.778618 | 0.47282 | 0 | 0.0625 | 0 | 0 | 0.109589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.03125 | 0 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff38ffb9953a78d832f2d9e41f612523b84e3e1 | 4,265 | py | Python | custom_components/pioneer_async/config_flow.py | 2t0m/ha-pioneer_async | bc0361536b257eb9059c0ba5cfa6103e1907f8cb | [
"Apache-2.0"
] | null | null | null | custom_components/pioneer_async/config_flow.py | 2t0m/ha-pioneer_async | bc0361536b257eb9059c0ba5cfa6103e1907f8cb | [
"Apache-2.0"
] | null | null | null | custom_components/pioneer_async/config_flow.py | 2t0m/ha-pioneer_async | bc0361536b257eb9059c0ba5cfa6103e1907f8cb | [
"Apache-2.0"
] | null | null | null | """Config flow for pioneer_async integration."""
import logging
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_TIMEOUT,
)
from homeassistant.core import callback
from .pioneer_avr import PioneerAVR # pylint: disable=import-error
from .const import (
DATA_SCHEMA,
OPTIONS_DEFAULTS,
CONF_UNIQUE_ID,
CONF_COMMAND_DELAY,
CONF_VOLUME_WORKAROUND,
)
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""
Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
_LOGGER.debug(">> validate_input(%s)", data)
try:
pioneer = PioneerAVR(data[CONF_HOST], data[CONF_PORT])
await pioneer.connect()
    except Exception:
        raise CannotConnect  # pylint: disable=raise-missing-from
await pioneer.shutdown()
del pioneer
# Return info that you want to store in the config entry.
device_unique_id = data[CONF_HOST] + ":" + str(data[CONF_PORT])
return {
**data,
CONF_UNIQUE_ID: device_unique_id,
}
class PioneerAVRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Pioneer AVR config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
_LOGGER.debug(">> config.async_step_user(%s)", user_input)
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
await self.async_set_unique_id(info[CONF_UNIQUE_ID])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=info[CONF_UNIQUE_ID], data=user_input
)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PioneerAVROptionsFlowHandler(config_entry)
class PioneerAVROptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Harmony."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
_LOGGER.debug(">> options.__init__(%s)", config_entry)
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
_LOGGER.debug(">> options.async_step_init(%s)", user_input)
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
## Get current set of options and build options schema
options = {
**OPTIONS_DEFAULTS,
**(self.config_entry.options if self.config_entry.options else {}),
}
data_schema = vol.Schema(
{
## TODO: add sources option: how to ask the user for a dictionary in config flow?
vol.Optional(
CONF_SCAN_INTERVAL, default=options[CONF_SCAN_INTERVAL]
): int,
vol.Optional(CONF_TIMEOUT, default=options[CONF_TIMEOUT]): vol.Coerce(
float
),
vol.Optional(
CONF_COMMAND_DELAY, default=options[CONF_COMMAND_DELAY]
): vol.Coerce(float),
vol.Optional(
CONF_VOLUME_WORKAROUND, default=options[CONF_VOLUME_WORKAROUND]
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
| 32.807692 | 97 | 0.637749 | 484 | 4,265 | 5.357438 | 0.295455 | 0.034709 | 0.018511 | 0.013112 | 0.12418 | 0.084073 | 0.022368 | 0 | 0 | 0 | 0 | 0.000322 | 0.27245 | 4,265 | 129 | 98 | 33.062016 | 0.835321 | 0.121923 | 0 | 0.077778 | 0 | 0 | 0.045804 | 0.015078 | 0 | 0 | 0 | 0.007752 | 0 | 1 | 0.022222 | false | 0 | 0.088889 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff5e002899e3ed1c30ff51720ac0be6ed86fc5b | 6,316 | py | Python | TwoTimeScaleHybridLearning/src/comparison_poc_3.py | sidsrini12/FURL_Sim | 55b420a771858c06f1aef58f48bb68302be36621 | [
"MIT"
] | null | null | null | TwoTimeScaleHybridLearning/src/comparison_poc_3.py | sidsrini12/FURL_Sim | 55b420a771858c06f1aef58f48bb68302be36621 | [
"MIT"
] | null | null | null | TwoTimeScaleHybridLearning/src/comparison_poc_3.py | sidsrini12/FURL_Sim | 55b420a771858c06f1aef58f48bb68302be36621 | [
"MIT"
] | null | null | null | import argparse
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pickle as pkl
import common.config as cfg
from common.utils import Struct
matplotlib.rcParams.update({'font.size': 24})
matplotlib.rcParams['lines.linewidth'] = 2.5
matplotlib.rcParams['lines.markersize'] = 4
ap = argparse.ArgumentParser()
ap.add_argument('--dataset', type=str, required=False, default='mnist')
ap.add_argument('--num-nodes', type=int, required=False, default=125)
ap.add_argument('--epochs', type=int, required=False)
ap.add_argument('--histories', type=str, nargs='+', required=True)
ap.add_argument('--baselines', type=str, nargs='+', required=True)
ap.add_argument('--labels', type=str, nargs='+', required=True)
ap.add_argument('--name', type=str, required=True)
ap.add_argument('--ncols', type=int, required=True)
ap.add_argument('--dpi', type=int, required=True)
ap.add_argument('--colors', type=str, nargs='+', required=False, default=[])
ap.add_argument('--fracs', type=float, nargs='+', required=False, default=[])
ap.add_argument('--accuracy', type=float, required=False)
args = vars(ap.parse_args())
args = Struct(**args)
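# Example invocation (file names and values are hypothetical; the history and
# baseline pickles are expected under ../ckpts/<dataset>_<num_nodes>/history/):
#
#   python comparison_poc_3.py --dataset mnist --num-nodes 125 \
#       --histories hist_E_0.1_D_0.1.pkl hist_E_1.0_D_1.0.pkl \
#       --baselines baseline_central.pkl \
#       --labels a b --name comparison_{}.png --ncols 3 --dpi 300 \
#       --fracs 0.1 0.5 0.75 1.0 --accuracy 0.9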
fig = plt.figure(figsize=(30, 7.5))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
colors = ['k.-', 'r.:', 'm.:', 'b.:', 'g.:', 'c.:', 'y.:', 'k.:', 'r', 'b']
if len(args.colors):
colors = args.colors
def get_milestone_epoch(mile_list, milestone):
for idx, mile in enumerate(mile_list, 1):
if mile > milestone:
return idx
def calculate_num_euts(eut_schedule, mile):
return len([_ for _ in eut_schedule if _ <= mile])
milestones = {}
power = {}
delay = {}
cost = {}
c1, c2, c3 = 10**(-4), 10**(2), 0.5*10**(4)
for idx, history in enumerate(args.histories):
aux = history[:-4] + '_aux.pkl'
x_ax, y_ax, l_test, rounds, eps, eta_phi = pkl.load(
open('../ckpts/{}_{}/history/{}'.format(
args.dataset, args.num_nodes, history), 'rb'))
train_args, eut_schedule = pkl.load(
open('../ckpts/{}_{}/history/{}'.format(
args.dataset, args.num_nodes, aux), 'rb'))
nc = train_args.num_clusters[0]
nw = train_args.num_workers
e_glob, e_d2d = cfg.E_glob, cfg.E_glob*train_args.e_frac
d_glob, d_d2d = cfg.D_glob, cfg.D_glob*train_args.d_frac
alpha = 1600
miles = get_milestone_epoch(y_ax, args.accuracy)
tag = 'E_{}_D_{}'.format(train_args.e_frac, train_args.d_frac)
milestones[tag] = miles
rounds = sum(rounds[:miles])*train_args.num_clusters[0]
num_eut = calculate_num_euts(eut_schedule, miles)
cost[tag] = c1*(num_eut*nc*e_glob + nw*rounds*e_d2d) + \
c2*(num_eut*d_glob + rounds*d_d2d) + \
sum([
c3*(1-(eut_schedule[i-1]+alpha)/(
eut_schedule[i-1]+eut_schedule[i]+alpha)
) for i in range(1, len(eut_schedule))
])
power[tag] = (num_eut*nc*e_glob*d_glob) + (nw*rounds*e_d2d*d_d2d)
delay[tag] = (num_eut*d_glob) + (rounds*d_d2d)
# note: the original zipped against the bare string ('central'), which
# iterates over characters, so the baseline key silently became 'c'; a
# one-element tuple gives the intended label
for (idx, history), n in zip(enumerate(args.baselines), ('central',)):
x_ax, y_ax, l_test, rounds, eps, eta_phi, beta, mu = pkl.load(
open('../ckpts/{}_{}/history/{}'.format(
args.dataset, args.num_nodes, history), 'rb'))
miles = get_milestone_epoch(y_ax, args.accuracy)
milestones[n] = miles
# cost[n] = c1*(train_args.epochs*nw*e_glob) + c2*(train_args.epochs*d_glob)
power[n] = miles*nw*e_glob*d_glob
delay[n] = miles*d_glob
fracs = args.fracs
n = len(fracs)
power_mat = np.zeros((n, n))
delay_mat = np.zeros((n, n))
miles_mat = np.zeros((n, n))
costs_mat = np.zeros((n, n))
for i, ie in enumerate(fracs):
for j, jd in enumerate(fracs):
tag = 'E_{}_D_{}'.format(ie, jd)
power_mat[i,n-j-1] = power[tag]
delay_mat[i,n-j-1] = delay[tag]
miles_mat[i,n-j-1] = milestones[tag]
costs_mat[i,n-j-1] = cost[tag]
column_names = list(map(str, fracs[::-1]))
row_names = list(map(str, fracs))
r, c = len(fracs), len(fracs)
xpos = np.arange(0, r, 1)
ypos = np.arange(0, c, 1)
xpos, ypos = np.meshgrid(xpos+0.25, ypos+0.25)
x, y = np.meshgrid(np.arange(0, r+1, 1),
np.arange(0, c+1, 1))
xpos = xpos.flatten()
ypos = ypos.flatten()
zpos = np.zeros(r*c)
dx = 0.5 * np.ones_like(zpos)
dy = dx.copy()
dz = costs_mat.flatten()/(10**4)
flat = np.ones((r+1, c+1))*milestones['central']
cs = ['m', 'b', 'g', 'c'] * c
ax1.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax1.plot_surface(x, y, flat, alpha=0.4, color='k')
ax1.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax1.w_xaxis.set_ticklabels(column_names)
ax1.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax1.w_yaxis.set_ticklabels(row_names)
ax1.set_xlabel('delay fraction', labelpad=25)
ax1.set_ylabel('energy fraction', labelpad=25)
ax1.set_zlabel('cumm. cost ($x 10^4$)', labelpad=10)
k=(10**6)
dz = power_mat.flatten()/k
flat = np.ones((r+1, c+1))*power['central']/k
ax2.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax2.plot_surface(x, y, flat, alpha=0.6, color='k')
ax2.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax2.w_xaxis.set_ticklabels(column_names)
ax2.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax2.w_yaxis.set_ticklabels(row_names)
ax2.set_xlabel('delay fraction', labelpad=25)
ax2.set_ylabel('energy fraction', labelpad=25)
ax2.set_zlabel('cumm. power ($x 10^6$ J)', labelpad=10)
k=100
dz = delay_mat.flatten()/k
flat = np.ones((r+1, c+1))*delay['central']/k
ax3.bar3d(xpos, ypos, zpos, dx, dy, dz, color=cs)
# ax3.plot_surface(x, y, flat, alpha=0.6, color='k')
ax3.w_xaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax3.w_xaxis.set_ticklabels(column_names)
ax3.w_yaxis.set_ticks([0.25, 1.25, 2.25, 3.25])
ax3.w_yaxis.set_ticklabels(row_names)
ax3.set_xlabel('delay fraction', labelpad=25)
ax3.set_ylabel('energy fraction', labelpad=25)
ax3.set_zlabel('cumm. delay ($10^2$ s)', labelpad=10)
ax1.set_title('(a)', y=-0.2)
ax2.set_title('(b)', y=-0.2)
ax3.set_title('(c)', y=-0.2)
args.name = args.name.format(args.accuracy)
print('Saving: ', args.name)
fig.subplots_adjust(wspace=0.025)
plt.savefig('../ckpts/{}_{}/plots/{}'.format(
args.dataset, args.num_nodes, args.name),
bbox_inches='tight', pad_inches=0.5, dpi=args.dpi)
| 36.72093 | 80 | 0.651203 | 1,070 | 6,316 | 3.683178 | 0.193458 | 0.015225 | 0.039584 | 0.025882 | 0.446841 | 0.354986 | 0.25628 | 0.20071 | 0.153768 | 0.130931 | 0 | 0.047584 | 0.15152 | 6,316 | 171 | 81 | 36.935673 | 0.687815 | 0.03594 | 0 | 0.047619 | 0 | 0 | 0.082676 | 0.016108 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013605 | false | 0 | 0.054422 | 0.006803 | 0.081633 | 0.006803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff7f0dca5d382c69b5e9605f440ff2bd5bd0d15 | 638 | py | Python | code/ex3.2-aio_multiple_requests.py | MA3STR0/PythonAsyncWorkshop | e7ecbcf602be4e858b6b7415335da5fa35018605 | [
"MIT"
] | 2 | 2015-11-26T15:33:28.000Z | 2015-11-29T23:28:34.000Z | code/ex3.2-aio_multiple_requests.py | MA3STR0/PythonAsyncWorkshop | e7ecbcf602be4e858b6b7415335da5fa35018605 | [
"MIT"
] | null | null | null | code/ex3.2-aio_multiple_requests.py | MA3STR0/PythonAsyncWorkshop | e7ecbcf602be4e858b6b7415335da5fa35018605 | [
"MIT"
] | 3 | 2017-07-25T08:02:15.000Z | 2020-10-26T10:06:15.000Z | import asyncio
import aiohttp
import time
URLS = [
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
'http://127.0.0.1:8000',
]
@asyncio.coroutine
def request_greetings():
response_tasks = yield from asyncio.wait([aiohttp.get(url) for url in URLS])
text_tasks = yield from asyncio.wait(
[task.result().text() for task in response_tasks[0]]
)
texts = [task.result() for task in text_tasks[0]]
return '\n'.join(texts)
loop = asyncio.get_event_loop()
t1 = time.time()
greetings = loop.run_until_complete(request_greetings())
print(time.time() - t1, 'seconds passed')
print(greetings)
loop.close()
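# The code above targets the Python 3.4-era APIs used in this workshop:
# @asyncio.coroutine / yield from and the module-level aiohttp.get(), both of
# which are gone in current releases. Note also that asyncio.wait() returns
# (done, pending) *sets*, so the order of `texts` need not match URLS.
# A rough modern equivalent (assuming Python >= 3.7 and aiohttp >= 3):
#
#     async def request_greetings():
#         async with aiohttp.ClientSession() as session:
#             async def fetch(url):
#                 async with session.get(url) as resp:
#                     return await resp.text()
#             texts = await asyncio.gather(*(fetch(url) for url in URLS))
#         return '\n'.join(texts)
#
#     print(asyncio.run(request_greetings()))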
| 23.62963 | 80 | 0.673981 | 96 | 638 | 4.375 | 0.416667 | 0.05 | 0.057143 | 0.064286 | 0.219048 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0 | 0.06379 | 0.164577 | 638 | 26 | 81 | 24.538462 | 0.724203 | 0 | 0 | 0.136364 | 0 | 0 | 0.123824 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0.045455 | 0.136364 | 0 | 0.227273 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff94841fb558efbf47ac2efacf5957f299d35b0 | 394 | py | Python | utils.py | sidchilling/zerodha_TA | b1c67cfdc16e8ef569f13c8529621322d0eadb27 | [
"Apache-2.0"
] | 13 | 2018-04-12T06:03:44.000Z | 2021-05-22T22:42:53.000Z | utils.py | sidchilling/zerodha_TA | b1c67cfdc16e8ef569f13c8529621322d0eadb27 | [
"Apache-2.0"
] | null | null | null | utils.py | sidchilling/zerodha_TA | b1c67cfdc16e8ef569f13c8529621322d0eadb27 | [
"Apache-2.0"
] | 15 | 2018-12-28T21:34:46.000Z | 2022-01-16T14:54:05.000Z | from mongoengine import *
from models import *
def get_symbol_data(symbol):
    db_client = connect(db='stocks_db')
data = []
for sp in StockPrice.objects(symbol = symbol).order_by('date'):
data.append({
'date': sp.date,
'open': sp.open,
'high': sp.high,
'low': sp.low,
'close': sp.close,
'volume': sp.volume
})
db_client.close()
return data | 23.176471 | 65 | 0.604061 | 53 | 394 | 4.377358 | 0.509434 | 0.068966 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.241117 | 394 | 17 | 66 | 23.176471 | 0.77592 | 0 | 0 | 0 | 0 | 0 | 0.098734 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.125 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cff9ae37ed184a7106978b35ce97b96f7c324bca | 1,434 | py | Python | sa/profiles/HP/GbE2/get_mac_address_table.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 84 | 2017-10-22T11:01:39.000Z | 2022-02-27T03:43:48.000Z | sa/profiles/HP/GbE2/get_mac_address_table.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 22 | 2017-12-11T07:21:56.000Z | 2021-09-23T02:53:50.000Z | sa/profiles/HP/GbE2/get_mac_address_table.py | prorevizor/noc | 37e44b8afc64318b10699c06a1138eee9e7d6a4e | [
"BSD-3-Clause"
] | 23 | 2017-12-06T06:59:52.000Z | 2022-02-24T00:02:25.000Z | # ---------------------------------------------------------------------
# HP.GbE2.get_mac_address_table
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetmacaddresstable import IGetMACAddressTable
from noc.core.text import parse_table
class Script(BaseScript):
name = "HP.GbE2.get_mac_address_table"
interface = IGetMACAddressTable
def execute(self, interface=None, vlan=None, mac=None):
cmd = "/info/l2/fdb"
if vlan:
cmd += "/vlan %d" % vlan
svlan = str(vlan)
elif mac:
cmd += "/find %s" % mac
elif interface:
cmd += "/port %s" % interface
else:
cmd += "/dump"
r = []
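        # parse_table() yields rows of (MAC address, VLAN, port, trunk, state)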
for m, v, port, trk, state in parse_table(self.cli(cmd)):
if not m:
continue
if (not mac or m.upper() == mac) and (not vlan or v == svlan):
p = trk if trk else port
if interface and interface != p:
continue
if v == "4095": # Built-in vlans on port 19
continue
r += [{"vlan_id": v, "mac": m, "interfaces": [p], "type": "D"}]
return r
| 34.97561 | 79 | 0.456764 | 153 | 1,434 | 4.222222 | 0.470588 | 0.032508 | 0.027864 | 0.037152 | 0.074303 | 0.074303 | 0 | 0 | 0 | 0 | 0 | 0.016983 | 0.301953 | 1,434 | 40 | 80 | 35.85 | 0.628372 | 0.237796 | 0 | 0.103448 | 0 | 0 | 0.091328 | 0.026753 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.103448 | 0 | 0.275862 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cffa548efb720426a20010047da63946608d1f48 | 1,628 | py | Python | src/api_trt/prepare_models.py | a7ypically/InsightFace-REST | 8436a9308ba335102ae059f57a4fc83c5e7098b5 | [
"Apache-2.0"
] | null | null | null | src/api_trt/prepare_models.py | a7ypically/InsightFace-REST | 8436a9308ba335102ae059f57a4fc83c5e7098b5 | [
"Apache-2.0"
] | null | null | null | src/api_trt/prepare_models.py | a7ypically/InsightFace-REST | 8436a9308ba335102ae059f57a4fc83c5e7098b5 | [
"Apache-2.0"
] | null | null | null | import os
import logging
from modules.utils.helpers import parse_size, tobool, validate_max_size
from modules.model_zoo.getter import prepare_backend
from modules.configs import Configs
from env_parser import EnvConfigs
log_level = os.getenv('LOG_LEVEL', 'INFO')
logging.basicConfig(
level=log_level,
format='%(asctime)s %(levelname)s - %(message)s',
datefmt='[%H:%M:%S]',
)
def prepare_models(root_dir: str = '/models'):
model_configs = Configs(models_dir=root_dir)
env_configs = EnvConfigs()
rec_name = env_configs.models.rec_name
det_name = env_configs.models.det_name
ga_name = env_configs.models.ga_name
mask_detector = env_configs.models.mask_detector
max_size = env_configs.defaults.max_size
if max_size is None:
max_size = [640, 640]
max_size = validate_max_size(max_size)
models = [model for model in [det_name, rec_name, ga_name, mask_detector] if model is not None]
for model in models:
batch_size = 1
if model_configs.models[model].get('allow_batching'):
if model == det_name:
batch_size = env_configs.models.det_batch_size
else:
batch_size = env_configs.models.rec_batch_size
logging.info(f"Preparing '{model}' model...")
prepare_backend(model_name=model, backend_name=env_configs.models.backend_name, im_size=max_size,
force_fp16=env_configs.models.fp16,
max_batch_size=batch_size, config=model_configs)
logging.info(f"'{model}' model ready!")
if __name__ == "__main__":
prepare_models()
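# Model and backend selection comes from environment variables parsed by
# EnvConfigs in env_parser.py; only LOG_LEVEL is read directly above, e.g.
# (illustrative):
#
#   LOG_LEVEL=DEBUG python prepare_models.py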
| 30.716981 | 105 | 0.687961 | 225 | 1,628 | 4.648889 | 0.297778 | 0.124283 | 0.122371 | 0.076482 | 0.047801 | 0 | 0 | 0 | 0 | 0 | 0 | 0.0086 | 0.214373 | 1,628 | 52 | 106 | 31.307692 | 0.809226 | 0 | 0 | 0 | 0 | 0 | 0.086609 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.157895 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cffa6ded7c7c5d1fba7de3158859d833e3843ca3 | 8,202 | py | Python | TUI/ScriptMenu.py | r-owen/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2015-04-29T20:28:20.000Z | 2015-04-29T20:28:20.000Z | TUI/ScriptMenu.py | ApachePointObservatory/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2017-06-05T22:53:58.000Z | 2017-06-05T22:53:58.000Z | TUI/ScriptMenu.py | r-owen/TUI | 8f130368254161a2748167b7c8260cc24170c28c | [
"BSD-3-Clause"
] | 1 | 2020-01-28T06:28:02.000Z | 2020-01-28T06:28:02.000Z | #!/usr/bin/env python
"""Creates the Script menu.
To Do:
- add html help; note that this will have to be fed to ScriptWdg,
RO.ScriptWdg has no idea of TUI help
History:
2004-07-19 ROwen
2004-08-11 ROwen Modified for updated RO.Wdg.Toplevel.
2004-08-23 ROwen Added some diagnostic print statements (commented out).
2004-10-11 ROwen Modified to reject files whose names begin with ".".
2004-10-28 ROwen Bug fix: Open... was broken.
2005-09-22 ROwen Fix PR 272: standard scripts not available on Mac;
this was broken by the packaging overhaul for TUI 1.0.1.
Fix PR 132: Script menu may not load at first on MacOS X;
this was fixed via a hideous hack.
Modified to check/rebuild the entire menu when the root
menu is shown, instead of using lazy check/rebuild;
this simplified the hack for PR 132.
Modified to prebuild the menu at startup.
Modified test code to show a standard pull-down menu.
2011-06-16 ROwen Ditched obsolete "except (SystemExit, KeyboardInterrupt): raise" code
2012-07-18 ROwen Removed use of update_idletasks and an ugly Mac workaround that is no longer required.
2014-02-12 ROwen Moved some code to TUI.Base.ScriptLoader so other users could get to it more easily.
2015-03-18 ROwen Removed _RootNode.isAqua because it was not being used.
"""
import os
import Tkinter
import tkFileDialog
import RO.Alg
from TUI.Base.ScriptLoader import getScriptDirs, ScriptLoader
__all__ = ["getScriptMenu"]
def getScriptMenu(master):
scriptDirs = getScriptDirs()
rootNode = _RootNode(master=master, label="", pathList=scriptDirs)
rootNode.checkMenu(recurse=True)
return rootNode.menu
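# For example (illustrative layout): a file <scripts dir>/Inst/MyScript.py
# shows up in the menu as Scripts > Inst > MyScript, and same-named
# subdirectories from the different script dirs (built-in, local and shared
# TUIAdditions) are merged into a single submenu.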
class _MenuNode:
"""Menu and related information about sub-menu of the Scripts menu
Each node represents one level of hiearchy in the various scripts directories.
The contents of a given subdir are dynamically tested, but the existence
of a particular subdirectory is not. This sounds like a mistake to me;
if a given subdir exists in any scripts dir, it should be checked every time
in all scripts dirs.
"""
def __init__(self, parentNode, label, pathList):
"""Construct a _MenuNode
Inputs:
- parentNode: parent menu node
- label: label of this sub-menu
- pathList: list of paths to this subdirectory in the script hierarchy
(one entry for each of the following, but only if the subdir exists:
            built-in scripts dir, local TUIAdditions/Scripts and shared TUIAdditions/Scripts)
"""
# print "_MenuNode(%r, %r, %r)" % (parentNode, label, pathList)
self.parentNode = parentNode
self.label = label
self.pathList = pathList
self.itemDict = {}
self.subDict = RO.Alg.ListDict()
self.subNodeList = []
self._setMenu()
def _setMenu(self):
self.menu = Tkinter.Menu(
self.parentNode.menu,
tearoff = False,
# postcommand = self.checkMenu,
)
self.parentNode.menu.add_cascade(
label = self.label,
menu = self.menu,
)
def checkMenu(self, recurse=True):
"""Check contents of menu and rebuild if anything has changed.
Return True if anything rebuilt.
"""
# print "%s checkMenu" % (self,)
newItemDict = {}
newSubDict = RO.Alg.ListDict()
didRebuild = False
for path in self.pathList:
for baseName in os.listdir(path):
# reject files that would be invisible on unix
if baseName.startswith("."):
continue
baseBody, baseExt = os.path.splitext(baseName)
fullPath = os.path.normpath(os.path.join(path, baseName))
if os.path.isfile(fullPath) and baseExt.lower() == ".py":
# print "checkMenu newItem[%r] = %r" % (baseBody, fullPath)
newItemDict[baseBody] = fullPath
elif os.path.isdir(fullPath) and baseExt.lower() != ".py":
# print "checkMenu newSubDir[%r] = %r" % (baseBody, fullPath)
newSubDict[baseName] = fullPath
# else:
# print "checkMenu ignoring %r = %r" % (baseName, fullPath)
if (self.itemDict != newItemDict) or (self.subDict != newSubDict):
didRebuild = True
# rebuild contents
# print "checkMenu rebuild contents"
self.itemDict = newItemDict
self.subDict = newSubDict
self.menu.delete(0, "end")
self.subNodeList = []
self._fillMenu()
# else:
# print "checkMenu do not rebuild contents"
if recurse:
for subNode in self.subNodeList:
subRebuilt = subNode.checkMenu(recurse=True)
didRebuild = didRebuild or subRebuilt
return didRebuild
def _fillMenu(self):
"""Fill the menu.
"""
# print "%s _fillMenu"
itemKeys = self.itemDict.keys()
itemKeys.sort()
# print "%s found items: %s" % (self, itemKeys)
for label in itemKeys:
subPathList = list(self.getLabels()) + [label]
fullPath = self.itemDict[label]
# print "adding script %r: %r" % (label, fullPath)
self.menu.add_command(
label = label,
command = ScriptLoader(subPathList=subPathList, fullPath=fullPath),
)
subdirList = self.subDict.keys()
subdirList.sort()
# print "%s found subdirs: %s" % (self, subdirList)
for subdir in subdirList:
pathList = self.subDict[subdir]
# print "adding submenu %r: %r" % (subdir, pathList)
self.subNodeList.append(_MenuNode(self, subdir, pathList))
def getLabels(self):
"""Return a list of labels all the way up to, but not including, the root node.
"""
retVal = self.parentNode.getLabels()
retVal.append(self.label)
return retVal
def __str__(self):
return "%s %s" % (self.__class__.__name__, ":".join(self.getLabels()))
class _RootNode(_MenuNode):
"""The main scripts menu and related information
"""
def __init__(self, master, label, pathList):
"""Construct the _RootNode
Inputs:
- parentNode: parent menu node
- label: label of this sub-menu
- pathList: list of paths to scripts, as returned by TUI.Base.ScriptLoader.getScriptDirs()
"""
self.master = master
_MenuNode.__init__(self, None, label, pathList)
def _setMenu(self):
self.menu = Tkinter.Menu(
self.master,
tearoff = False,
postcommand = self.checkMenu,
)
def _fillMenu(self):
"""Fill the menu.
"""
self.menu.add_command(label="Open...", command=self.doOpen)
_MenuNode._fillMenu(self)
def doOpen(self):
"""Handle Open... menu item.
"""
initialDir = os.path.expanduser("~")
if initialDir == "~":
initialDir = None
fullPath = tkFileDialog.askopenfilename(
master = self.master,
initialdir = initialDir,
title="TUI Script",
filetypes = [("Python", "*.py")],
)
if not fullPath:
return
pathList = os.path.split(fullPath)
ScriptLoader(subPathList=pathList, fullPath=fullPath)()
def getLabels(self):
"""Return a list of labels all the way up to, but not including, the root node.
"""
return []
if __name__ == "__main__":
import RO.Wdg
root = Tkinter.Tk()
menuBar = Tkinter.Menu(root)
root["menu"] = menuBar
scriptMenu = getScriptMenu(menuBar)
menuBar.add_cascade(label="Scripts", menu=scriptMenu)
root.mainloop()
| 35.353448 | 106 | 0.588881 | 920 | 8,202 | 5.184783 | 0.331522 | 0.002935 | 0.01195 | 0.010482 | 0.12956 | 0.104822 | 0.09392 | 0.077568 | 0.062055 | 0.062055 | 0 | 0.016679 | 0.320166 | 8,202 | 231 | 107 | 35.506494 | 0.838773 | 0.43465 | 0 | 0.106195 | 0 | 0 | 0.017409 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106195 | false | 0 | 0.053097 | 0.00885 | 0.230089 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cffb6437c4e4737ed0d16d417d528aae9ad7f455 | 3,121 | py | Python | tests/views/test_plot_configuration_dialog.py | lsst-sitcom/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"BSD-3-Clause"
] | null | null | null | tests/views/test_plot_configuration_dialog.py | lsst-sitcom/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"BSD-3-Clause"
] | 5 | 2020-01-08T23:50:22.000Z | 2020-02-14T18:15:20.000Z | tests/views/test_plot_configuration_dialog.py | lsst-com/spot_motion_monitor | 3d0242276198126240667ba13e95b7bdf901d053 | [
"MIT"
] | null | null | null | # This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from PyQt5.QtWidgets import QDialogButtonBox
from spot_motion_monitor.utils import AutoscaleState
from spot_motion_monitor.views import PlotConfigurationDialog
class TestPlotConfigurationDialog:
def test_parametersAfterConstruction(self, qtbot):
pcDialog = PlotConfigurationDialog()
qtbot.addWidget(pcDialog)
pcDialog.show()
assert pcDialog.tabWidget.count() == 2
def test_setPlotConfiguration(self, qtbot, mocker):
pcDialog = PlotConfigurationDialog()
mockCentroidTabSetConfig = mocker.patch.object(pcDialog.centroidPlotConfigTab, 'setConfiguration')
mockPsdTabSetConfig = mocker.patch.object(pcDialog.psdPlotConfigTab, 'setConfiguration')
qtbot.addWidget(pcDialog)
pcDialog.show()
centroidConfig = {'xCentroid': {'autoscale': AutoscaleState.OFF.name, 'pixelAddition': None,
'minimum': 10, 'maximum': 1000},
'yCentroid': {'autoscale': AutoscaleState.ON.name, 'pixelAddition': None,
'minimum': None, 'maximum': None},
'scatterPlot': {'numHistogramBins': 50}}
psdConfig = {'waterfall': {'numBins': 15, 'colorMap': None},
'xPSD': {'autoscale': True},
'yPSD': {'autoscale': False, 'maximum': 1320.0}}
pcDialog.setPlotConfiguration(centroidConfig, psdConfig)
assert mockCentroidTabSetConfig.call_count == 1
assert mockPsdTabSetConfig.call_count == 1
def test_getPlotConfiguration(self, qtbot, mocker):
pcDialog = PlotConfigurationDialog()
mockCentroidTabGetConfig = mocker.patch.object(pcDialog.centroidPlotConfigTab, 'getConfiguration')
mockPsdTabGetConfig = mocker.patch.object(pcDialog.psdPlotConfigTab, 'getConfiguration')
qtbot.addWidget(pcDialog)
pcDialog.show()
centroidConfig, psdConfig = pcDialog.getPlotConfiguration()
assert mockCentroidTabGetConfig.call_count == 1
assert mockPsdTabGetConfig.call_count == 1
assert centroidConfig is not None
assert psdConfig is not None
def test_validInputFromTabs(self, qtbot):
pcDialog = PlotConfigurationDialog()
qtbot.addWidget(pcDialog)
pcDialog.show()
pcDialog.centroidPlotConfigTab.pixelAdditionXLineEdit.setText(str(-1))
assert pcDialog.buttonBox.button(QDialogButtonBox.Ok).isEnabled() is False
pcDialog.centroidPlotConfigTab.pixelAdditionXLineEdit.setText(str(10))
assert pcDialog.buttonBox.button(QDialogButtonBox.Ok).isEnabled()
pcDialog.psdPlotConfigTab.waterfallNumBinsLineEdit.setText(str(0))
assert pcDialog.buttonBox.button(QDialogButtonBox.Ok).isEnabled() is False
| 45.231884 | 106 | 0.694008 | 281 | 3,121 | 7.658363 | 0.409253 | 0.013011 | 0.040892 | 0.055762 | 0.378253 | 0.197955 | 0.153346 | 0.127323 | 0.127323 | 0 | 0 | 0.010634 | 0.216597 | 3,121 | 68 | 107 | 45.897059 | 0.86953 | 0.099648 | 0 | 0.297872 | 0 | 0 | 0.085 | 0 | 0 | 0 | 0 | 0 | 0.212766 | 1 | 0.085106 | false | 0 | 0.06383 | 0 | 0.170213 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cffcff915342cd1b241c4888358b81014d40b492 | 1,586 | py | Python | spider/pipelines.py | TeamDDH/bbk-server | 3fdd201e8b7854759b6f5113790d90adb9879b88 | [
"MIT"
] | 3 | 2018-08-20T04:57:57.000Z | 2021-11-01T01:27:34.000Z | spider/pipelines.py | TeamDDH/bbk-server | 3fdd201e8b7854759b6f5113790d90adb9879b88 | [
"MIT"
] | null | null | null | spider/pipelines.py | TeamDDH/bbk-server | 3fdd201e8b7854759b6f5113790d90adb9879b88 | [
"MIT"
] | 2 | 2019-06-18T09:00:46.000Z | 2020-04-09T20:32:45.000Z | # -*- coding: utf-8 -*-
"""
pipelines
~~~~~~~~~
:copyright: (c) 2017-18 by Wendell Hu.
:license: MIT, see LICENSE for more details.
"""
from scrapy.exceptions import DropItem
from .db import spider_session_generator, RawArticle
class ArticlePipeline(object):
"""Persist article items into database."""
def __init__(self):
self.spider_session_generator = spider_session_generator
def process_item(self, item, spider):
if item.get('title', None) is None:
raise DropItem('Article doesn\'t have a title.')
else:
title = item.get('title')[0]
if title:
title = title.strip()
uri = item.get('uri')[0]
content = item.get('content')[0]
if content:
content = content.strip()
source = item.get('source')[0]
crawled_at = item.get('crawled_at')[0]
# published_at = item.get('published_at')[0]
# editor = item.get('editor')[0]
# published_time = item.get('published_time')[0]
if title is None or title == '' or content is None or content == '':
raise DropItem('Article doesn\'t have valid information.')
session = self.spider_session_generator()
session.add(RawArticle(title=title, uri=uri, source=source,
crawled_at=crawled_at, content=content))
session.commit()
session.close()
#: return the item for any other after-processing
return item
| 31.72 | 80 | 0.566204 | 180 | 1,586 | 4.872222 | 0.405556 | 0.071836 | 0.100342 | 0.059293 | 0.068415 | 0.068415 | 0 | 0 | 0 | 0 | 0 | 0.013761 | 0.312736 | 1,586 | 49 | 81 | 32.367347 | 0.790826 | 0.209962 | 0 | 0 | 0 | 0 | 0.052459 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cffe63c3675a8ef582e92a5d2831d5faa5ba38af | 1,268 | py | Python | commonmark/main.py | phst/commonmark.py | d031003aa23cfce1787cfb29c1eb109b369ca5b7 | [
"BSD-3-Clause"
] | 154 | 2015-12-10T23:17:28.000Z | 2019-04-04T06:49:36.000Z | commonmark/main.py | phst/commonmark.py | d031003aa23cfce1787cfb29c1eb109b369ca5b7 | [
"BSD-3-Clause"
] | 131 | 2019-07-02T15:56:33.000Z | 2022-03-25T19:54:02.000Z | commonmark/main.py | phst/commonmark.py | d031003aa23cfce1787cfb29c1eb109b369ca5b7 | [
"BSD-3-Clause"
] | 53 | 2015-12-08T18:06:51.000Z | 2019-05-02T18:08:10.000Z | # 2014 - Bibek Kafle & Roland Shoemaker
# 2015-2017 - Nikolas Nyby
# Port of @jgm's commonmark.js implementation of the CommonMark spec.
# Basic usage:
#
# import commonmark
# parser = commonmark.Parser()
# renderer = commonmark.HtmlRenderer()
# print(renderer.render(parser.parse('Hello *world*')))
from __future__ import absolute_import, unicode_literals
from commonmark.blocks import Parser
from commonmark.dump import dumpAST, dumpJSON
from commonmark.render.html import HtmlRenderer
from commonmark.render.rst import ReStructuredTextRenderer
def commonmark(text, format="html"):
"""Render CommonMark into HTML, JSON or AST
Optional keyword arguments:
format: 'html' (default), 'json' or 'ast'
>>> commonmark("*hello!*")
'<p><em>hello</em></p>\\n'
"""
parser = Parser()
ast = parser.parse(text)
if format not in ["html", "json", "ast", "rst"]:
raise ValueError("format must be 'html', 'json' or 'ast'")
if format == "html":
renderer = HtmlRenderer()
return renderer.render(ast)
if format == "json":
return dumpJSON(ast)
if format == "ast":
return dumpAST(ast)
if format == "rst":
renderer = ReStructuredTextRenderer()
return renderer.render(ast)
| 30.190476 | 69 | 0.669558 | 149 | 1,268 | 5.657718 | 0.422819 | 0.04745 | 0.052195 | 0.030842 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011858 | 0.201893 | 1,268 | 41 | 70 | 30.926829 | 0.821146 | 0.355678 | 0 | 0.1 | 0 | 0 | 0.089629 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
32018d05e0f153d9cb272bc8a457d7fb3169bb74 | 37,195 | py | Python | ktapp/views/web/user_profile.py | cu2/KT | 8a0964b77dce150358637faa679d969a07e42f07 | [
"CC-BY-3.0"
] | 5 | 2015-04-13T09:44:31.000Z | 2017-10-19T01:07:58.000Z | ktapp/views/web/user_profile.py | cu2/KT | 8a0964b77dce150358637faa679d969a07e42f07 | [
"CC-BY-3.0"
] | 49 | 2015-02-15T07:12:05.000Z | 2022-03-11T23:11:43.000Z | ktapp/views/web/user_profile.py | cu2/KT | 8a0964b77dce150358637faa679d969a07e42f07 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
import math
from django.db import connection
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.db.models import Q, Max
from django.conf import settings
from ktapp import models
from ktapp import utils as kt_utils
from ktapp.helpers import filmlist
from ktapp import texts
from ktapp import sqls as kt_sqls
COMMENTS_PER_PAGE = 100
MESSAGES_PER_PAGE = 50
FILMS_PER_PAGE = 100
MINIMUM_YEAR = 1920
USER_PROFILE_TAB_WIDTH = {
True: '11', # 1/9
False: '12.3', # 1/8
}
def _get_user_profile_numbers(request, selected_user):
if request.user.is_authenticated() and request.user.id != selected_user.id:
number_of_messages = models.MessageCountCache.get_count(owned_by=request.user, partner=selected_user)
else:
number_of_messages = 0
return (
selected_user.number_of_ratings,
selected_user.number_of_comments,
selected_user.number_of_wishes_yes + selected_user.number_of_wishes_no + selected_user.number_of_wishes_get,
selected_user.number_of_toplists,
number_of_messages,
selected_user.number_of_reviews + selected_user.number_of_bios + selected_user.number_of_links,
)
def user_profile(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
number_of_vapiti_votes = selected_user.vote_set.filter(film__vapiti_year=settings.VAPITI_YEAR).count()
latest_votes = [int(v) for v in selected_user.latest_votes.split(',') if v != ''][:10]
latest_comments = [int(c) for c in selected_user.latest_comments.split(',') if c != ''][:10]
# profile
profile = {
'major_genres': [],
'minor_genres': [],
'major_countries': [],
'minor_countries': [],
'major_years': [],
'minor_years': [],
}
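    # Segment scores come from ktapp_userprofilesegment; the thresholds below
    # (>= 100 "major" / >= 50 "minor" for genres and years, >= 200 / >= 100
    # for countries) mirror the WHERE filters and the score splits in the
    # loops. The user_id interpolated via .format() is the integer pk
    # resolved by get_object_or_404 above, so the formatting is safe here.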
for keyword in models.Keyword.objects.raw('''
SELECT k.*, ups.score AS ups_score
FROM ktapp_userprofilesegment ups
INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'genre'
LEFT JOIN ktapp_keyword k ON k.id = ps.segment
WHERE ups.user_id = {user_id} AND ups.score >= 50
ORDER BY ups.score DESC;
'''.format(user_id=selected_user.id)):
if keyword.ups_score >= 100:
profile['major_genres'].append(keyword)
else:
profile['minor_genres'].append(keyword)
for keyword in models.Keyword.objects.raw('''
SELECT k.*, ups.score AS ups_score
FROM ktapp_userprofilesegment ups
INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'country'
LEFT JOIN ktapp_keyword k ON k.id = ps.segment
WHERE ups.user_id = {user_id} AND ups.score >= 100
ORDER BY ups.score DESC;
'''.format(user_id=selected_user.id)):
if keyword.ups_score >= 200:
profile['major_countries'].append(keyword)
else:
profile['minor_countries'].append(keyword)
for year in models.UserProfileSegment.objects.raw('''
SELECT ups.*, ps.segment as ps_segment
FROM ktapp_userprofilesegment ups
INNER JOIN ktapp_profilesegment ps ON ps.id = ups.segment_id AND ps.dimension = 'year'
LEFT JOIN ktapp_keyword k ON k.id = ps.segment
WHERE ups.user_id = {user_id} AND ups.score >= 50
ORDER BY ups.score DESC;
'''.format(user_id=selected_user.id)):
year_str = texts.LONG_YEARS[int(year.ps_segment)]
if year.score >= 100:
profile['major_years'].append(year_str)
else:
profile['minor_years'].append(year_str)
similarity = None
similarity_per_genre = []
if request.user.is_authenticated():
cursor = connection.cursor()
cursor.execute(kt_sqls.SIMILARITY, (request.user.id, selected_user.id))
row = cursor.fetchone()
if row:
similarity = row
cursor.execute(kt_sqls.SIMILARITY_PER_GENRE, (request.user.id, selected_user.id))
for row in cursor.fetchall():
similarity_per_genre.append(row)
ignore_pm, ignore_comment = False, False
if request.user.is_authenticated():
ignore_pm, ignore_comment = models.IgnoreUser.get(who=request.user, whom=selected_user)
return render(request, 'ktapp/user_profile_subpages/user_profile.html', {
'active_tab': 'profile',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'number_of_vapiti_votes': number_of_vapiti_votes,
'vapiti_weight': number_of_votes + 25 * number_of_vapiti_votes,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'latest_votes': selected_user.vote_set.filter(id__in=latest_votes).select_related('film').order_by('-when', '-id'),
'latest_comments': models.Comment.objects.filter(id__in=latest_comments).select_related('film', 'topic', 'poll', 'created_by', 'reply_to', 'reply_to__created_by'),
'myfav': models.Follow.objects.filter(who=request.user, whom=selected_user).count() if request.user.is_authenticated() else 0,
'fav_count': models.Follow.objects.filter(whom=selected_user).count(),
'ignore_pm': ignore_pm,
'ignore_comment': ignore_comment,
'profile': profile,
'fav_directors': list(models.Artist.objects.raw('''
SELECT a.*
FROM ktapp_artist a
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
WHERE uf.user_id = %s AND uf.domain = %s
ORDER BY a.name, a.id
''', [selected_user.id, models.UserFavourite.DOMAIN_DIRECTOR])),
'fav_actors': list(models.Artist.objects.raw('''
SELECT a.*
FROM ktapp_artist a
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
WHERE uf.user_id = %s AND uf.domain = %s
ORDER BY a.name, a.id
''', [selected_user.id, models.UserFavourite.DOMAIN_ACTOR])),
'fav_genres': list(models.Keyword.objects.raw('''
SELECT k.*
FROM ktapp_keyword k
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
ORDER BY k.name, k.id
''', [selected_user.id, models.UserFavourite.DOMAIN_GENRE, models.Keyword.KEYWORD_TYPE_GENRE])),
'fav_countries': list(models.Keyword.objects.raw('''
SELECT k.*
FROM ktapp_keyword k
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
ORDER BY k.name, k.id
''', [selected_user.id, models.UserFavourite.DOMAIN_COUNTRY, models.Keyword.KEYWORD_TYPE_COUNTRY])),
'similarity': similarity,
'similarity_per_genre': similarity_per_genre,
'permission_ban_user': kt_utils.check_permission('ban_user', request.user),
'permission_see_core': kt_utils.check_permission('see_core', request.user),
'permission_set_game_master': kt_utils.check_permission('set_game_master', request.user),
'list_of_bans': [
(
ban.created_at,
texts.BAN_TYPES.get(ban.action),
ban.created_by,
)
for ban in models.Change.objects.filter(
action__in=['ban', 'unban', 'warning', 'temp_ban_1d', 'temp_ban_3d', 'temp_ban_7d'],
object='user:%d' % selected_user.id,
).order_by('-created_at')
],
})
def user_taste(request, id, name_slug, domain):
def dictfetchall(cursor):
columns = [col[0] for col in cursor.description]
return [dict(zip(columns, row)) for row in cursor.fetchall()]
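    # (dictfetchall is the standard cursor helper from the Django docs: it
    # returns all rows as a list of dicts keyed by column name)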
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
cursor = connection.cursor()
if domain == 'rendezok':
active_subtab = 'directors'
cursor.execute('''
SELECT
a.id, a.slug_cache, a.name,
AVG(v.rating) AS average_rating,
ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
COUNT(1) AS number_of_ratings,
ROUND(100.0 * COUNT(1) / a.number_of_films_as_director) AS film_ratio,
SUM(v.rating = 1) AS number_of_ratings_1,
SUM(v.rating = 2) AS number_of_ratings_2,
SUM(v.rating = 3) AS number_of_ratings_3,
SUM(v.rating = 4) AS number_of_ratings_4,
SUM(v.rating = 5) AS number_of_ratings_5
FROM ktapp_artist a
INNER JOIN ktapp_filmartistrelationship fa ON fa.artist_id = a.id AND fa.role_type = 'D'
INNER JOIN ktapp_vote v ON v.film_id = fa.film_id AND v.user_id = %s
GROUP BY a.id
HAVING COUNT(1) >= 5 OR (2*COUNT(1)>=MIN(a.number_of_films_as_director) AND COUNT(1)>=3)
ORDER BY average_rating DESC, number_of_ratings DESC, name, id
''', [selected_user.id])
elif domain == 'mufajok':
active_subtab = 'genres'
cursor.execute('''
SELECT
k.id, k.slug_cache, k.name,
AVG(v.rating) AS average_rating,
ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
COUNT(1) AS number_of_ratings,
SUM(v.rating = 1) AS number_of_ratings_1,
SUM(v.rating = 2) AS number_of_ratings_2,
SUM(v.rating = 3) AS number_of_ratings_3,
SUM(v.rating = 4) AS number_of_ratings_4,
SUM(v.rating = 5) AS number_of_ratings_5
FROM ktapp_keyword k
INNER JOIN ktapp_filmkeywordrelationship fk ON fk.keyword_id = k.id
INNER JOIN ktapp_vote v ON v.film_id = fk.film_id AND v.user_id = %s
WHERE k.keyword_type = 'G'
GROUP BY k.id
HAVING COUNT(1) >= 5
ORDER BY average_rating DESC, number_of_ratings DESC, name, id
''', [selected_user.id])
elif domain == 'orszagok':
active_subtab = 'countries'
cursor.execute('''
SELECT
k.id, k.slug_cache, k.name,
AVG(v.rating) AS average_rating,
ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
COUNT(1) AS number_of_ratings,
SUM(v.rating = 1) AS number_of_ratings_1,
SUM(v.rating = 2) AS number_of_ratings_2,
SUM(v.rating = 3) AS number_of_ratings_3,
SUM(v.rating = 4) AS number_of_ratings_4,
SUM(v.rating = 5) AS number_of_ratings_5
FROM ktapp_keyword k
INNER JOIN ktapp_filmkeywordrelationship fk ON fk.keyword_id = k.id
INNER JOIN ktapp_vote v ON v.film_id = fk.film_id AND v.user_id = %s
WHERE k.keyword_type = 'C'
GROUP BY k.id
HAVING COUNT(1) >= 5
ORDER BY average_rating DESC, number_of_ratings DESC, name, id
''', [selected_user.id])
elif domain == 'korszakok':
active_subtab = 'periods'
cursor.execute('''
SELECT
CASE
WHEN f.year < 1920 THEN 1900
ELSE FLOOR(f.year / 10) * 10
END AS period,
CASE
WHEN f.year < 1920 THEN ''
ELSE CAST((FLOOR(f.year / 10) * 10) AS CHAR)
END AS period_min,
CASE
WHEN f.year < 1920 THEN 1919
ELSE FLOOR(f.year / 10) * 10 + 9
END AS period_max,
AVG(v.rating) AS average_rating,
ROUND(10.0 * AVG(v.rating)) AS average_rating_sort_value,
COUNT(1) AS number_of_ratings,
SUM(v.rating = 1) AS number_of_ratings_1,
SUM(v.rating = 2) AS number_of_ratings_2,
SUM(v.rating = 3) AS number_of_ratings_3,
SUM(v.rating = 4) AS number_of_ratings_4,
SUM(v.rating = 5) AS number_of_ratings_5
FROM ktapp_film f
INNER JOIN ktapp_vote v ON v.film_id = f.id AND v.user_id = %s
WHERE f.year IS NOT NULL
GROUP BY period
HAVING COUNT(1) >= 5
ORDER BY average_rating DESC, number_of_ratings DESC, period
''', [selected_user.id])
else:
raise Http404
list_of_items = dictfetchall(cursor)
return render(request, 'ktapp/user_profile_subpages/user_taste.html', {
'active_tab': 'taste',
'active_subtab': active_subtab,
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'list_of_items': list_of_items,
'years_as': [1920, 1930, 1960, 1980, 2020, 2030],
})
def user_films(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
ordering_str = kt_utils.strip_whitespace(request.GET.get('o', ''))
if ordering_str == '':
ordering_str = '-other_rating_when'
if ordering_str[0] == '-':
ordering = (ordering_str[1:], 'DESC')
else:
ordering = (ordering_str, 'ASC')
filters = [('seen_by_id', selected_user.id)] + filmlist.get_filters_from_request(request)
films, nice_filters = filmlist.filmlist(
user_id=request.user.id,
filters=filters,
ordering=ordering,
films_per_page=None,
)
querystring = {}
for filter_type, filter_value in nice_filters:
if filter_type in {'title', 'year', 'director', 'actor', 'country', 'genre', 'keyword', 'my_rating', 'other_rating', 'my_wish'}:
querystring[filter_type] = filter_value
elif filter_type == 'number_of_ratings':
min_value, max_value = filter_value.split('-')
querystring['num_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['num_rating_max'] = kt_utils.coalesce(max_value, '')
elif filter_type == 'average_rating':
min_value, max_value = filter_value.split('-')
querystring['avg_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['avg_rating_max'] = kt_utils.coalesce(max_value, '')
elif filter_type == 'fav_average_rating':
min_value, max_value = filter_value.split('-')
querystring['fav_avg_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['fav_avg_rating_max'] = kt_utils.coalesce(max_value, '')
qs_combined = '&'.join('%s=%s' % (key, val) for key, val in querystring.iteritems())
if qs_combined != '':
qs_combined = '&' + qs_combined
films = list(films)
result_count = len(films)
try:
p = int(request.GET.get('p', 0))
except ValueError:
p = 0
max_pages = int(math.ceil(1.0 * result_count / FILMS_PER_PAGE))
if max_pages == 0:
max_pages = 1
if p == 0:
p = 1
if p > max_pages:
p = max_pages
films = films[(p-1) * FILMS_PER_PAGE:p * FILMS_PER_PAGE]
return render(request, 'ktapp/user_profile_subpages/user_films.html', {
'active_tab': 'films',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'result_count': result_count,
'querystring': querystring,
'qs_combined': qs_combined,
'ordering_str': ordering_str,
'p': p,
'max_pages': max_pages,
'films': films,
})
def user_comments(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
    try:
        p = int(request.GET.get('p', 0))
    except ValueError:
        p = 0
if p == 1:
return HttpResponseRedirect(reverse('user_comments', args=(selected_user.id, selected_user.slug_cache)))
max_pages = int(math.ceil(1.0 * selected_user.number_of_comments / COMMENTS_PER_PAGE))
if max_pages == 0:
max_pages = 1
if p == 0:
p = 1
if p > max_pages:
return HttpResponseRedirect(reverse('user_comments', args=(selected_user.id, selected_user.slug_cache)) + '?p=' + str(max_pages))
comments_qs = selected_user.comment_set.select_related('film', 'topic', 'poll', 'reply_to', 'reply_to__created_by')
if max_pages > 1:
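        # page p maps onto a window of per-user serial numbers counted back
        # from the newest comment (serial numbers are assigned oldest-first)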
first_comment = selected_user.number_of_comments - COMMENTS_PER_PAGE * (p - 1) - (COMMENTS_PER_PAGE - 1)
last_comment = selected_user.number_of_comments - COMMENTS_PER_PAGE * (p - 1)
comments = comments_qs.filter(serial_number_by_user__lte=last_comment, serial_number_by_user__gte=first_comment)
else:
comments = comments_qs.all()
return render(request, 'ktapp/user_profile_subpages/user_comments.html', {
'active_tab': 'comments',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'comments': comments.order_by('-created_at'),
'p': p,
'max_pages': max_pages,
})
def user_wishlist(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
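    # the 't' parameter carries Hungarian values: 'igen' (yes), 'nem' (no), 'szerez' (will acquire)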
wishlist_type = request.GET.get('t', 'igen')
if wishlist_type == 'nem':
wishlist_type = 'N'
elif wishlist_type == 'szerez':
wishlist_type = 'G'
else:
wishlist_type = 'Y'
filters = [('wished_by_id', '%s:%s' % (wishlist_type, selected_user.id))] + filmlist.get_filters_from_request(request)
films, nice_filters = filmlist.filmlist(
user_id=request.user.id,
filters=filters,
ordering=('average_rating', 'DESC'),
films_per_page=None,
)
querystring = {}
for filter_type, filter_value in nice_filters:
if filter_type in {'title', 'year', 'director', 'actor', 'country', 'genre', 'keyword', 'my_rating', 'other_rating', 'my_wish'}:
querystring[filter_type] = filter_value
elif filter_type == 'number_of_ratings':
min_value, max_value = filter_value.split('-')
querystring['num_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['num_rating_max'] = kt_utils.coalesce(max_value, '')
elif filter_type == 'average_rating':
min_value, max_value = filter_value.split('-')
querystring['avg_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['avg_rating_max'] = kt_utils.coalesce(max_value, '')
elif filter_type == 'fav_average_rating':
min_value, max_value = filter_value.split('-')
querystring['fav_avg_rating_min'] = kt_utils.coalesce(min_value, '')
querystring['fav_avg_rating_max'] = kt_utils.coalesce(max_value, '')
if wishlist_type == 'N':
querystring['t'] = 'nem'
if wishlist_type == 'G':
querystring['t'] = 'szerez'
qs_combined = '&'.join('%s=%s' % (key, val) for key, val in querystring.iteritems())
if qs_combined != '':
qs_combined = '&' + qs_combined
films = list(films)
result_count = len(films)
return render(request, 'ktapp/user_profile_subpages/user_wishlist.html', {
'active_tab': 'wishlist',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'result_count': result_count,
'querystring': querystring,
'qs_combined': qs_combined,
'films': films,
'wishlist_type': wishlist_type,
'number_of_wishes_yes': selected_user.number_of_wishes_yes,
'number_of_wishes_no': selected_user.number_of_wishes_no,
'number_of_wishes_get': selected_user.number_of_wishes_get,
})
def user_toplists(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
toplists = models.UserToplist.objects.filter(created_by=selected_user).order_by('-created_at')
toplist_details = []
for toplist in toplists:
if toplist.toplist_type == models.UserToplist.TOPLIST_TYPE_FILM:
items, _ = filmlist.filmlist(
user_id=request.user.id,
filters=[('usertoplist_id', toplist.id)],
ordering='serial_number',
films_per_page=None,
)
toplist_list = []
with_comments = False
for item in items:
toplist_list.append(item)
if item.comment:
with_comments = True
else:
toplist_list = []
with_comments = False
for item in models.UserToplistItem.objects.filter(usertoplist=toplist).select_related('director', 'actor').order_by('serial_number'):
toplist_list.append(item)
if item.comment:
with_comments = True
toplist_details.append((
toplist,
toplist_list,
with_comments,
))
return render(request, 'ktapp/user_profile_subpages/user_toplists.html', {
'active_tab': 'toplists',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'toplist_details': toplist_details,
})
def user_articles(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
articles = []
for review in models.Review.objects.filter(created_by=selected_user).select_related('film'):
articles.append((
review.created_at,
'R',
review.film,
None,
review.snippet + '...',
))
for bio in models.Biography.objects.filter(created_by=selected_user).select_related('artist'):
articles.append((
bio.created_at,
'B',
None,
bio.artist,
bio.snippet + '...',
))
for article in models.Link.objects.filter(author=selected_user).select_related('film', 'artist'):
articles.append((
article.created_at,
'A',
article.film,
article.artist,
article.lead,
article.url,
article.name,
article.link_domain,
article.id,
))
articles.sort(key=lambda item: item[0], reverse=True)
return render(request, 'ktapp/user_profile_subpages/user_articles.html', {
'active_tab': 'articles',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_articles': number_of_articles,
'number_of_messages': number_of_messages,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'articles': articles,
})
def user_activity(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
cursor = connection.cursor()
max_max_vote = models.KTUser.objects.all().aggregate(Max('number_of_ratings'))['number_of_ratings__max']
max_max_comment = models.KTUser.objects.all().aggregate(Max('number_of_comments'))['number_of_comments__max']
scale_vote = (1.0 * selected_user.number_of_ratings / max_max_vote)**0.3
scale_comment = (1.0 * selected_user.number_of_comments / max_max_comment)**0.3
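    # the 0.3 exponent compresses the ratio to the site-wide maximum, so
    # low-activity users still get visibly scaled bars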
min_year = selected_user.date_joined.year
max_year = datetime.date.today().year
years = range(max_year, min_year - 1, -1)
min_month = selected_user.date_joined.month
max_month = datetime.date.today().month
months = []
if len(years) == 1:
for month in range(max_month, min_month - 1, -1):
months.append('%04d-%02d' % (years[0], month))
else:
for year in years:
if year == max_year:
for month in range(max_month, 0, -1):
months.append('%04d-%02d' % (year, month))
elif year == min_year:
for month in range(12, min_month - 1, -1):
months.append('%04d-%02d' % (year, month))
else:
for month in range(12, 0, -1):
months.append('%04d-%02d' % (year, month))
years = ['%04d' % y for y in years]
vote_data = {
'm': {},
'y': {},
}
comment_data = {
'm': {},
'y': {},
}
max_vote = {
'm': 0,
'y': 0,
}
max_comment = {
'm': 0,
'y': 0,
}
cursor.execute('SELECT LEFT(`when`, 7) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
for row in cursor.fetchall():
vote_data['m'][row[0]] = row[1]
if row[1] > max_vote['m']:
max_vote['m'] = row[1]
cursor.execute('SELECT LEFT(`when`, 4) AS dt, COUNT(1) FROM ktapp_vote WHERE user_id = %s AND `when` IS NOT NULL GROUP BY dt', [selected_user.id])
for row in cursor.fetchall():
vote_data['y'][row[0]] = row[1]
if row[1] > max_vote['y']:
max_vote['y'] = row[1]
cursor.execute('SELECT LEFT(created_at, 7) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
for row in cursor.fetchall():
comment_data['m'][row[0]] = row[1]
if row[1] > max_comment['m']:
max_comment['m'] = row[1]
cursor.execute('SELECT LEFT(created_at, 4) AS dt, COUNT(1) FROM ktapp_comment WHERE created_by_id = %s AND created_at IS NOT NULL GROUP BY dt', [selected_user.id])
for row in cursor.fetchall():
comment_data['y'][row[0]] = row[1]
if row[1] > max_comment['y']:
max_comment['y'] = row[1]
data_month = []
for month in months:
data_month.append((
month,
vote_data['m'].get(month, 0),
comment_data['m'].get(month, 0),
int(100.0 * scale_vote * vote_data['m'].get(month, 0) / max_vote['m']) if max_vote['m'] > 0 else 0,
int(100.0 * scale_comment * comment_data['m'].get(month, 0) / max_comment['m']) if max_comment['m'] > 0 else 0,
))
data_year = []
for year in years:
data_year.append((
year,
vote_data['y'].get(year, 0),
comment_data['y'].get(year, 0),
int(100.0 * scale_vote * vote_data['y'].get(year, 0) / max_vote['y']) if max_vote['y'] > 0 else 0,
int(100.0 * scale_comment * comment_data['y'].get(year, 0) / max_comment['y']) if max_comment['y'] > 0 else 0,
))
return render(request, 'ktapp/user_profile_subpages/user_activity.html', {
'active_tab': 'activity',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'data_month': data_month,
'data_year': data_year,
})
@login_required()
def user_messages(request, id, name_slug):
selected_user = get_object_or_404(models.KTUser, pk=id)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, selected_user)
messages_qs = models.Message.objects.filter(private=True).filter(owned_by=request.user).filter(
Q(sent_by=selected_user)
| Q(sent_to=selected_user)
).select_related('sent_by')
try:
p = int(request.GET.get('p', 0))
except ValueError:
p = 0
if p == 1:
return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)))
max_pages = int(math.ceil(1.0 * number_of_messages / MESSAGES_PER_PAGE))
if max_pages == 0:
max_pages = 1
if p == 0:
p = 1
if p > max_pages:
return HttpResponseRedirect(reverse('user_messages', args=(selected_user.id, selected_user.slug_cache)) + '?p=' + str(max_pages))
return render(request, 'ktapp/user_profile_subpages/user_messages.html', {
'active_tab': 'messages',
'selected_user': selected_user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[request.user.is_authenticated() and request.user.id != selected_user.id],
'messages': messages_qs.order_by('-sent_at')[(p-1) * MESSAGES_PER_PAGE:p * MESSAGES_PER_PAGE],
'p': p,
'max_pages': max_pages,
})
@login_required()
def edit_profile(request):
def set_fav(field_name, domain, get_object_function):
old_items = set()
for item in models.UserFavourite.objects.filter(user=request.user, domain=domain):
old_items.add(item.fav_id)
new_items = set()
for name in kt_utils.strip_whitespace(request.POST.get(field_name, '')).split(','):
name = kt_utils.strip_whitespace(name)
if name:
item = get_object_function(name)
if item:
new_items.add(item.id)
for item_id in old_items - new_items:
models.UserFavourite.objects.filter(user=request.user, domain=domain, fav_id=item_id).delete()
for item_id in new_items - old_items:
models.UserFavourite.objects.create(user=request.user, domain=domain, fav_id=item_id)
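    # set_fav above diffs the posted comma-separated names against the stored
    # rows and only deletes (old - new) and creates (new - old) what changed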
next_url = request.GET.get('next', request.POST.get('next', reverse('user_profile', args=(request.user.id, request.user.slug_cache))))
    if request.method == 'POST':
if request.POST.get('t', '') == 'pic':
if request.POST.get('a', '') == 'del':
if request.user.profile_pic:
request.user.profile_pic.delete()
request.user.profile_pic = None
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_DELETE_PROFILE_PIC,
)
else:
if 'img' in request.FILES:
picture = models.Picture.objects.create(
img=request.FILES['img'],
picture_type=models.Picture.PICTURE_TYPE_USER_PROFILE,
created_by=request.user,
user=request.user,
)
request.user.profile_pic = picture
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_UPLOAD_PROFILE_PIC,
)
return HttpResponseRedirect(next_url)
request.user.bio = request.POST.get('bio', '').strip()
gender = request.POST.get('gender', '')
if gender not in {'U', 'M', 'F'}:
gender = 'U'
request.user.gender = gender
try:
request.user.year_of_birth = int(request.POST.get('year_of_birth', 0))
except ValueError:
request.user.year_of_birth = 0
request.user.location = kt_utils.strip_whitespace(request.POST.get('location', ''))
request.user.public_gender = bool(request.POST.get('public_gender', ''))
request.user.public_year_of_birth = bool(request.POST.get('public_year_of_birth', ''))
request.user.public_location = bool(request.POST.get('public_location', ''))
set_fav('fav_director', models.UserFavourite.DOMAIN_DIRECTOR, models.Artist.get_artist_by_name)
set_fav('fav_actor', models.UserFavourite.DOMAIN_ACTOR, models.Artist.get_artist_by_name)
set_fav('fav_genre', models.UserFavourite.DOMAIN_GENRE, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_GENRE))
set_fav('fav_country', models.UserFavourite.DOMAIN_COUNTRY, lambda name: models.Keyword.get_keyword_by_name(name, models.Keyword.KEYWORD_TYPE_COUNTRY))
request.user.fav_period = kt_utils.strip_whitespace(request.POST.get('fav_period', ''))
request.user.save()
models.Event.objects.create(
user=request.user,
event_type=models.Event.EVENT_TYPE_EDIT_PROFILE,
)
return HttpResponseRedirect(next_url)
number_of_votes, number_of_comments, number_of_wishes, number_of_toplists, number_of_messages, number_of_articles = _get_user_profile_numbers(request, request.user)
return render(request, 'ktapp/user_profile_subpages/edit_profile.html', {
'active_tab': 'profile',
'selected_user': request.user,
'number_of_votes': number_of_votes,
'number_of_comments': number_of_comments,
'number_of_wishes': number_of_wishes,
'number_of_toplists': number_of_toplists,
'number_of_messages': number_of_messages,
'number_of_articles': number_of_articles,
'tab_width': USER_PROFILE_TAB_WIDTH[False],
'fav_directors': models.Artist.objects.raw('''
SELECT a.*
FROM ktapp_artist a
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
WHERE uf.user_id = %s AND uf.domain = %s
ORDER BY a.name, a.id
''', [request.user.id, models.UserFavourite.DOMAIN_DIRECTOR]),
'fav_actors': models.Artist.objects.raw('''
SELECT a.*
FROM ktapp_artist a
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = a.id
WHERE uf.user_id = %s AND uf.domain = %s
ORDER BY a.name, a.id
''', [request.user.id, models.UserFavourite.DOMAIN_ACTOR]),
'fav_genres': models.Keyword.objects.raw('''
SELECT k.*
FROM ktapp_keyword k
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
ORDER BY k.name, k.id
''', [request.user.id, models.UserFavourite.DOMAIN_GENRE, models.Keyword.KEYWORD_TYPE_GENRE]),
'fav_countries': models.Keyword.objects.raw('''
SELECT k.*
FROM ktapp_keyword k
INNER JOIN ktapp_userfavourite uf ON uf.fav_id = k.id
WHERE uf.user_id = %s AND uf.domain = %s AND k.keyword_type = %s
ORDER BY k.name, k.id
''', [request.user.id, models.UserFavourite.DOMAIN_COUNTRY, models.Keyword.KEYWORD_TYPE_COUNTRY]),
'topic': request.GET.get('t', ''),
})
| 45.249392 | 171 | 0.652561 | 5,007 | 37,195 | 4.535051 | 0.070501 | 0.086317 | 0.024045 | 0.030035 | 0.698639 | 0.668208 | 0.636191 | 0.619457 | 0.564936 | 0.543665 | 0 | 0.012377 | 0.231053 | 37,195 | 821 | 172 | 45.304507 | 0.781546 | 0.000995 | 0 | 0.509702 | 0 | 0.011643 | 0.276344 | 0.023794 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016818 | false | 0 | 0.018111 | 0 | 0.058215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
32059ef6b7d6492ac78fa492483daba2584c090a | 18,502 | py | Python | metrics.py | adamtiger/CPmethod | 9220d39f2f8cbfc191a89418c8795eabf09469d0 | [
"MIT"
] | null | null | null | metrics.py | adamtiger/CPmethod | 9220d39f2f8cbfc191a89418c8795eabf09469d0 | [
"MIT"
] | null | null | null | metrics.py | adamtiger/CPmethod | 9220d39f2f8cbfc191a89418c8795eabf09469d0 | [
"MIT"
] | 1 | 2019-11-20T15:48:57.000Z | 2019-11-20T15:48:57.000Z | from dataloader_utils import Gender, HeartPart, EndPhase
from enum import Enum
import numpy as np
import math
import cv2
# --------------------------------------
# Shape (contour) similarity
# --------------------------------------
def __areas(curve1, curve2):
# floats come in
# find the corners of the bbox
def _bbox(cv):
mins = np.min(cv, axis=0)
maxs = np.max(cv, axis=0)
x_min, y_min = mins[0], mins[1]
x_max, y_max = maxs[0], maxs[1]
return x_min, y_min, x_max, y_max
box1 = _bbox(curve1)
box2 = _bbox(curve2)
xr = max(box1[2], box2[2])
yb = max(box1[3], box2[3])
xl = min(box1[0], box2[0])
        yu = min(box1[1], box2[1])  # top edge is the minimum y of the two boxes
# shift and rescale the curves (DC, JC will not change)
curve1[:, 0] = (curve1[:, 0] - xl) / (xr - xl + 1e-5)
curve1[:, 1] = (curve1[:, 1] - yu) / (yb - yu + 1e-5)
curve2[:, 0] = (curve2[:, 0] - xl) / (xr - xl + 1e-5)
curve2[:, 1] = (curve2[:, 1] - yu) / (yb - yu + 1e-5)
# map the coordinates to 410 x 410 mask
image1 = np.zeros((410, 410), dtype=np.uint8)
curve1 = curve1 * 400 + 5
cv2.drawContours(image1, [np.expand_dims(curve1, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
image2 = np.zeros((410, 410), dtype=np.uint8)
curve2 = curve2 * 400 + 5
cv2.drawContours(image2, [np.expand_dims(curve2, axis=1).astype(np.int32)], -1, (255, 0, 0), cv2.FILLED)
A = (image1 // 255 == 1).astype(np.float32)
B = (image2 // 255 == 1).astype(np.float32)
area1 = np.sum(A)
area2 = np.sum(B)
area_inter = np.sum(A * B)
area_union = area1 + area2 - area_inter
return area_union, area_inter, area1, area2
def dice(curve1, curve2): # can be viewed as F1 score
"""
Calculate the dice metric for the two curves.
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:return: a real number (the dice value)
"""
_, inter, a1, a2 = __areas(curve1, curve2)
# dice metric
return 2.0 * inter / (a1 + a2)
def jaccard(curve1, curve2): # aka. Tanimoto index
"""
Calculate the jaccard metric for the two curves.
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
elements are integers
:return: a real number (the jaccard index)
"""
union, inter, _, _ = __areas(curve1, curve2)
    # jaccard index
return inter / union
def hausdorff(curve1, curve2): # aka. Pompeiu-Hausdorff distance
"""
Calculate the Hausdorff distance between two curves. (https://en.wikipedia.org/wiki/Hausdorff_distance)
:param curve1: a numpy matrix with shape (N, 2), points are in x, y format
:param curve2: a numpy matrix with shape (N, 2), points are in x, y format
:return: a real number (hausdorff distance)
"""
N2 = curve2.shape[0]
temp = np.expand_dims(curve1, 2)
temp = np.repeat(temp, N2, 2)
temp = temp - curve2.T
distances = temp[:, 0, :] ** 2 + temp[:, 1, :] ** 2
d1 = np.max(np.min(distances, 0))
d2 = np.max(np.min(distances, 1))
return math.sqrt(max(d1, d2))
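# Illustrative sketch (not from the original module): exercising the three shape
# metrics on two overlapping axis-aligned squares. dice/jaccard are approximate
# because the curves are rasterized onto a 410x410 mask, and __areas rescales
# its inputs in place, hence the .copy() calls.
def _demo_shape_metrics():
    a = np.array([[0, 0], [4, 0], [4, 4], [0, 4]], dtype=np.float32)
    b = np.array([[1, 1], [5, 1], [5, 5], [1, 5]], dtype=np.float32)
    print(dice(a.copy(), b.copy()))     # ~ 2*9 / (16+16) = 0.5625
    print(jaccard(a.copy(), b.copy()))  # ~ 9 / 23 = 0.391
    print(hausdorff(a, b))              # sqrt(2) = 1.414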
# --------------------------------------
# Volume calculation
# --------------------------------------
def ratio(pixel_spacing: tuple, slice_thickness: float, gap: float) -> (float, float):
ratio_slice = pixel_spacing[0] * pixel_spacing[1] * slice_thickness / 1000.0 # mm^3 -> ml conversion
ratio_gap = pixel_spacing[0] * pixel_spacing[1] * gap / 1000.0
return ratio_slice, ratio_gap
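# e.g. pixel_spacing=(1.25, 1.25) mm and slice_thickness=8 mm give
# ratio_slice = 1.25 * 1.25 * 8 / 1000 = 0.0125 ml per pixel of contour area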
def bsa(height, weight): # Mosteller BSA
if not(height is None or weight is None):
return math.sqrt(height * weight / 3600.0)
else:
return None
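# e.g. bsa(180, 80) = sqrt(180 * 80 / 3600) = 2.0 m^2 (Mosteller formula)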
def area_triangular(curve):
"""
Calculates the area of a closed curve based on
    cross products.
:param curve: a numpy matrix with shape (N, 2), points are in x, y format
elements are floats
:return: area
"""
# calculate center of mass
crm = np.sum(curve, axis=0) / curve.shape[0]
# vector between crm and a point of the curve
r = curve - crm
# side vector
curve_mtx_shifted = np.ones_like(curve)
curve_mtx_shifted[0] = curve[-1]
curve_mtx_shifted[1:] = curve[0:-1]
dr = curve - curve_mtx_shifted
# vector product
rxdr = np.cross(r, dr)
    # sum up the triangle pieces
return np.abs(0.5 * np.sum(rxdr))
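# Sanity check (illustrative): for the unit square [(0,0),(1,0),(1,1),(0,1)]
# each of the four triangles contributes 1/4, so the area comes out as 1.0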
def convert_to_hierarchical(contours):
"""
convert list of contours into a hierarchical structure
slice > frame > heartpart -- Contour
:param contours: list of Contour objects
:return: a hierarchical structure which contains Contour objects
"""
hierarchical_contours = {}
for contour in contours:
if not(contour.slice in hierarchical_contours.keys()):
hierarchical_contours[contour.slice] = {}
if not(contour.frame in hierarchical_contours[contour.slice].keys()):
hierarchical_contours[contour.slice][contour.frame] = {}
hierarchical_contours[contour.slice][contour.frame][contour.part] = contour
return hierarchical_contours
def calculate_contour_area(curve: np.ndarray):
"""
    calculate the area by triangulation
:param curve: numpy matrix (N, 2)
:return: area of the closed curve
"""
return area_triangular(curve)
def grouping(hierarchical_contours, calculate_area):
"""
Determines the contour which phase belongs to (systole or diastole).
Calculates the areas of each contour.
:param hierarchical_contours: a hierarchical structure which contains Contour objects
(slice > frame > heartpart -- Contour)
:param calculate_area: function to calculate area of the contour
:return: hierarchical structure with areas (slice > heartpart > phase -- area)
"""
def set_endphase(slice, frame, part, phase):
hierarchical_contours[slice][frame][part].phase = phase
hierarchical_contours[slice][frame][part].corresponding_image.phase = phase
contour_areas = {}
slices = hierarchical_contours.keys()
for slice in slices:
contour_areas[slice] = {}
for part in HeartPart:
areas = []
frames = []
contour_areas[slice][part] = {}
for frame in hierarchical_contours[slice].keys():
if part in hierarchical_contours[slice][frame]:
curve = hierarchical_contours[slice][frame][part]
frames.append(frame)
areas.append(calculate_area(curve.contour_mtx))
if len(areas) > 1:
contour_areas[slice][part][EndPhase.DIA] = max(areas)
contour_areas[slice][part][EndPhase.SYS] = min(areas)
set_endphase(slice, frames[areas.index(max(areas))], part, EndPhase.DIA)
set_endphase(slice, frames[areas.index(min(areas))], part, EndPhase.SYS)
elif len(areas) == 1:
ds = np.array([frames[0] - 0, frames[0] - 20, frames[0] - 9]) # this is a heuristic
idx = np.argmin(np.abs(ds))
if idx in [0, 1]:
contour_areas[slice][part][EndPhase.DIA] = areas[0]
contour_areas[slice][part][EndPhase.SYS] = None
set_endphase(slice, frames[0], part, EndPhase.DIA)
else:
contour_areas[slice][part][EndPhase.DIA] = None
contour_areas[slice][part][EndPhase.SYS] = areas[0]
set_endphase(slice, frames[0], part, EndPhase.SYS)
else:
contour_areas[slice][part][EndPhase.DIA] = None
contour_areas[slice][part][EndPhase.SYS] = None
return contour_areas
def volume(contour_areas, part, phase, ratio):
"""
:param contour_areas: hierarchical structure with areas (slice > heartpart > phase -- area)
:param part: heartpart e.g.: left-endo
:param phase: systole or diastole
:param ratio: comes from the field view, volume changing and slice thickness
:return: volume of the heart in part at phase
"""
ratio_slice, ratio_gap = ratio
v = 0
slices = list(contour_areas.keys())
for idx in range(len(slices) - 1):
a1 = contour_areas[slices[idx]][part][phase]
a2 = contour_areas[slices[idx + 1]][part][phase]
if a1 is not None:
v += a1 * ratio_slice
if a2 is not None:
v += (a1 + np.sqrt(a1 * a2) + a2) * ratio_gap / 3.0
a1 = contour_areas[slices[-1]][part][phase] # the last slice
if a1 is not None:
v += a1 * ratio_slice
return v
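# The inter-slice term is the conical-frustum volume h/3 * (A1 + sqrt(A1*A2) + A2);
# e.g. slice areas 10 and 14.4 with ratio_gap = 2 add 2/3 * (10 + 12 + 14.4) ≈ 24.27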
def calculate_volumes_left(contour_areas, ratio, bsa=None):
lved = volume(contour_areas, HeartPart.LN, EndPhase.DIA, ratio) # left ED
lves = volume(contour_areas, HeartPart.LN, EndPhase.SYS, ratio) # left ES
lvsv = lved - lves # left Stroke-volume
volume_indices = {'lved': lved, 'lves': lves, 'lvsv': lvsv}
# other metrics: left
if bsa is None:
return volume_indices
lved_i = lved / bsa # left ED-index
lves_i = lves / bsa # left ES-index
lvsv_i = lvsv / bsa # left SV-index
volume_indices['lved_i'] = lved_i
volume_indices['lves_i'] = lves_i
volume_indices['lvsv_i'] = lvsv_i
return volume_indices
def calculate_volumes_right(contour_areas, ratio, bsa=None):
rved = volume(contour_areas, HeartPart.RN, EndPhase.DIA, ratio)
rves = volume(contour_areas, HeartPart.RN, EndPhase.SYS, ratio)
rvsv = rved - rves # right Stroke-volume
volume_indices = {'rved': rved, 'rves': rves, 'rvsv': rvsv}
# other metrics: right
if bsa is None:
return volume_indices
rved_i = rved / bsa # right ED-index
rves_i = rves / bsa # right ES-index
rvsv_i = rvsv / bsa # right SV-index
volume_indices['rved_i'] = rved_i
volume_indices['rves_i'] = rves_i
volume_indices['rvsv_i'] = rvsv_i
return volume_indices
class VolumeIndices:
def __init__(self):
self.gender = None
self.lved = None
self.lves = None
self.lvsv = None
self.lved_i = None
self.lves_i = None
self.lvsv_i = None
self.rved = None
self.rves = None
self.rvsv = None
self.rved_i = None
self.rves_i = None
self.rvsv_i = None
@classmethod
def from_dictionary(cls, dictionary: dict, gender):
        def return_if_exists(abbreviation):
            if dictionary is not None:
                if abbreviation in dictionary:
                    return dictionary[abbreviation]
return None
obj = cls()
obj.gender = gender
obj.lved = return_if_exists('lved')
obj.lves = return_if_exists('lves')
obj.lvsv = return_if_exists('lvsv')
obj.lved_i = return_if_exists('lved_i')
obj.lves_i = return_if_exists('lves_i')
obj.lvsv_i = return_if_exists('lvsv_i')
obj.rved = return_if_exists('rved')
obj.rves = return_if_exists('rves')
obj.rvsv = return_if_exists('rvsv')
obj.rved_i = return_if_exists('rved_i')
obj.rves_i = return_if_exists('rves_i')
obj.rvsv_i = return_if_exists('rvsv_i')
return obj
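# Illustrative wiring of the helpers above (argument values are assumptions):
#   areas = grouping(convert_to_hierarchical(contours), calculate_contour_area)
#   vols = calculate_volumes_left(areas, ratio((1.25, 1.25), 8.0, 2.0), bsa(180, 80))
#   vi = VolumeIndices.from_dictionary(vols, Gender.M)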
# --------------------------------------
# Reorder percentages
# --------------------------------------
class Zone(Enum):
UNK = 0 # unknown (missing data)
AL = 1 # abnormal low
NZ = 2 # normal zone
AH = 3 # abnormal high
class ReorderPercentage:
"""
    Reference: Petersen et al. Journal of Cardiovascular Magnetic Resonance (2017) 19:18 DOI 10.1186/s12968-017-0327-9
"""
def __init__(self, volume_idcs: list):
"""
volume_idcs - pair of VolumeIndices objects (original, predicted)
"""
self.volume_idcs = volume_idcs
self.zone_calculators = [
self._lved, self._lves, self._lvsv,
self._lved_idx, self._lves_idx, self._lvsv_idx,
self._rved, self._rves, self._rvsv,
self._rved_idx, self._rves_idx, self._rvsv_idx
]
@staticmethod
def _get_zone(gender, ventricular_value, male_ranges, female_ranges):
if gender == Gender.M:
if ventricular_value is None:
return Zone.UNK
for barrier, zone in zip(male_ranges, [Zone.AL, Zone.NZ]):
if ventricular_value < barrier:
return zone
return Zone.AH
elif gender == Gender.F:
if ventricular_value is None:
return Zone.UNK
for barrier, zone in zip(female_ranges, [Zone.AL, Zone.NZ]):
if ventricular_value < barrier:
return zone
return Zone.AH
else:
return Zone.UNK
# Left side
def _lved(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lved
male_ranges = [93, 232]
female_ranges = [80, 175]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _lves(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lves
male_ranges = [34, 103]
female_ranges = [25, 73]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _lvsv(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lvsv
male_ranges = [49, 140]
female_ranges = [47, 110]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _lved_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lved_i
male_ranges = [52, 117]
female_ranges = [50, 101]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _lves_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lves_i
male_ranges = [19, 52]
female_ranges = [16, 43]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _lvsv_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.lvsv_i
male_ranges = [28, 70]
female_ranges = [29, 63]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
# Right side
def _rved(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rved
male_ranges = [99, 260]
female_ranges = [83, 192]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _rves(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rves
male_ranges = [34, 135]
female_ranges = [26, 95]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _rvsv(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rvsv
male_ranges = [54, 140]
female_ranges = [47, 107]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _rved_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rved_i
male_ranges = [55, 128]
female_ranges = [51, 110]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _rves_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rves_i
male_ranges = [19, 67]
female_ranges = [16, 55]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def _rvsv_idx(self, volume_idcs: VolumeIndices):
ventricular_value = volume_idcs.rvsv_i
male_ranges = [30, 69]
female_ranges = [29, 61]
return self._get_zone(volume_idcs.gender, ventricular_value, male_ranges, female_ranges)
def reordering_percentage(self):
"""
This function calculates how many times
the suggested zone is different
in case of the predicted volume data.
"""
overall_errors = {}
LN = {}
NH = {}
NL = {}
HN = {}
LH = {}
HL = {}
for zone_calculator in self.zone_calculators:
zc = lambda vi: (zone_calculator(vi[0]), zone_calculator(vi[1])) # original, predicted
volumes_as_zone = list(map(zc, self.volume_idcs))
cntr = 0
equal, ln, nh, nl, hn, lh, hl = 0, 0, 0, 0, 0, 0, 0
for volume_pair in volumes_as_zone:
if not(volume_pair[0] == Zone.UNK or volume_pair[1] == Zone.UNK):
cntr += 1
if volume_pair[0] == volume_pair[1]:
equal += 1
elif volume_pair[0] == Zone.AL and volume_pair[1] == Zone.NZ:
ln += 1
elif volume_pair[0] == Zone.NZ and volume_pair[1] == Zone.AH:
nh += 1
elif volume_pair[0] == Zone.NZ and volume_pair[1] == Zone.AL:
nl += 1
elif volume_pair[0] == Zone.AH and volume_pair[1] == Zone.NZ:
hn += 1
elif volume_pair[0] == Zone.AL and volume_pair[1] == Zone.AH:
lh += 1
elif volume_pair[0] == Zone.AH and volume_pair[1] == Zone.AL:
hl += 1
overall_errors[zone_calculator.__name__] = (1 - equal / cntr) if cntr > 0 else None
LN[zone_calculator.__name__] = (ln / cntr) if cntr > 0 else None
NH[zone_calculator.__name__] = (nh / cntr) if cntr > 0 else None
NL[zone_calculator.__name__] = (nl / cntr) if cntr > 0 else None
HN[zone_calculator.__name__] = (hn / cntr) if cntr > 0 else None
LH[zone_calculator.__name__] = (lh / cntr) if cntr > 0 else None
HL[zone_calculator.__name__] = (hl / cntr) if cntr > 0 else None
return overall_errors, LN, NH, NL, HN, LH, HL
| 37.15261 | 117 | 0.600908 | 2,400 | 18,502 | 4.444167 | 0.152917 | 0.03844 | 0.019689 | 0.031689 | 0.426402 | 0.395087 | 0.326458 | 0.28633 | 0.271704 | 0.201013 | 0 | 0.0322 | 0.283267 | 18,502 | 497 | 118 | 37.227364 | 0.772114 | 0.181062 | 0 | 0.131902 | 0 | 0 | 0.008116 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101227 | false | 0 | 0.015337 | 0 | 0.260736 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3206936433ce667f0ce4f6df51f664f04496ea93 | 9,077 | py | Python | coinbase.py | foppini975/FinRL | aead943817d1387dc3654de2c189767d10140b78 | [
"MIT"
] | null | null | null | coinbase.py | foppini975/FinRL | aead943817d1387dc3654de2c189767d10140b78 | [
"MIT"
] | null | null | null | coinbase.py | foppini975/FinRL | aead943817d1387dc3654de2c189767d10140b78 | [
"MIT"
] | null | null | null | # Coinbase Pro library:
# https://github.com/danpaquin/coinbasepro-python
#curl "https://api.pro.coinbase.com/products/BTC-USD/candles?start=2021-01-01T12:00:00&end=2021-01-12T12:00:00&granularity=3600"
import cbpro
import numpy as np
import pandas as pd
import logging
from datetime import datetime, timedelta
import json
#from IPython.core.debugger import set_trace
class Coinbase:
def __init__(self, product, logging_level = logging.INFO, products_file = None):
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging_level, format=FORMAT)
# init
self.product = product
self.df = None
# client creation
self.public_client = cbpro.PublicClient()
# get products
self.products = self.public_client.get_products()
if products_file is not None:
with open(products_file, 'w') as fp:
json.dump(self.products, fp)
logging.info(f"Found {len(self.products)} products, saved to {products_file}")
else:
logging.info(f"Found {len(self.products)} products")
found = False
for prod in self.products:
if prod['id'] == self.product:
found = True
logging.info(prod)
break
if found is False:
raise Exception(f"Product {self.product} not valid")
@staticmethod
def getProductList(products_file = None):
products = cbpro.PublicClient().get_products()
if products_file is not None:
with open(products_file, 'w') as fp:
json.dump(products, fp)
return products
@staticmethod
def getPrice(product):
return float(cbpro.PublicClient().get_product_ticker(product)['price'])
def loadHistory(self, start_date, end_date, granularity = 86400, moving_average = 20):
#
# dates are datetime objects, can be crated with:
# start_utc = datetime(2021, 1, 1)
#
start_interval = start_date - timedelta(days=moving_average)
end_interval = None
Granularity_Map = {
            60: timedelta(hours=5),  # ~5 hours (300 one-minute candles) per call
            86400: timedelta(days=28 * 6 - 1)  # ~167 daily candles per call
}
if granularity not in Granularity_Map:
raise Exception(f"Granularity {granularity} not valid")
self.df = pd.DataFrame()
while True:
if end_interval is not None:
start_interval = end_interval + timedelta(seconds=1)
if start_interval > end_date:
break
end_interval = start_interval + Granularity_Map[granularity]
if end_interval > end_date:
end_interval = end_date
start_interval_iso = start_interval.isoformat()
end_interval_iso = end_interval.isoformat()
btc_history = self.public_client.get_product_historic_rates(
self.product, start=start_interval_iso,
end=end_interval_iso,
granularity=granularity)
if len(btc_history) == 1 and 'message' in btc_history:
raise Exception(btc_history['message'])
logging.info(f"Fetched from {start_interval_iso} to {end_interval_iso} : #{len(btc_history)} points")
if len(btc_history) == 0:
continue
btc_history_np = np.array(btc_history)
df_new = pd.DataFrame(btc_history_np, columns = ['Time','Low','High','Open','Close','Volume'])
self.df = self.df.append(df_new, ignore_index=True, sort=True)
self.df['tic'] = self.product
self.df['Time'] = pd.to_datetime(self.df['Time'], unit='s')
moving_average_label = f"MA{moving_average}"
self.df.sort_values(by='Time', inplace=True)
self.df[moving_average_label] = self.df['Close'].rolling(window=moving_average).mean()
# let's remove the initial points where moving average was not available
self.df = self.df[self.df['Time'] >= start_date]
self.df.reset_index(drop=True, inplace=True)
#time bucket start time
#low lowest price during the bucket interval
#high highest price during the bucket interval
#open opening price (first trade) in the bucket interval
#close closing price (last trade) in the bucket interval
#volume volume of trading activity during the bucket interval
def calculateBuy(self, moving_average = 20, below_threshold = 0.1):
# "Buy" significa che il valore era sceso del x% sotto il valore attuale e ora e' tornato sopra la moving average
#
# Let's generate the Below column (min-hold below moving average)
moving_average_label = f"MA{moving_average}"
self.df['Below'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
if current_value < row[moving_average_label]:
below = current_value - row[moving_average_label]
try:
previous_below = self.df.loc[index-1, 'Below']
except:
previous_below = 0
if below < previous_below:
self.df.loc[index, 'Below'] = below
else:
self.df.loc[index, 'Below'] = previous_below
# Let's generate the BUY trigger based on the Below column
self.df['Buy'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
try:
previous_below = self.df.loc[index-1, 'Below']
except:
previous_below = 0
if current_value > row[moving_average_label] and previous_below < -1*below_threshold*current_value:
self.df.loc[index, 'Buy'] = self.df['Close'].max()/5 # placeholder value to facilitate the plot
def calculateSell(self, moving_average = 20, above_threshold = 0.1):
# "Sell" significa che il valore era salito del x% sopra il valore attuale e ora e' sceso sotto la moving average
#
# Let's generate the Above column (max-hold above moving average)
moving_average_label = f"MA{moving_average}"
self.df['Above'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
if current_value > row[moving_average_label]:
above = current_value - row[moving_average_label]
try:
previous_above = self.df.loc[index-1, 'Above']
except:
previous_above = 0
if above > previous_above:
self.df.loc[index, 'Above'] = above
else:
self.df.loc[index, 'Above'] = previous_above
# Let's generate the SELL trigger based on the Above column
self.df['Sell'] = 0
for index, row in self.df.iterrows():
current_value = row['Close']
try:
previous_above= self.df.loc[index-1, 'Above']
except:
previous_above = 0
if current_value < row[moving_average_label] and previous_above > above_threshold*current_value:
self.df.loc[index, 'Sell'] = -1*self.df['Close'].max()/5 # placeholder value to facilitate the plot
def backSimulate(self, initial_amount = 100):
self.df['Wallet_USD'] = 0
self.df['Wallet_Crypto'] = 0
self.df['Wallet_Crypto_Hold'] = 0
for index, row in self.df.iterrows():
self.df.loc[index, 'Wallet_Crypto_Hold'] = initial_amount/self.df.loc[0,'Close'] * self.df.loc[index,'Close']
if index == 0:
self.df.loc[0, 'Wallet_USD'] = initial_amount
continue
if self.df.loc[index, 'Buy'] != 0 and self.df.loc[index-1,'Wallet_USD'] > 0:
# Buy
purchased_crypto = self.df.loc[index-1,'Wallet_USD'] / self.df.loc[index,'Close']
logging.info(f"Buy : {self.df.loc[index-1,'Wallet_USD']} USD ---> {purchased_crypto} BTC")
self.df.loc[index,'Wallet_Crypto'] = purchased_crypto
self.df.loc[index,'Wallet_USD'] = 0
elif self.df.loc[index, 'Sell'] != 0 and self.df.loc[index-1,'Wallet_Crypto'] > 0:
# Sell
sold_crypto = self.df.loc[index-1,'Wallet_Crypto'] * self.df.loc[index,'Close']
logging.info(f"Sell: {self.df.loc[index-1,'Wallet_Crypto']} BTC ---> {sold_crypto} BUSDTC")
self.df.loc[index,'Wallet_USD'] = sold_crypto
self.df.loc[index,'Wallet_Crypto'] = 0
else:
# Hold
self.df.loc[index,'Wallet_USD'] = self.df.loc[index-1,'Wallet_USD']
self.df.loc[index,'Wallet_Crypto'] = self.df.loc[index-1,'Wallet_Crypto']
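    # Illustrative end-to-end usage (dates, product id and thresholds are
    # assumptions, not values from this repo):
    #   cb = Coinbase('BTC-USD')
    #   cb.loadHistory(datetime(2021, 1, 1), datetime(2021, 6, 1))
    #   cb.calculateBuy(moving_average=20, below_threshold=0.1)
    #   cb.calculateSell(moving_average=20, above_threshold=0.1)
    #   cb.backSimulate(initial_amount=100)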
def getTicker(self):
        return self.public_client.get_product_ticker(self.product)
| 46.076142 | 128 | 0.593588 | 1,137 | 9,077 | 4.591909 | 0.201407 | 0.068952 | 0.055162 | 0.080444 | 0.425206 | 0.356828 | 0.302241 | 0.2647 | 0.199962 | 0.1923 | 0 | 0.017632 | 0.300209 | 9,077 | 197 | 129 | 46.076142 | 0.804314 | 0.145312 | 0 | 0.28 | 0 | 0.013333 | 0.113398 | 0.00945 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0 | 0.04 | 0.013333 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
3206d78caefb347d3aafe66f87b062f05ea1e6f8 | 1,134 | py | Python | tests/integration/shell/runner.py | d--j/salt | 579f900be67a80e1a77674bc6aa21fec836c1c4c | [
"Apache-2.0"
] | 1 | 2015-06-05T13:47:02.000Z | 2015-06-05T13:47:02.000Z | tests/integration/shell/runner.py | epoelke/salt | 80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2 | [
"Apache-2.0"
] | null | null | null | tests/integration/shell/runner.py | epoelke/salt | 80ae64e54f9f336d3cdb6e03e42f2a50469ec8f2 | [
"Apache-2.0"
] | null | null | null | '''
Tests for the salt-run command
'''
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class RunTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
'''
Test the salt-run command
'''
_call_binary_ = 'salt-run'
def test_in_docs(self):
'''
test the salt-run docs system
'''
data = self.run_run('-d')
data = '\n'.join(data)
self.assertIn('jobs.active:', data)
self.assertIn('jobs.list_jobs:', data)
self.assertIn('jobs.lookup_jid:', data)
self.assertIn('manage.down:', data)
self.assertIn('manage.up:', data)
self.assertIn('network.wol:', data)
self.assertIn('network.wollist:', data)
def test_notin_docs(self):
'''
Verify that hidden methods are not in run docs
'''
data = self.run_run('-d')
data = '\n'.join(data)
self.assertNotIn('jobs.SaltException:', data)
if __name__ == '__main__':
from integration import run_tests
run_tests(RunTest)
| 24.652174 | 76 | 0.617284 | 136 | 1,134 | 4.963235 | 0.404412 | 0.118519 | 0.165926 | 0.088889 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0.094815 | 0 | 0 | 0.246032 | 1,134 | 45 | 77 | 25.2 | 0.789474 | 0.155203 | 0 | 0.181818 | 0 | 0 | 0.15991 | 0 | 0 | 0 | 0 | 0 | 0.363636 | 1 | 0.090909 | false | 0 | 0.136364 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
320a56299d5b0a2df9ef09f9fd22f38ffc418aca | 8,229 | py | Python | stacktach/stacklog.py | PreetiKamble29/stacktach | f4f905393a0d7eaa226a72b6a27b61e4ef52211d | [
"Apache-2.0"
] | null | null | null | stacktach/stacklog.py | PreetiKamble29/stacktach | f4f905393a0d7eaa226a72b6a27b61e4ef52211d | [
"Apache-2.0"
] | 4 | 2020-02-28T10:27:34.000Z | 2022-02-02T01:13:09.000Z | stacktach/stacklog.py | PreetiKamble29/stacktach | f4f905393a0d7eaa226a72b6a27b61e4ef52211d | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import multiprocessing
import os
import re
import threading
import traceback
import sys
import time
LOGGERS = {}
LOGGER_QUEUE_MAP = {}
default_logger_location = '/var/log/stacktach/%s.log'
default_logger_name = 'stacktach-default'
def set_default_logger_location(loc):
global default_logger_location
default_logger_location = loc
def set_default_logger_name(name):
global default_logger_name
default_logger_name = name
class ParentLoggerDoesNotExist(Exception):
def __init__(self, parent_logger_name):
self.reason = "Cannot create child logger as parent logger with the" \
"name %s does not exist." % parent_logger_name
def _create_parent_logger(parent_logger_name):
if parent_logger_name not in LOGGERS:
logger = _create_timed_rotating_logger(parent_logger_name)
LOGGERS[parent_logger_name] = logger
LOGGER_QUEUE_MAP[parent_logger_name] = multiprocessing.Queue(-1)
return LOGGERS[parent_logger_name]
def _create_child_logger(parent_logger_name):
child_logger_name = "child_%s" % parent_logger_name
if child_logger_name in LOGGERS:
return LOGGERS[child_logger_name]
if parent_logger_name in LOGGERS:
queue = LOGGER_QUEUE_MAP[parent_logger_name]
logger = _create_queue_logger(child_logger_name, queue)
LOGGERS[child_logger_name] = logger
else:
raise ParentLoggerDoesNotExist(parent_logger_name)
return LOGGERS[child_logger_name]
def _logger_factory(parent_logger_name, is_parent):
if parent_logger_name is None:
parent_logger_name = default_logger_name
if is_parent:
return _create_parent_logger(parent_logger_name)
else:
return _create_child_logger(parent_logger_name)
def get_logger(name=None, is_parent=True):
return _logger_factory(name, is_parent)
def warn(msg, name=None):
if name is None:
name = default_logger_name
get_logger(name=name, is_parent=False).warn(msg)
def error(msg, name=None):
if name is None:
name = default_logger_name
get_logger(name=name, is_parent=False).error(msg)
def info(msg, name=None):
if name is None:
name = default_logger_name
get_logger(name=name, is_parent=False).info(msg)
def _create_timed_rotating_logger(name):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = TimedRotatingFileHandlerWithCurrentTimestamp(
default_logger_location % name, when='midnight', interval=1,
backupCount=6)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.handlers[0].doRollover()
return logger
def _create_queue_logger(name, queue):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
handler = QueueHandler(queue)
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
return logger
class QueueHandler(logging.Handler):
def __init__(self, queue):
logging.Handler.__init__(self)
self.queue = queue
def emit(self, record):
try:
# ensure that exc_info and args
# have been stringified. Removes any chance of
# unpickleable things inside and possibly reduces
# message size sent over the pipe
if record.exc_info:
# just to get traceback text into record.exc_text
self.format(record)
# remove exception info as it's not needed any more
record.exc_info = None
if record.args:
record.msg = record.msg % record.args
record.args = None
self.queue.put_nowait(record)
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
class LogListener:
def __init__(self, logger):
self.logger = logger
self.queue = get_queue(logger.name)
def start(self):
self.thread = threading.Thread(target=self._receive)
self.thread.daemon = True
self.thread.start()
def _receive(self):
while True:
try:
record = self.queue.get()
# None is sent as a sentinel to tell the listener to quit
if record is None:
break
self.logger.handle(record)
except (KeyboardInterrupt, SystemExit):
raise
except EOFError:
break
except:
traceback.print_exc(file=sys.stderr)
def end(self):
self.queue.put_nowait(None)
self.thread.join()
for handler in self.logger.handlers:
handler.close()
def get_queue(logger_name):
return LOGGER_QUEUE_MAP[logger_name]
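# Illustrative sketch (not part of the original module): the parent process owns
# the rotating file logger and drains the shared queue via LogListener, while
# workers log through the lightweight queue-backed child logger. The logger name
# and log location below are assumptions.
def _example_usage():
    set_default_logger_location('/tmp/%s.log')  # avoid needing /var/log/stacktach
    parent = get_logger('worker')               # creates file handler + queue
    listener = LogListener(parent)
    listener.start()
    child = get_logger('worker', is_parent=False)  # QueueHandler-backed logger
    child.info('hello from a worker')
    listener.end()                              # sentinel, join, close handlers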
class TimedRotatingFileHandlerWithCurrentTimestamp(
logging.handlers.TimedRotatingFileHandler):
def __init__(self, filename, when='h', interval=1, backupCount=0,
encoding=None, delay=False, utc=False):
logging.handlers.TimedRotatingFileHandler.__init__(
self, filename, when, interval, backupCount, encoding, delay, utc)
self.suffix = "%Y-%m-%d_%H-%M-%S"
self.extMatch = re.compile(r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$")
def doRollover(self):
"""Exactly the same as TimedRotatingFileHandler's doRollover() except
that the current date/time stamp is appended to the filename rather
than the start date/time stamp, when the rollover happens."""
currentTime = int(time.time())
if self.stream:
self.stream.close()
self.stream = None
if self.utc:
timeTuple = time.gmtime(currentTime)
else:
timeTuple = time.localtime(currentTime)
dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
if os.path.exists(dfn):
os.remove(dfn)
os.rename(self.baseFilename, dfn)
if self.backupCount > 0:
# find the oldest log file and delete it
#s = glob.glob(self.baseFilename + ".20*")
#if len(s) > self.backupCount:
# s.sort()
# os.remove(s[0])
for s in self.getFilesToDelete():
os.remove(s)
#print "%s -> %s" % (self.baseFilename, dfn)
self.mode = 'w'
self.stream = self._open()
newRolloverAt = self.computeRollover(currentTime)
while newRolloverAt <= currentTime:
newRolloverAt = newRolloverAt + self.interval
#If DST changes and midnight or weekly rollover, adjust for this.
if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
dstNow = time.localtime(currentTime)[-1]
dstAtRollover = time.localtime(newRolloverAt)[-1]
if dstNow != dstAtRollover:
if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
newRolloverAt = newRolloverAt - 3600
else: # DST bows out before next rollover, so we need to add an hour
newRolloverAt = newRolloverAt + 3600
self.rolloverAt = newRolloverAt
| 34.2875 | 97 | 0.655244 | 1,000 | 8,229 | 5.226 | 0.264 | 0.078454 | 0.055109 | 0.020092 | 0.223881 | 0.183697 | 0.118638 | 0.107922 | 0.106008 | 0.083429 | 0 | 0.004929 | 0.260299 | 8,229 | 239 | 98 | 34.430962 | 0.853622 | 0.198201 | 0 | 0.218182 | 0 | 0.006061 | 0.046295 | 0.009473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.127273 | false | 0 | 0.054545 | 0.012121 | 0.260606 | 0.006061 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
320d4ac1c04314ad47e8ce414ec8f5ce00f6d0a4 | 1,087 | py | Python | Chapter07/plotly_flask_demo1/template2.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | [
"MIT"
] | null | null | null | Chapter07/plotly_flask_demo1/template2.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | [
"MIT"
] | null | null | null | Chapter07/plotly_flask_demo1/template2.py | allen-zqh/plotly | bcaf0930901e77db07245b63bff049eb75893416 | [
"MIT"
] | 1 | 2021-02-04T06:56:18.000Z | 2021-02-04T06:56:18.000Z | from flask import render_template
from flask import Flask
import plotly as py
import plotly.graph_objs as go
app = Flask(__name__)
@app.route('/')
def index():
pyplt = py.offline.plot
trace0 = go.Bar(
        x=['Type A unit', 'Type B unit', 'Type C unit'],
        y=[20, 14, 23],
        text=['27% market share', '24% market share', '19% market share'],
marker=dict(
color='rgb(158,202,225)',
line=dict(
color='rgb(8,48,107)',
width=1.5,
)
),
opacity=0.6
)
data = [trace0]
layout = go.Layout(
        title='Unit prices by housing type, January 2017',
)
fig = go.Figure(data=data, layout=layout)
div = pyplt(fig, output_type='div', include_plotlyjs=False, auto_open=False, show_link=False)
context = {}
context['graph'] = div
import sys
    print('The div parameter occupies %d bytes of memory' % sys.getsizeof(div))
with open('div1.txt', 'w') as file:
file.write(div)
return render_template("index2.html",
title = 'Home',
context = context)
if __name__ == '__main__':
app.run()
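
# Hedged note (added): "index2.html" is not shown here; for the div to render,
# the template is expected to mark it safe and load plotly.js itself (the code
# above passes include_plotlyjs=False), e.g. in Jinja2:
#     <script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
#     {{ context['graph'] | safe }}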
| 22.645833 | 97 | 0.547378 | 134 | 1,087 | 4.298507 | 0.649254 | 0.057292 | 0.052083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05291 | 0.304508 | 1,087 | 47 | 98 | 23.12766 | 0.708995 | 0 | 0 | 0 | 0 | 0 | 0.132475 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.184211 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
320f930d9ba74b1df7112b848be26ad7f7ad719b | 4,101 | py | Python | scraper/log.py | micort35/prestiology | 729d4558005e1c6d07ac5044ff10cbb1b7a522c6 | [
"MIT"
] | null | null | null | scraper/log.py | micort35/prestiology | 729d4558005e1c6d07ac5044ff10cbb1b7a522c6 | [
"MIT"
] | null | null | null | scraper/log.py | micort35/prestiology | 729d4558005e1c6d07ac5044ff10cbb1b7a522c6 | [
"MIT"
] | null | null | null | import statistics
from datetime import date
import psycopg2
from psycopg2 import sql
class Log:
def __init__(self, score, gameday):
#gather player data
self.name = score.get('name')
self.team = (score.get('team')).name
self.date = gameday
self.mins = round(((score.get('seconds_played'))/60), 2)
self.fgm = score.get('made_field_goals')
self.fga = score.get('attempted_field_goals')
        if self.fga == 0:
self.fg = None
else:
self.fg = round((self.fgm/self.fga), 4)
self.ftm = score.get('made_free_throws')
self.fta = score.get('attempted_free_throws')
        if self.fta == 0:
self.ft = None
else:
self.ft = round((self.ftm/self.fta), 4)
self.tpm = score.get('made_three_point_field_goals')
self.pts = ((self.fgm - self.tpm)*2) + (self.tpm*3) + (self.ftm*1)
self.reb = score.get('offensive_rebounds') + score.get('defensive_rebounds')
self.ast = score.get('assists')
self.stl = score.get('steals')
self.blk = score.get('blocks')
self.tov = score.get('turnovers')
def exists(self, cur):
#check if return is empty for given player
SQL = 'SELECT * FROM league_roster WHERE name = %s;'
cur.execute(SQL, (self.name,))
ans = cur.fetchone()
if ans is None:
return 0
else:
return 1
def get_pid(self, cur):
query = 'SELECT player_id FROM league_roster WHERE name = %s;'
cur.execute(query, (self.name,))
player_id = cur.fetchone()
return player_id[0]
def update_season_measures(self, p_id, cur):
#update games played
ct_query = 'SELECT COUNT(player_id) FROM game_logs WHERE player_id = %s'
cur.execute(ct_query, (p_id,))
res = (cur.fetchone())[0]
update_gp = 'UPDATE league_roster SET gp = %s WHERE player_id = %s'
cur.execute(update_gp, (res, p_id))
#update avgs and std devs
avg_vars = ('mins', 'fg', 'fga', 'ft', 'fta', 'tpm', 'pts', 'reb', 'ast', 'stl', 'blk', 'tov')
sd_vars = [var + '_sd' for var in avg_vars]
for avg, sd in zip(avg_vars, sd_vars):
#avg
avg_query = "SELECT AVG({}) FROM game_logs WHERE player_id = '{}'".format(avg, p_id)
cur.execute(avg_query)
res = (cur.fetchone())[0]
if res is not None:
res = round(res, 4)
update_avg = "UPDATE league_roster SET {} = %s WHERE player_id = '{}'".format(avg, p_id)
cur.execute(update_avg, (res,))
#stddev
sd_query = "SELECT STDDEV({}) FROM game_logs WHERE player_id = '{}'".format(avg, p_id)
cur.execute(sd_query)
res = (cur.fetchone())[0]
if res is not None:
res = round(res, 4)
update_sd = "UPDATE league_roster SET {} = %s WHERE player_id = '{}'".format(sd, p_id)
cur.execute(update_sd, (res,))
def ins_log(self, p_id, cur):
#Add game to logs
ins = 'INSERT INTO game_logs(player_id, name, date, mins, fgm, fga, ftm, fta, tpm, pts, reb, ast, stl, blk, tov)\
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'
ins_args = (p_id, self.name, self.date, self.mins, self.fgm, self.fga, self.ftm, self.fta, self.tpm,
self.pts, self.reb, self.ast, self.stl, self.blk, self.tov)
cur.execute(ins, ins_args)
#INSERT statement breaks with None values, use update for fields where possible
update = 'UPDATE game_logs SET fg = %s, ft = %s WHERE date = %s AND name = %s'
cur.execute(update, (self.fg, self.ft, self.date, self.name))
Log.update_season_measures(self, p_id, cur)
def add_player(self, cur):
#add player to roster
ins = 'INSERT INTO league_roster(name, team) VALUES(%s, %s);'
cur.execute(ins, (self.name, self.team))
#add game to game log
p_id = self.get_pid(cur)
Log.ins_log(self, p_id, cur) | 41.424242 | 121 | 0.568642 | 591 | 4,101 | 3.807107 | 0.199662 | 0.013333 | 0.017333 | 0.021333 | 0.270222 | 0.259111 | 0.218222 | 0.191556 | 0.138222 | 0.094667 | 0 | 0.006866 | 0.289685 | 4,101 | 99 | 122 | 41.424242 | 0.765534 | 0.059742 | 0 | 0.126582 | 0 | 0.037975 | 0.200416 | 0.018196 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075949 | false | 0 | 0.050633 | 0 | 0.177215 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
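# --- Hedged usage sketch for scraper/log.py above (added) -------------------
# The DSN, the team object and the stat values below are hypothetical; they
# only illustrate the mapping shape that Log.__init__ expects.
from types import SimpleNamespace
from datetime import date
import psycopg2

def _demo_log_insert():
    conn = psycopg2.connect(dbname='nba', user='stats')  # hypothetical DSN
    cur = conn.cursor()
    score = {
        'name': 'Example Player',
        'team': SimpleNamespace(name='BOS'),  # .name is read in __init__
        'seconds_played': 1980,
        'made_field_goals': 7, 'attempted_field_goals': 15,
        'made_free_throws': 4, 'attempted_free_throws': 5,
        'made_three_point_field_goals': 2,
        'offensive_rebounds': 1, 'defensive_rebounds': 6,
        'assists': 5, 'steals': 1, 'blocks': 0, 'turnovers': 3,
    }
    entry = Log(score, date(2020, 1, 15))
    if entry.exists(cur):
        entry.ins_log(entry.get_pid(cur), cur)
    else:
        entry.add_player(cur)
    conn.commit()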
32117082982e1b0cb2bf7bcfb0c7a96c4bf33ab1 | 14,141 | py | Python | main.py | VuNguyenQS/My-capstone-project | 219bdd50aa046bbecc30496cfdba39ea8799537c | [
"MIT"
] | null | null | null | main.py | VuNguyenQS/My-capstone-project | 219bdd50aa046bbecc30496cfdba39ea8799537c | [
"MIT"
] | null | null | null | main.py | VuNguyenQS/My-capstone-project | 219bdd50aa046bbecc30496cfdba39ea8799537c | [
"MIT"
] | null | null | null | import os
import time
import csv
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim
cudnn.benchmark = True
from models import ResNet
from metrics import AverageMeter, Result
from dataloaders.dense_to_sparse import UniformSampling, SimulatedStereo
import criteria
import utils
# This change in order to get lists
def load_split():
current_directoty = os.getcwd()
train_lists_path = current_directoty + '/trainIdxs.txt'
test_lists_path = current_directoty + '/testIdxs.txt'
train_f = open(train_lists_path)
test_f = open(test_lists_path)
train_lists = []
test_lists = []
train_lists_line = train_f.readline()
while train_lists_line:
train_lists.append(int(train_lists_line) - 1)
train_lists_line = train_f.readline()
train_f.close()
test_lists_line = test_f.readline()
while test_lists_line:
test_lists.append(int(test_lists_line) - 1)
test_lists_line = test_f.readline()
test_f.close()
val_start_idx = int(len(train_lists) * 0.8)
    val_lists = train_lists[val_start_idx:]
train_lists = train_lists[0:val_start_idx]
return train_lists, val_lists, test_lists
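# Added note (inferred from the parsing above): trainIdxs.txt and testIdxs.txt
# are expected to contain one 1-based integer index per line; load_split()
# shifts them to 0-based and keeps the last 20% of the training indices as a
# validation split.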
# This change in order to get lists
train_lists, val_lists, test_lists = load_split()
args = utils.parse_command()
print(args)
fieldnames = ['mse', 'rmse', 'absrel', 'lg10', 'mae',
'delta1', 'delta2', 'delta3',
'data_time', 'gpu_time']
best_result = Result()
best_result.set_to_worst()
def create_data_loaders(args):
# Data loading code
print("=> creating data loaders ...")
traindir = os.path.join('data', args.data, 'train')
valdir = os.path.join('data', args.data, 'val')
train_loader = None
val_loader = None
# sparsifier is a class for generating random sparse depth input from the ground truth
sparsifier = None
max_depth = args.max_depth if args.max_depth >= 0.0 else np.inf
if args.sparsifier == UniformSampling.name:
sparsifier = UniformSampling(num_samples=args.num_samples, max_depth=max_depth)
elif args.sparsifier == SimulatedStereo.name:
sparsifier = SimulatedStereo(num_samples=args.num_samples, max_depth=max_depth)
'''
if args.data == 'nyudepthv2':
from dataloaders.nyu_dataloader import NYUDataset
if not args.evaluate:
train_dataset = NYUDataset(traindir, type='train',
modality=args.modality, sparsifier=sparsifier)
val_dataset = NYUDataset(valdir, type='val',
modality=args.modality, sparsifier=sparsifier)
'''
if args.data == 'nyudepthv2':
from dataloaders.nyu_dataloader import NYUDataset
if not args.evaluate:
train_dataset = NYUDataset('nyu_depth_v2_labeled.mat',type = 'train',
modality=args.modality, sparsifier=sparsifier, lists = train_lists)
val_dataset = NYUDataset('nyu_depth_v2_labeled.mat', type = 'val', modality = args.modality, sparsifier = sparsifier, lists = val_lists)
elif args.data == 'kitti':
from dataloaders.kitti_dataloader import KITTIDataset
if not args.evaluate:
train_dataset = KITTIDataset(traindir, type='train',
modality=args.modality, sparsifier=sparsifier)
val_dataset = KITTIDataset(valdir, type='val',
modality=args.modality, sparsifier=sparsifier)
else:
raise RuntimeError('Dataset not found.' +
'The dataset must be either of nyudepthv2 or kitti.')
# set batch size to be 1 for validation
val_loader = torch.utils.data.DataLoader(val_dataset,
batch_size=1, shuffle=False, num_workers=args.workers, pin_memory=True)
# put construction of train loader here, for those who are interested in testing only
if not args.evaluate:
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None,
worker_init_fn=lambda work_id:np.random.seed(work_id))
# worker_init_fn ensures different sampling patterns for each data loading thread
print("=> data loaders created.")
return train_loader, val_loader
def main():
global args, best_result, output_directory, train_csv, test_csv
# evaluation mode
start_epoch = 0
if args.evaluate:
assert os.path.isfile(args.evaluate), \
"=> no best model found at '{}'".format(args.evaluate)
print("=> loading best model '{}'".format(args.evaluate))
checkpoint = torch.load(args.evaluate)
output_directory = os.path.dirname(args.evaluate)
args = checkpoint['args']
start_epoch = checkpoint['epoch'] + 1
best_result = checkpoint['best_result']
model = checkpoint['model']
print("=> loaded best model (epoch {})".format(checkpoint['epoch']))
_, val_loader = create_data_loaders(args)
args.evaluate = True
validate(val_loader, model, checkpoint['epoch'], write_to_file=False)
return
# optionally resume from a checkpoint
elif args.resume:
assert os.path.isfile(args.resume), \
"=> no checkpoint found at '{}'".format(args.resume)
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args = checkpoint['args']
start_epoch = checkpoint['epoch'] + 1
best_result = checkpoint['best_result']
model = checkpoint['model']
optimizer = checkpoint['optimizer']
output_directory = os.path.dirname(os.path.abspath(args.resume))
print("=> loaded checkpoint (epoch {})".format(checkpoint['epoch']))
train_loader, val_loader = create_data_loaders(args)
args.resume = True
# create new model
else:
train_loader, val_loader = create_data_loaders(args)
print("=> creating Model ({}-{}) ...".format(args.arch, args.decoder))
in_channels = len(args.modality)
if args.arch == 'resnet50':
model = ResNet(layers=50, decoder=args.decoder, output_size=train_loader.dataset.output_size,
in_channels=in_channels, pretrained=args.pretrained)
elif args.arch == 'resnet18':
model = ResNet(layers=18, decoder=args.decoder, output_size=train_loader.dataset.output_size,
in_channels=in_channels, pretrained=args.pretrained)
print("=> model created.")
optimizer = torch.optim.SGD(model.parameters(), args.lr, \
momentum=args.momentum, weight_decay=args.weight_decay)
# model = torch.nn.DataParallel(model).cuda() # for multi-gpu training
model = model.cuda()
# define loss function (criterion) and optimizer
if args.criterion == 'l2':
criterion = criteria.MaskedMSELoss().cuda()
elif args.criterion == 'l1':
criterion = criteria.MaskedL1Loss().cuda()
# create results folder, if not already exists
output_directory = utils.get_output_directory(args)
if not os.path.exists(output_directory):
os.makedirs(output_directory)
train_csv = os.path.join(output_directory, 'train.csv')
test_csv = os.path.join(output_directory, 'test.csv')
best_txt = os.path.join(output_directory, 'best.txt')
# create new csv files with only header
if not args.resume:
with open(train_csv, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
with open(test_csv, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for epoch in range(start_epoch, args.epochs):
utils.adjust_learning_rate(optimizer, epoch, args.lr)
train(train_loader, model, criterion, optimizer, epoch) # train for one epoch
result, img_merge = validate(val_loader, model, epoch) # evaluate on validation set
# remember best rmse and save checkpoint
is_best = result.rmse < best_result.rmse
if is_best:
best_result = result
with open(best_txt, 'w') as txtfile:
txtfile.write("epoch={}\nmse={:.3f}\nrmse={:.3f}\nabsrel={:.3f}\nlg10={:.3f}\nmae={:.3f}\ndelta1={:.3f}\nt_gpu={:.4f}\n".
format(epoch, result.mse, result.rmse, result.absrel, result.lg10, result.mae, result.delta1, result.gpu_time))
if img_merge is not None:
img_filename = output_directory + '/comparison_best.png'
utils.save_image(img_merge, img_filename)
utils.save_checkpoint({
'args': args,
'epoch': epoch,
'arch': args.arch,
'model': model,
'best_result': best_result,
'optimizer' : optimizer,
}, is_best, epoch, output_directory)
def train(train_loader, model, criterion, optimizer, epoch):
average_meter = AverageMeter()
model.train() # switch to train mode
end = time.time()
for i, (input, target) in enumerate(train_loader):
input, target = input.cuda(), target.cuda()
torch.cuda.synchronize()
data_time = time.time() - end
# compute pred
end = time.time()
pred = model(input)
loss = criterion(pred, target)
optimizer.zero_grad()
loss.backward() # compute gradient and do SGD step
optimizer.step()
torch.cuda.synchronize()
gpu_time = time.time() - end
# measure accuracy and record loss
result = Result()
result.evaluate(pred.data, target.data)
average_meter.update(result, gpu_time, data_time, input.size(0))
end = time.time()
if (i + 1) % args.print_freq == 0:
print('=> output: {}'.format(output_directory))
print('Train Epoch: {0} [{1}/{2}]\t'
't_Data={data_time:.3f}({average.data_time:.3f}) '
't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
'MAE={result.mae:.2f}({average.mae:.2f}) '
'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
'REL={result.absrel:.3f}({average.absrel:.3f}) '
'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
epoch, i+1, len(train_loader), data_time=data_time,
gpu_time=gpu_time, result=result, average=average_meter.average()))
avg = average_meter.average()
with open(train_csv, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,
'gpu_time': avg.gpu_time, 'data_time': avg.data_time})
def validate(val_loader, model, epoch, write_to_file=True):
average_meter = AverageMeter()
model.eval() # switch to evaluate mode
end = time.time()
for i, (input, target) in enumerate(val_loader):
input, target = input.cuda(), target.cuda()
torch.cuda.synchronize()
data_time = time.time() - end
# compute output
end = time.time()
with torch.no_grad():
pred = model(input)
torch.cuda.synchronize()
gpu_time = time.time() - end
# measure accuracy and record loss
result = Result()
result.evaluate(pred.data, target.data)
average_meter.update(result, gpu_time, data_time, input.size(0))
end = time.time()
# save 8 images for visualization
skip = 50
if args.modality == 'd':
img_merge = None
else:
if args.modality == 'rgb':
rgb = input
elif args.modality == 'rgbd':
rgb = input[:,:3,:,:]
depth = input[:,3:,:,:]
if i == 0:
if args.modality == 'rgbd':
img_merge = utils.merge_into_row_with_gt(rgb, depth, target, pred)
else:
img_merge = utils.merge_into_row(rgb, target, pred)
elif (i < 8*skip) and (i % skip == 0):
if args.modality == 'rgbd':
row = utils.merge_into_row_with_gt(rgb, depth, target, pred)
else:
row = utils.merge_into_row(rgb, target, pred)
img_merge = utils.add_row(img_merge, row)
elif i == 8*skip:
filename = output_directory + '/comparison_' + str(epoch) + '.png'
utils.save_image(img_merge, filename)
if (i+1) % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
't_GPU={gpu_time:.3f}({average.gpu_time:.3f})\n\t'
'RMSE={result.rmse:.2f}({average.rmse:.2f}) '
'MAE={result.mae:.2f}({average.mae:.2f}) '
'Delta1={result.delta1:.3f}({average.delta1:.3f}) '
'REL={result.absrel:.3f}({average.absrel:.3f}) '
'Lg10={result.lg10:.3f}({average.lg10:.3f}) '.format(
i+1, len(val_loader), gpu_time=gpu_time, result=result, average=average_meter.average()))
avg = average_meter.average()
print('\n*\n'
'RMSE={average.rmse:.3f}\n'
'MAE={average.mae:.3f}\n'
'Delta1={average.delta1:.3f}\n'
'REL={average.absrel:.3f}\n'
'Lg10={average.lg10:.3f}\n'
't_GPU={time:.3f}\n'.format(
average=avg, time=avg.gpu_time))
if write_to_file:
with open(test_csv, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'mse': avg.mse, 'rmse': avg.rmse, 'absrel': avg.absrel, 'lg10': avg.lg10,
'mae': avg.mae, 'delta1': avg.delta1, 'delta2': avg.delta2, 'delta3': avg.delta3,
'data_time': avg.data_time, 'gpu_time': avg.gpu_time})
return avg, img_merge
if __name__ == '__main__':
main()
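
# Hedged usage note (added): the flag names below are inferred from the
# `args.*` attributes accessed above (utils.parse_command is not shown, so
# the exact spellings are assumptions):
#   python main.py --data nyudepthv2 --modality rgbd --arch resnet18 \
#       --criterion l1 --epochs 15 --batch-size 8
#   python main.py --evaluate results/model_best.pth.tar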
| 40.402857 | 148 | 0.617849 | 1,731 | 14,141 | 4.882149 | 0.170422 | 0.016566 | 0.0142 | 0.021299 | 0.471778 | 0.427168 | 0.378772 | 0.331913 | 0.296533 | 0.287067 | 0 | 0.013787 | 0.256276 | 14,141 | 349 | 149 | 40.518625 | 0.789769 | 0.065696 | 0 | 0.297398 | 0 | 0.003717 | 0.136413 | 0.066724 | 0 | 0 | 0 | 0 | 0.007435 | 1 | 0.018587 | false | 0 | 0.052045 | 0 | 0.085502 | 0.055762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
32154a7bfec8aa22a3e74820b2c2af57fc5f81c6 | 647 | py | Python | reports/forms.py | kreeger/etcetera | a0e82e56ffc76cbc73aa59e23f6a77fce92fad08 | [
"BSD-3-Clause"
] | 1 | 2015-02-26T20:47:40.000Z | 2015-02-26T20:47:40.000Z | reports/forms.py | kreeger/etcetera | a0e82e56ffc76cbc73aa59e23f6a77fce92fad08 | [
"BSD-3-Clause"
] | null | null | null | reports/forms.py | kreeger/etcetera | a0e82e56ffc76cbc73aa59e23f6a77fce92fad08 | [
"BSD-3-Clause"
] | null | null | null | import urllib
from django import forms
from etcetera.reports import models as reports
from etcetera.extras.dateutil import formfield_callback, DateTimeField
class SearchForm(forms.Form):
q = forms.CharField(max_length=50)
def get_list(self):
# The search list is automatically everything
out_list = [
'name',
]
return out_list
def as_url_args(self):
return urllib.urlencode(self.cleaned_data)
class ReportModelForm(forms.ModelForm):
formfield_callback = formfield_callback
class Meta:
model = reports.Report
exclude = ('slug','created_by',) | 24.884615 | 70 | 0.678516 | 75 | 647 | 5.706667 | 0.64 | 0.119159 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004115 | 0.248841 | 647 | 26 | 71 | 24.884615 | 0.876543 | 0.066461 | 0 | 0 | 0 | 0 | 0.029851 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0.055556 | 0.722222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
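# --- Hedged usage sketch for reports/forms.py above (added) -----------------
# A view consuming SearchForm; the view name, template path and lookup are
# hypothetical.
from django.shortcuts import render

def report_search(request):
    form = SearchForm(request.GET or None)
    results = reports.Report.objects.none()
    if form.is_valid():
        # get_list() names the searchable fields; here only 'name'.
        results = reports.Report.objects.filter(
            name__icontains=form.cleaned_data['q'])
    return render(request, 'reports/search.html',
                  {'form': form, 'results': results})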
32180c83a0a4b8233e9903ee6c946175a6e07df6 | 2,873 | py | Python | cac/server/announcement.py | tobias93/cards-against-cli | 33c5a43a3b821438c94da719571655d87998384a | [
"MIT"
] | null | null | null | cac/server/announcement.py | tobias93/cards-against-cli | 33c5a43a3b821438c94da719571655d87998384a | [
"MIT"
] | null | null | null | cac/server/announcement.py | tobias93/cards-against-cli | 33c5a43a3b821438c94da719571655d87998384a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Handles the service announcement of the Cards against Cli Server.
Debugging command: (linux, requires avahi)
(A similar command should be available using bonjour on mac)
> avahi-browse --resolve "_cac._tcp"
Use the environment variable CAC_ANNOUNCE_INTERFACES to control
on which interface(s) the service should be announced.
Example:
export CAC_ANNOUNCE_INTERFACES=lo,wlp4s0
"""
from zeroconf import ServiceInfo, Zeroconf
import socket
import logging
import netifaces
import os
import uuid
_logger = logging.getLogger(__name__)
def start_announcing_on_if(server_name, interface, address, port):
_logger.info(
f"Starting to announce server named '{server_name}' "
f"via {interface} as {address}:{port}.")
service_uuid = uuid.uuid4()
service_type = "_cac._tcp.local."
service = ServiceInfo(service_type,
f"Cards-Against-Cli-Server-"
f"{service_uuid}.{service_type}",
socket.inet_aton(address), port,
properties=dict(name=server_name.encode("utf-8")))
zeroconf = Zeroconf(interfaces=[address])
zeroconf.register_service(service)
return zeroconf, service
def stop_announcing_on_if(zeroconf, service, iface):
_logger.info(f"Unregistering service on {iface}...")
zeroconf.unregister_service(service)
zeroconf.close()
def stop_announcing(announcers):
for zeroconf, service, iface in announcers:
stop_announcing_on_if(zeroconf, service, iface)
def start_announcing(server_name, port):
# announce on all interfaces
ifaces = get_interfaces()
result = []
for iface, addr in ifaces.items():
zeroconf, service = start_announcing_on_if(
server_name, iface, addr, port)
result.append((zeroconf, service, iface))
return result
def get_interfaces():
# get the list of interfaces
ifaces = netifaces.interfaces()
# get the address for each interface
result = dict()
for iface in ifaces:
addr = get_address_for_interface(iface)
if addr:
result[iface] = addr
if "CAC_ANNOUNCE_INTERFACES" in os.environ:
iface_whitelist = os.environ["CAC_ANNOUNCE_INTERFACES"].split(',')
result = {iface: addr
for iface, addr in result.items()
if iface in iface_whitelist and iface != ""}
return result
def get_address_for_interface(iface):
addrs = netifaces.ifaddresses(iface)
    # Currently, the python zeroconf implementation only supports IPv4 :-(
    # so only the interface's IPv4 address is announced here; the server
    # itself is not restricted by this.
addr_family = netifaces.AF_INET
if addr_family in addrs:
inet_addrs = addrs[netifaces.AF_INET]
for inet_addr in inet_addrs:
if "addr" in inet_addr:
return inet_addr["addr"]
return None
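

# --- Hedged usage sketch (added; not part of the original module) -----------
# Announce on all permitted interfaces, then withdraw on exit. The service
# name and port are hypothetical.
if __name__ == '__main__':
    announcers = start_announcing('My CaC Server', 7777)
    try:
        input('Announcing... press Enter to stop.\n')
    finally:
        stop_announcing(announcers)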
| 29.618557 | 78 | 0.674904 | 350 | 2,873 | 5.354286 | 0.337143 | 0.048026 | 0.029883 | 0.022412 | 0.123266 | 0.071505 | 0.040555 | 0 | 0 | 0 | 0 | 0.002737 | 0.237034 | 2,873 | 96 | 79 | 29.927083 | 0.85219 | 0.203968 | 0 | 0.034483 | 0 | 0 | 0.110867 | 0.043995 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.103448 | 0 | 0.293103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
3218c85b611430d9cfad372e384dd6e2a555de20 | 1,440 | py | Python | Python-desenvolvimento/ex059.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | Python-desenvolvimento/ex059.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | Python-desenvolvimento/ex059.py | MarcosMaciel-MMRS/Desenvolvimento-python | 2b2fc54788da3ca110d495b9e80a494f2b31fb09 | [
"MIT"
] | null | null | null | # The program reads two numbers and then shows a menu.
''' The program asks for two numbers.
Table
[1] - Add
[2] - Multiply
[3] - Largest
[4] - New numbers
[5] - Exit
'''
from time import sleep
n1 = int(input('Enter the first number: '))
n2 = int(input('Enter the second number: '))
escolha = 0
while escolha != 5:
    print('-=-'*15)
    print('''
    Table
    [1] - Add
    [2] - Multiply
    [3] - Largest
    [4] - New numbers
    [5] - Exit
    ''')
    escolha = int(input('>>>>>>>> Which option: '))
    if escolha == 1:
        soma = n1 + n2
        print('The sum of {} and {} = {}'.format(n1, n2, soma))
    elif escolha == 2:
        multi = n1 * n2
        print('The product of {} x {} = {}'.format(n1, n2, multi))
    elif escolha == 3:
        if n1 > n2:
            print('{} is greater than {}.'.format(n1, n2))
        elif n1 == n2:
            print('The numbers are equal.')
        else:
            print('{} is greater than {}.'.format(n2, n1))
    elif escolha == 4:
        n1 = int(input('Enter the first number: '))
        n2 = int(input('Enter the second number: '))
    elif escolha == 5:
        print('FINISHING....')
    else:
        print('Invalid option. Try again!')
    sleep(2)
print('End of program. Come back soon!')
| 30.638298 | 87 | 0.4875 | 171 | 1,440 | 4.105263 | 0.368421 | 0.039886 | 0.08547 | 0.091168 | 0.430199 | 0.430199 | 0.310541 | 0.310541 | 0.310541 | 0.310541 | 0 | 0.045603 | 0.360417 | 1,440 | 46 | 88 | 31.304348 | 0.716612 | 0.126389 | 0 | 0.162162 | 0 | 0 | 0.411508 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.027027 | 0 | 0.027027 | 0.27027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
321a077e7efc9d3ad00c1d1ebd91b91a3a74dcc6 | 3,320 | py | Python | mdstudio_cli/wamp_services.py | MD-Studio/lie_cli | 567c2c7f146898b804f418e052f01960fca7e0d4 | [
"Apache-2.0"
] | null | null | null | mdstudio_cli/wamp_services.py | MD-Studio/lie_cli | 567c2c7f146898b804f418e052f01960fca7e0d4 | [
"Apache-2.0"
] | 1 | 2019-12-03T10:47:11.000Z | 2019-12-03T10:47:11.000Z | mdstudio_cli/wamp_services.py | MD-Studio/MDStudio_cli | 567c2c7f146898b804f418e052f01960fca7e0d4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
file: wamp_services.py
WAMP service methods the module exposes.
"""
import os
import json
import logging
from twisted.internet import reactor
from autobahn.wamp.exception import ApplicationError
from graphit.graph_io.io_jsonschema_format import read_json_schema
from mdstudio.component.session import ComponentSession
from mdstudio.deferred.chainable import chainable
from mdstudio_cli.schema_parser import SchemaParser, write_schema_info, prepaire_config, process_results
from mdstudio_cli.schema_classes import CLIORM
lg = logging.getLogger('clilogger')
class CliWampApi(ComponentSession):
"""
CLI WAMP methods.
"""
def authorize_request(self, uri, claims):
"""
If you were allowed to call this in the first place,
I will assume you are authorized
"""
return True
def result_callback(self, result):
"""
WAMP result callback
Process the results storing all file-like output to file.
Optionally store the full results directory as a JSON file.
:param result: WAMP results
:type result: :py:dict
"""
# Store results as JSON
if self.config.extra.get('store_json', False):
result_json = os.path.join(os.getcwd(), '{0}.json'.format(self.config.extra['uri']))
json.dump(result, open(result_json, 'w'))
# Process file-like output and print remaining.
process_results(result)
# Disconnect from broker and stop reactor event loop
self.disconnect()
reactor.stop()
def error_callback(self, failure):
"""
WAMP error callback
Process a WAMP endpoint failure and write the failure message to
standard out (stdout).
:param failure: Endpoint failure message
"""
failure_message = failure
if isinstance(failure, Exception) or isinstance(failure, str):
failure_message = str(failure)
elif isinstance(failure.value, ApplicationError):
failure_message = failure.value.error_message()
else:
failure.getErrorMessage()
lg.error('Unable to process: {0}'.format(failure_message))
# Disconnect from broker and stop reactor event loop
self.disconnect()
reactor.stop()
@chainable
def on_run(self):
# Get endpoint config
config = self.config.extra
# Retrieve JSON schemas for the endpoint request and response
schemaparser = SchemaParser(self)
request = yield schemaparser.get(uri=config['uri'], request=True)
request = read_json_schema(request)
request.orm = CLIORM
# Write print friendly endpoint definition to stdout or call endpoint
if config['get_endpoint_info']:
write_schema_info(request, config['uri'])
# Disconnect from broker and stop reactor event loop
self.disconnect()
reactor.stop()
else:
endpoint_input = prepaire_config(request, config['package_config'])
# Call method and wait for results
deferred = self.call(config['uri'], endpoint_input)
deferred.addCallback(self.result_callback)
deferred.addErrback(self.error_callback)
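
# Added summary (derived from the code above): self.config.extra is expected
# to provide at least these keys:
#   'uri'               -- WAMP endpoint to call
#   'store_json'        -- optional bool; dump the full result to '<uri>.json'
#   'get_endpoint_info' -- bool; print the request schema instead of calling
#   'package_config'    -- user input merged into the request by prepaire_config()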
| 29.380531 | 104 | 0.656928 | 383 | 3,320 | 5.592689 | 0.362924 | 0.039216 | 0.021008 | 0.032213 | 0.095238 | 0.095238 | 0.095238 | 0.095238 | 0.095238 | 0.095238 | 0 | 0.001226 | 0.262952 | 3,320 | 112 | 105 | 29.642857 | 0.874132 | 0.283133 | 0 | 0.166667 | 0 | 0 | 0.041986 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
321a849775e824655942ecde437bbf281050052a | 4,692 | py | Python | code_verification.py | KeithYue/weibo-keywords-crawler | 6d90dea6c1619e06b1846f849e3c3c89fad26dd8 | [
"MIT"
] | 16 | 2015-03-08T20:32:47.000Z | 2021-12-26T09:11:34.000Z | code_verification.py | KeithYue/weibo-keywords-crawler | 6d90dea6c1619e06b1846f849e3c3c89fad26dd8 | [
"MIT"
] | 1 | 2015-04-10T03:15:23.000Z | 2016-04-10T12:16:58.000Z | code_verification.py | KeithYue/weibo-keywords-crawler | 6d90dea6c1619e06b1846f849e3c3c89fad26dd8 | [
"MIT"
] | 13 | 2015-10-15T07:37:35.000Z | 2021-12-26T09:14:01.000Z | # coding=utf-8
import base64
import time
import logging
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.common.exceptions import NoSuchElementException
from PIL import Image
from io import StringIO, BytesIO
from synchronize_util import synchronized, CONSOLE_LOCK
# This module is for code verification
# Every time there would be only one for users
get_image_data = '''
function getBase64Image(img) {
// Create an empty canvas element
var canvas = document.createElement("canvas");
canvas.width = img.width;
canvas.height = img.height;
// Copy the image contents to the canvas
var ctx = canvas.getContext("2d");
ctx.drawImage(img, 0, 0);
// Get the data-URL formatted image
// Firefox supports PNG and JPEG. You could check img.src to
// guess the original format, but be aware the using "image/jpg"
// will re-encode the image.
var dataURL = canvas.toDataURL("image/png");
return dataURL.replace(/^data:image\/(png|jpg);base64,/, "");
// return dataURL;
}
code_img = document.querySelector('img[node-type="yzm_img"]');
// code_img = document.querySelector('img');
data_URL = getBase64Image(code_img);
return data_URL;
'''
def test():
driver = webdriver.PhantomJS()
driver.get('http://s.weibo.com/ajax/pincode/pin?type=sass&ts=1405404856')
verify_user(driver)
return
def get_img(base64_str):
'''
convert the base64 string to png image --> PIL.Image
'''
base64_bytes = base64.b64decode(base64_str)
image_bytes_io = BytesIO(base64_bytes)
image = Image.open(image_bytes_io)
return image
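
# Hedged self-check (added): round-trip a tiny generated PNG through
# get_img(). The payload is synthesized here, not taken from Weibo.
def _demo_get_img():
    buf = BytesIO()
    Image.new('RGB', (1, 1), 'white').save(buf, format='PNG')
    payload = base64.b64encode(buf.getvalue()).decode('ascii')
    assert get_img(payload).size == (1, 1)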
def get_code(img):
'''
given an image, return its code, each time only one image could be served --> the code string
'''
img.show()
    verification_code = input('Please input the verification code: ')
return verification_code
def verify_user_for_search(driver):
'''
when the driver shows the verification code, load the code in the browser and input the code-->the code
driver: the current driver which comes into the verification code
'''
while True:
feed = driver.find_elements_by_class_name('feed_list')
if len(feed) == 0:
# there is no feed in this page, meaning you need to input the code
code_png = get_img(driver.execute_script(get_image_data))
verification_code = get_code(code_png)# this action needs to be primitive
code_input = driver.find_element_by_xpath('//input[@node-type="yzm_input"]')
code_input.click()
code_input.send_keys(verification_code.strip())
submit_button = driver.find_element_by_xpath('//a[@node-type="yzm_submit"]')
submit_button.click()
time.sleep(5)
driver.get_screenshot_as_file('./screenshot/after_verfiy.png')
else:
break
logging.info('verification completed!')
return
def verify_user_for_login(driver):
'''
    Because login is retried in a loop, this verification code is only
    handled once per call, unlike the search-verification case.
'''
if not driver.find_element_by_xpath('//img[@node-type="verifycode_image"]'):
logging.info('There is no verfication code here, continue')
return
else:
try:
# get png, the image instance of PIL
png_element = driver.find_element_by_xpath('//img[@node-type="verifycode_image"]')
location = png_element.location
size = png_element.size
logging.info('vrcode: location--{}, size--{}'.format(location, size))
im = get_img(driver.get_screenshot_as_base64())
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
im = im.crop((left, top, right, bottom)) # defines crop points
verification_code = get_code(im)
code_input = driver.find_element_by_xpath('//input[@name="verifycode"]')
code_input.click()
code_input.send_keys(verification_code.strip())
except Exception as e:
driver.get_screenshot_as_file('./screenshot/login_failed.png')
            logging.error('error: {}; screenshot saved to ./screenshot/login_failed.png'.format(e))
return
@synchronized(CONSOLE_LOCK) # this method is primitive
def verify_user(driver, v_type):
'''
v_type: string, 'search', 'login'
'''
if v_type == 'search':
verify_user_for_search(driver)
elif v_type == 'login':
verify_user_for_login(driver)
else:
logging.info('Unknown verification type')
return
if __name__ == '__main__':
test()
| 31.702703 | 107 | 0.660912 | 600 | 4,692 | 4.981667 | 0.346667 | 0.042824 | 0.028438 | 0.031783 | 0.178655 | 0.117096 | 0.093677 | 0.093677 | 0.06825 | 0.06825 | 0 | 0.011065 | 0.22954 | 4,692 | 147 | 108 | 31.918367 | 0.815768 | 0.140665 | 0 | 0.123711 | 0 | 0.010309 | 0.339164 | 0.123447 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061856 | false | 0 | 0.092784 | 0 | 0.247423 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c4d369413fd643cd1e49d231d62a011fe2c227d | 7,032 | py | Python | blender/addons/io_scene_fmod/__init__.py | tkgamegroup/flame | f1628100cc66e13f84ea3047ea33af019caeb01b | [
"MIT"
] | 25 | 2018-02-28T05:59:50.000Z | 2022-03-18T03:11:52.000Z | blender/addons/io_scene_fmod/__init__.py | tkgamegroup/flame | e5884c7a773c351f3dadadbdb908cfe00f1ce586 | [
"MIT"
] | null | null | null | blender/addons/io_scene_fmod/__init__.py | tkgamegroup/flame | e5884c7a773c351f3dadadbdb908cfe00f1ce586 | [
"MIT"
] | 5 | 2018-05-17T04:16:30.000Z | 2021-12-22T04:02:02.000Z | bl_info = {
"name": "flame model format",
"blender": (2, 81, 6),
"category": "Import-Export",
}
import bpy
from bpy.props import (
BoolProperty,
FloatProperty,
StringProperty,
EnumProperty,
)
from bpy_extras.io_utils import (
ImportHelper,
ExportHelper,
path_reference_mode,
axis_conversion,
)
from bpy_extras import io_utils, node_shader_utils
import ntpath
import xml.etree.ElementTree as ET
class ImportFmod(bpy.types.Operator, ImportHelper):
bl_idname = "import_scene.fmod"
bl_label = "Import Fmod"
bl_options = {'PRESET', 'UNDO'}
filename_ext = ".fmod"
    def execute(self, context):
        # Import is not implemented yet; return a valid operator status
        # instead of None so Blender does not raise on the return value.
        self.report({'WARNING'}, "Fmod import is not implemented")
        return {'CANCELLED'}
def draw(self, context):
pass
def v3_str(v):
return str(round(v[0], 4)) + "," + str(round(v[1], 4)) + "," + str(round(v[2], 4))
def name_compat(name):
if name is None:
return 'None'
else:
return name.replace(' ', '_')
def export_sub(n_meshes, data_file, mat_name, sub_vertics, sub_uvs, sub_normals, sub_indices):
from array import array
n_mesh = ET.SubElement(n_meshes, "meshe", material=mat_name)
if sub_vertics:
ET.SubElement(n_mesh, "positions", offset=str(data_file.tell()), size=str(4 * len(sub_vertics)))
float_array = array('f', sub_vertics)
float_array.tofile(data_file)
sub_vertics.clear()
if sub_uvs:
ET.SubElement(n_mesh, "uvs", offset=str(data_file.tell()), size=str(4 * len(sub_uvs)))
float_array = array('f', sub_uvs)
float_array.tofile(data_file)
sub_uvs.clear()
if sub_normals:
ET.SubElement(n_mesh, "normals", offset=str(data_file.tell()), size=str(4 * len(sub_normals)))
float_array = array('f', sub_normals)
float_array.tofile(data_file)
sub_normals.clear()
if sub_indices:
        ET.SubElement(n_mesh, "indices", offset=str(data_file.tell()), size=str(4 * len(sub_indices)))
uint_array = array('L', sub_indices)
uint_array.tofile(data_file)
sub_indices.clear()
class ExportFmod(bpy.types.Operator, ExportHelper):
bl_idname = "export_scene.fmod"
bl_label = "Export Fmod"
bl_options = {'PRESET'}
filename_ext = ".fmod"
def execute(self, context):
scene = context.scene
if bpy.ops.object.mode_set.poll():
bpy.ops.object.mode_set(mode='OBJECT')
if len(context.selected_objects) < 1 :
return {"CANCELLED"}
ob = context.selected_objects[0].original
oms = []
arm = None
if ob.type == "MESH":
oms.append(ob)
elif ob.type == "ARMATURE":
arm = ob.data
for o in ob.children:
oms.append(o)
else:
            return {'CANCELLED'}
filename = self.filepath
ppath = ntpath.dirname(filename)
model_name = ntpath.splitext(ntpath.split(filename)[1])[0]
n_model = ET.Element("model")
n_meshes = ET.SubElement(n_model, "meshes")
model_data_file = open(filename + ".dat", "wb")
for ob in oms:
me = ob.to_mesh()
if len(me.uv_layers) == 0:
                return {'CANCELLED'}
uvs = me.uv_layers.active.data[:]
if len(uvs) == 0:
ob.to_mesh_clear()
                return {'CANCELLED'}
verts = me.vertices[:]
if len(verts) == 0:
ob.to_mesh_clear()
                return {'CANCELLED'}
faces = me.polygons[:]
if len(faces) == 0:
ob.to_mesh_clear()
                return {'CANCELLED'}
faces.sort(key=lambda a: (a.material_index, a.use_smooth))
me.calc_normals_split()
loops = me.loops
materials = me.materials[:]
material_names = []
for i, m in enumerate(materials):
mat_wrap = node_shader_utils.PrincipledBSDFWrapper(m)
n_material = ET.Element("material", color=v3_str(mat_wrap.base_color), metallic=str(mat_wrap.metallic), roughness=str(mat_wrap.roughness))
color_tex_wrap = getattr(mat_wrap, "base_color_texture", None)
if color_tex_wrap:
image = color_tex_wrap.image
if image:
image.filepath
material_name = m.name
if not material_name:
material_name = str(i)
material_name = (model_name + "_" + material_name + ".fmat").replace(' ', '_')
material_names.append(material_name)
doc = ET.ElementTree(n_material)
doc.write(ntpath.join(ppath, material_name))
group_names = [g.name for g in ob.vertex_groups]
if arm:
for b in arm.edit_bones:
if b.name not in group_names:
continue
curr_mat_idx = faces[0].material_index
sub_vertics = []
sub_uvs = []
sub_normals = []
sub_indices = []
vertex_dict = {}
vert_cnt = 0
for f in faces:
if curr_mat_idx != f.material_index:
                    export_sub(n_meshes, model_data_file, material_names[curr_mat_idx],
                               sub_vertics, sub_uvs, sub_normals, sub_indices)
                    curr_mat_idx = f.material_index
                    vertex_dict.clear()
                    vert_cnt = 0
for l_idx in f.loop_indices:
vi = loops[l_idx].vertex_index
uv = uvs[l_idx].uv
no = loops[l_idx].normal
key = vi, round(uv.x, 4), round(uv.y, 4), round(no.x, 4), round(no.y, 4), round(no.z, 4)
idx = vertex_dict.get(key)
if idx is None:
idx = vert_cnt
v = verts[vi].co
                        sub_vertics.extend([v.x, v.y, v.z])
                        sub_uvs.extend([uv.x, uv.y])
                        sub_normals.extend([no.x, no.y, no.z])
vertex_dict[key] = idx
vert_cnt += 1
sub_indices.append(idx)
            export_sub(n_meshes, model_data_file, material_names[curr_mat_idx],
                       sub_vertics, sub_uvs, sub_normals, sub_indices)
ob.to_mesh_clear()
model_data_file.close()
doc = ET.ElementTree(n_model)
doc.write(filename)
return {"FINISHED"}
def draw(self, context):
pass
def menu_func_import(self, context):
self.layout.operator(ImportFmod.bl_idname, text="flame model (.fmod)")
def menu_func_export(self, context):
self.layout.operator(ExportFmod.bl_idname, text="flame model (.fmod)")
def register():
bpy.utils.register_class(ImportFmod)
bpy.utils.register_class(ExportFmod)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
def unregister():
bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
bpy.utils.unregister_class(ImportFmod)
bpy.utils.unregister_class(ExportFmod)
if __name__ == "__main__":
register()
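
# Hedged usage note (added): once registered inside Blender, the exporter can
# also be invoked from the Python console; the output path is hypothetical:
#   bpy.ops.export_scene.fmod(filepath="/tmp/model.fmod")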
| 30.977974 | 154 | 0.55347 | 856 | 7,032 | 4.313084 | 0.232477 | 0.023835 | 0.021127 | 0.018418 | 0.230228 | 0.182828 | 0.127844 | 0.07909 | 0.059588 | 0.03792 | 0 | 0.007063 | 0.335609 | 7,032 | 226 | 155 | 31.115044 | 0.783176 | 0 | 0 | 0.150838 | 0 | 0 | 0.043658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.061453 | false | 0.01676 | 0.100559 | 0.005587 | 0.273743 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c4f3ff0fca74925cdc32cdab044c3f0a4860df6 | 1,616 | py | Python | tests/dataverk/connectors/storage/test_nais_s3_storage_connector.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 3 | 2019-09-29T20:48:46.000Z | 2021-03-31T10:16:07.000Z | tests/dataverk/connectors/storage/test_nais_s3_storage_connector.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 148 | 2019-02-08T12:30:58.000Z | 2021-03-11T15:31:55.000Z | tests/dataverk/connectors/storage/test_nais_s3_storage_connector.py | navikt/dataverk | 7dd803236433048686dd7a58358bc1c09565b14b | [
"MIT"
] | 1 | 2020-11-18T14:10:05.000Z | 2020-11-18T14:10:05.000Z | import unittest
import requests
from unittest import mock
from dataverk.connectors import NaisS3Connector
from tests.dataverk.connectors.storage.test_resources.mock_nais_s3_api import mock_requests_put, mock_requests_get
from tests.dataverk.connectors.storage.test_resources.nais_s3_storage_common import NAIS_S3_ENDPOINT, NAIS_S3_BLOB_NAME, \
NAIS_S3_RESOURCE_FMT, NAIS_S3_BUCKET_NAME, NAIS_S3_RESOURCE_CONTENT
class TestNaisS3Connector(unittest.TestCase):
def test_class_instantiation(self):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
self.assertIsInstance(s3_conn, NaisS3Connector)
@mock.patch("requests.put", side_effect=mock_requests_put)
def test_write_valid(self, mock_put):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
s3_conn.write(data=NAIS_S3_RESOURCE_CONTENT, destination_blob_name=NAIS_S3_BLOB_NAME, fmt=NAIS_S3_RESOURCE_FMT)
@mock.patch("requests.get", side_effect=mock_requests_get)
def test_read_valid(self, mock_get):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
resource = s3_conn.read(blob_name=f"{NAIS_S3_BLOB_NAME}.{NAIS_S3_RESOURCE_FMT}")
self.assertEqual(resource, NAIS_S3_RESOURCE_CONTENT)
@mock.patch("requests.get", side_effect=mock_requests_get)
def test_read_invalid_resource_not_found(self, mock_get):
s3_conn = NaisS3Connector(NAIS_S3_BUCKET_NAME, NAIS_S3_ENDPOINT)
with self.assertRaises(requests.exceptions.HTTPError):
resource = s3_conn.read(blob_name=f"resource/not-found.{NAIS_S3_RESOURCE_FMT}")
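

# Added runner (a common unittest idiom, not in the original file) so the
# module can be executed directly:
#   python test_nais_s3_storage_connector.py
if __name__ == '__main__':
    unittest.main()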
| 48.969697 | 122 | 0.799505 | 229 | 1,616 | 5.196507 | 0.218341 | 0.110924 | 0.067227 | 0.067227 | 0.478992 | 0.478992 | 0.460504 | 0.336134 | 0.284034 | 0.284034 | 0 | 0.026112 | 0.123144 | 1,616 | 32 | 123 | 50.5 | 0.813691 | 0 | 0 | 0.24 | 0 | 0 | 0.073639 | 0.051361 | 0 | 0 | 0 | 0 | 0.12 | 1 | 0.16 | false | 0 | 0.24 | 0 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c50c15b31a06a2fc340734629a90deb26627e13 | 275 | py | Python | onlineshopping/cart/templatetags/cart_tag.py | SarahMohamedAbdAlkader/DjangoEcommerceProject | edfe3071e5ee301702ff8e55a513efbb8feadab8 | [
"MIT"
] | 1 | 2021-01-27T03:20:45.000Z | 2021-01-27T03:20:45.000Z | onlineshopping/cart/templatetags/cart_tag.py | SarahMohamedAbdAlkader/DjangoEcommerceProject | edfe3071e5ee301702ff8e55a513efbb8feadab8 | [
"MIT"
] | null | null | null | onlineshopping/cart/templatetags/cart_tag.py | SarahMohamedAbdAlkader/DjangoEcommerceProject | edfe3071e5ee301702ff8e55a513efbb8feadab8 | [
"MIT"
] | 1 | 2020-03-24T21:28:31.000Z | 2020-03-24T21:28:31.000Z | from django import template
from cart.models import Order
register = template.Library()
@register.filter
def cart_total(user):
order = Order.objects.filter(user=user, ordered=False)
if order.exists():
return order[0].orderitems.count()
else:
return 0 | 21.153846 | 58 | 0.716364 | 37 | 275 | 5.297297 | 0.621622 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00885 | 0.178182 | 275 | 13 | 59 | 21.153846 | 0.858407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
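# Hedged usage note for cart_tag.py above (added): after `{% load cart_tag %}`
# in a template, the filter renders the item count of the user's open order:
#   Cart ({{ request.user|cart_total }})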
5c51c0397e0805fb91d95ef1e2d33d5b04ad1808 | 1,466 | py | Python | scripts/tomidi.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | null | null | null | scripts/tomidi.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | 1 | 2021-07-07T13:36:18.000Z | 2021-07-07T13:36:18.000Z | scripts/tomidi.py | callistachang/CycleGAN-Music-Transfer | 928e87b4bebc4da1dcf7c43936d2c10fe76170f1 | [
"MIT"
] | null | null | null | import numpy as np
from working import writemidi
def load_npy_data(npy_data):
npy_A = np.load(npy_data[0]) * 1.0 # 64 * 84 * 1
npy_B = np.load(npy_data[1]) * 1.0 # 64 * 84 * 1
npy_AB = np.concatenate(
(
npy_A.reshape(npy_A.shape[0], npy_A.shape[1], 1),
npy_B.reshape(npy_B.shape[0], npy_B.shape[1], 1),
),
axis=2,
) # 64 * 84 * 2
return npy_AB
def save_midis(bars, file_path, tempo=80.0):
padded_bars = np.concatenate(
(
np.zeros((bars.shape[0], bars.shape[1], 24, bars.shape[3])),
bars,
np.zeros((bars.shape[0], bars.shape[1], 20, bars.shape[3])),
),
axis=2,
)
padded_bars = padded_bars.reshape(
-1, 64, padded_bars.shape[2], padded_bars.shape[3]
)
padded_bars_list = []
for ch_idx in range(padded_bars.shape[3]):
padded_bars_list.append(
padded_bars[:, :, :, ch_idx].reshape(
padded_bars.shape[0], padded_bars.shape[1], padded_bars.shape[2]
)
)
writemidi.write_piano_rolls_to_midi(
piano_rolls=padded_bars_list,
program_nums=[0],
is_drum=[False],
filename=file_path,
tempo=tempo,
beat_resolution=4,
)
if __name__ == "__main__":
data = np.load("./JC_J/test/jazz_piano_test_1.npy") * 1.0
data = data.reshape(1, data.shape[0], data.shape[1], 1)
save_midis(data, "uwu.mid")
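
# Hedged example (added): load_npy_data() above pairs two single-channel
# phrase files into one 64 x 84 x 2 tensor; the file names are hypothetical.
#   ab = load_npy_data(['jazz_phrase.npy', 'classic_phrase.npy'])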
| 28.192308 | 80 | 0.566849 | 219 | 1,466 | 3.534247 | 0.30137 | 0.167959 | 0.116279 | 0.033592 | 0.173127 | 0.173127 | 0.147287 | 0.069767 | 0 | 0 | 0 | 0.057197 | 0.284447 | 1,466 | 51 | 81 | 28.745098 | 0.680648 | 0.023874 | 0 | 0.090909 | 0 | 0 | 0.033637 | 0.023125 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.045455 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c541aa9113adae24d37bc78ba5676b3f3fe64e0 | 524 | py | Python | cdwar1.py | fernandopm248/digital-root | b2039eb50a9a23b0a7799dfd36c1a3380b841ed9 | [
"MIT"
] | null | null | null | cdwar1.py | fernandopm248/digital-root | b2039eb50a9a23b0a7799dfd36c1a3380b841ed9 | [
"MIT"
] | null | null | null | cdwar1.py | fernandopm248/digital-root | b2039eb50a9a23b0a7799dfd36c1a3380b841ed9 | [
"MIT"
] | null | null | null | def digital_root (n):
total = 10
while total > 9 :
total = 0
x = str(n)
stringfy = []
cont = (len(x))
i = 0
e = 1
num = []
while i < cont :
stringfy += x[i:e]
i += 1
e += 1
for i in range(len(stringfy)):
t = int(stringfy[i])
num.append(t)
total = sum(num)
n = total
stringfy.clear()
num.clear()
print(total)
digital_root(493193) | 12.186047 | 38 | 0.389313 | 61 | 524 | 3.311475 | 0.459016 | 0.108911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05283 | 0.494275 | 524 | 43 | 39 | 12.186047 | 0.709434 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0 | 0 | 0.043478 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
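# Worked example (added): digital_root(493193) prints 2, because
# 4+9+3+1+9+3 = 29, then 2+9 = 11, then 1+1 = 2.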
5c5a5460d0ba3fe976caca1f2575a005ff9201fc | 35,567 | py | Python | src/data/data_classes.py | sandralima/MLMortgage | 16946e4c1a1cfec87739eb5aa0c711e5e305dc24 | [
"MIT"
] | null | null | null | src/data/data_classes.py | sandralima/MLMortgage | 16946e4c1a1cfec87739eb5aa0c711e5e305dc24 | [
"MIT"
] | null | null | null | src/data/data_classes.py | sandralima/MLMortgage | 16946e4c1a1cfec87739eb5aa0c711e5e305dc24 | [
"MIT"
] | null | null | null | """Module for loading the real datasets."""
import numpy as np
import pandas as pd
import os
import math
import glob
import random_permutation as rp
from datetime import datetime
import sys
RANDOM_SEED = 123  # TODO: remove so the seed is taken from the clock instead.
class DataBatch(object):
"""ABC."""
def __init__(self, architecture, path, period_array, in_tuple=None, dtype='train', cols=None, remainder=False):
        if in_tuple is not None:
self.orig = Data(in_tuple)
self.features, self.labels = in_tuple
self._current_num_examples, self._num_classes = self.labels.shape
            self.weights = self.orig.labels @ get_weights(self.orig.labels)  # 30/01/2018: these weights are not used in the tensor model!
self._file_index = 0
        elif path is not None:
self.features, self.labels = None, None
self.h5_path = path
self.dtype = dtype
self.all_files = glob.glob(os.path.join(self.h5_path, "*.h5"))
self._num_columns = architecture['n_input'] # dataset_file.get_storer(self.dtype+ '/features').ncols - self.index_length
self._num_classes = architecture['n_classes'] # dataset_file.get_storer(self.dtype+'/labels').ncols - self.index_length
if (dtype == 'valid'):
num_exam = architecture['valid_num_examples']
else:
num_exam = architecture['total_num_examples']
            if cols is None:
self._dict = self.get_metadata_dataset(num_exam)
else:
self._dict = self.get_metadata_dataset_cols(num_exam, cols, remainder)
            if self._dict is None:
raise ValueError('DataBatch: The dictionary was not loaded!')
if dtype == 'train':
self._loan_random = rp.CustomRandom(self._total_num_examples) # np.random.RandomState(RANDOM_SEED)
self.dataset_index = 0 #to record the access to files
self._file_index = 0 #to record the sequential order inside a file
# self.dataset = pd.HDFStore(self.all_files[self.dataset_index]) # the first file of the path
# self._current_num_examples = self.dataset.get_storer(self.dtype+'/features').nrows
# self._num_columns = self.dataset.get_storer('features').attrs.num_columns
self.period_range = period_array #set(range(period_array[0], period_array[1]+1))
#self.period_features = set(list(self.dataset['features'].index.get_level_values(2)))
#self.period_inter = self.period_features.intersection(self.period_range)
self.transitions = {'MBA_DELINQUENCY_STATUS': ['0','3','6','9','C','F','R']}
if any('MBA_DELINQUENCY_STATUS' in s for s in self.features_list):
self.idx_transitions = [self.features_list.index('MBA_DELINQUENCY_STATUS_' + v) for v in self.transitions['MBA_DELINQUENCY_STATUS']]
else: #Dataset empty!
self._dict = None
def get_metadata_dataset(self, max_rows):
try:
files_dict = {}
self._total_num_examples = 0
ok_inputs = True
files_dict[0] = {}
files_dict[0]['dataset_features'] = [] # np.empty((max_rows, num_feat), dtype=np.float32)
files_dict[0]['dataset_labels'] = [] # np.empty((max_rows,num_class), dtype=np.int8)
for i, file_path in zip(range(len(self.all_files)), self.all_files):
with pd.HDFStore(file_path) as dataset_file:
print(file_path, '...to load')
total_rows = dataset_file.get_storer(self.dtype + '/features').nrows
if (total_rows <= max_rows):
max_rows -= total_rows
files_dict[0]['dataset_features'].extend(dataset_file.select(self.dtype+'/features', start=0).values) #, stop=500000
files_dict[0]['dataset_labels'].extend(dataset_file.select(self.dtype+'/labels', start=0, stop=total_rows).values)
else:
total_rows = max_rows
files_dict[0]['dataset_features'].extend(dataset_file.select(self.dtype+'/features', start=0, stop=total_rows).values) #, stop=500000
files_dict[0]['dataset_labels'].extend(dataset_file.select(self.dtype+'/labels', start=0, stop=total_rows).values)
if (ok_inputs):
self.index_length = len(dataset_file.get_storer(self.dtype+'/features').attrs.data_columns)
self.features_list = dataset_file.get_storer(self.dtype+'/features').attrs.non_index_axes[0][1][self.index_length:]
self.labels_list = dataset_file.get_storer(self.dtype+'/labels').attrs.non_index_axes[0][1][self.index_length:]
ok_inputs = False
self._total_num_examples += total_rows
print(file_path, ' loaded in RAM')
if (total_rows == max_rows):
break
files_dict[0]['nrows'] = self._total_num_examples
files_dict[0]['init_index'] = 0
files_dict[0]['end_index'] = self._total_num_examples
#files_dict[0]['class_weights'] = self.get_weights_class(files_dict[0]['dataset_labels'])
files_dict[0]['class_weights'] = self.get_global_weights_transition_class(files_dict[0])
return files_dict
except Exception as e:
raise ValueError('Error in retrieving the METADATA object: ' + str(e))
def get_weights_class(self, labels):
class_weights = np.sum(labels, axis=0)
print('class_weights', class_weights)
class_weights = np.round(class_weights/np.float32(self._total_num_examples),decimals=3)
# 1-weights approach:
class_weights = np.subtract([1]*len(class_weights), class_weights)
#normalizing 1-weights approach:
#sumcw = np.sum(class_weights)
#class_weights = np.round(class_weights/np.float32(sumcw),decimals=3)
print('class_weights', class_weights)
return class_weights
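
    # Worked example (added): with three classes and label counts
    # [800, 150, 50] over 1000 rows, the frequencies are [0.8, 0.15, 0.05]
    # and the 1-weight scheme above returns [0.2, 0.85, 0.95], so rarer
    # classes receive larger weights.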
def get_weights_transition_class(self, data_dict):
categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0','3','6','9','C','F','R']}
idx_categorical_cols = {}
for cat, values in categorical_cols.items():
idx_categorical_cols[cat] = [[], []]
if any(cat in s for s in self.features_list):
idx_categorical_cols[cat][0].extend([self.features_list.index(cat+'_'+v) for v in values])
idx_categorical_cols[cat][1].extend([cat+'_'+v for v in values])
print(cat, 'is found', len(values), len(idx_categorical_cols[cat][0]), len(idx_categorical_cols[cat][1]))
print(idx_categorical_cols)
self._idx_categorical_cols = idx_categorical_cols['MBA_DELINQUENCY_STATUS'][0]
trans_subset = []
weights_mtx=[]
for cat, values in idx_categorical_cols.items():
for val in values[0]:
print('val', val)
trans_subset = [data_dict['dataset_labels'][i] for i, elem in enumerate(data_dict['dataset_features']) if elem[val]==1]
total_ex = len(trans_subset)
print('total_ex: ', total_ex)
if (total_ex>0):
print('trans_subset[0]: ', trans_subset[0])
class_weights = np.sum(trans_subset, axis=0)
print('class_weights', class_weights)
class_weights = np.round(class_weights/np.float32(total_ex),decimals=3)
# 1-weights approach:
class_weights = np.subtract([1]*len(class_weights), class_weights)
else:
class_weights = np.zeros((self._num_classes), dtype='float32')
print('class_weights', class_weights)
weights_mtx.append(class_weights)
weights_mtx= np.array(weights_mtx)
print('weights_mtx', weights_mtx)
return weights_mtx
def get_global_weights_transition_class(self, data_dict):
categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0','3','6','9','C','F','R']}
idx_categorical_cols = {}
for cat, values in categorical_cols.items():
idx_categorical_cols[cat] = [[], []]
if any(cat in s for s in self.features_list):
idx_categorical_cols[cat][0].extend([self.features_list.index(cat+'_'+v) for v in values])
idx_categorical_cols[cat][1].extend([cat+'_'+v for v in values])
print(cat, 'is found', len(values), len(idx_categorical_cols[cat][0]), len(idx_categorical_cols[cat][1]))
print(idx_categorical_cols)
self._idx_categorical_cols = idx_categorical_cols['MBA_DELINQUENCY_STATUS'][0]
trans_subset = []
weights_mtx=[]
for cat, values in idx_categorical_cols.items():
for val in values[0]:
print('val', val)
trans_subset = [data_dict['dataset_labels'][i] for i, elem in enumerate(data_dict['dataset_features']) if elem[val]==1]
total_ex = len(trans_subset)
print('total_ex: ', total_ex, 'self._total_num_examples: ', self._total_num_examples)
if (total_ex>0):
print('trans_subset[0]: ', trans_subset[0])
class_weights = np.sum(trans_subset, axis=0)
print('class_weights', class_weights)
class_weights = np.round(class_weights/np.float32(self._total_num_examples),decimals=3)
# 1-weights approach:
class_weights = np.subtract([1]*len(class_weights), class_weights)
else:
class_weights = np.zeros((self._num_classes), dtype='float32')
print('class_weights', class_weights)
weights_mtx.append(class_weights)
weights_mtx= np.array(weights_mtx)
print('weights_mtx', weights_mtx)
return weights_mtx
def get_metadata_dataset_cols(self, max_rows, cols, remainder):
try:
files_dict = {}
self._total_num_examples = 0
ok_inputs = True
files_dict[0] = {}
files_dict[0]['dataset_features'] = [] # np.empty((max_rows, num_feat), dtype=np.float32)
files_dict[0]['dataset_labels'] = [] # np.empty((max_rows,num_class), dtype=np.int8)
for i, file_path in zip(range(len(self.all_files)), self.all_files):
with pd.HDFStore(file_path) as dataset_file:
print(file_path, '...to load')
total_rows = dataset_file.get_storer(self.dtype + '/features').nrows
if (ok_inputs):
self.index_length = len(dataset_file.get_storer(self.dtype+'/features').attrs.data_columns)
if (remainder==True):
cols = set(dataset_file.get_storer(self.dtype+'/features').attrs.non_index_axes[0][1][self.index_length:]) - set(cols)
cols =list(cols)
self.features_list = cols
print('Columns of dataset: ', len(self.features_list), self.features_list)
self.labels_list = dataset_file.get_storer(self.dtype+'/labels').attrs.non_index_axes[0][1][self.index_length:]
ok_inputs = False
if (total_rows <= max_rows):
max_rows -= total_rows
df_feat = dataset_file.select(self.dtype+'/features', start=0)
files_dict[0]['dataset_features'].extend(df_feat[self.features_list].values) #, stop=500000
del df_feat
print('len(files_dict[0][dataset_features][0]): ', len(files_dict[0]['dataset_features'][0]))
df_lab = dataset_file.select(self.dtype+'/labels', start=0, stop=total_rows)
files_dict[0]['dataset_labels'].extend(df_lab.values)
del df_lab
else:
total_rows = max_rows
df_feat = dataset_file.select(self.dtype+'/features', start=0, stop=total_rows)
files_dict[0]['dataset_features'].extend(df_feat[self.features_list].values) #, stop=500000
del df_feat
print('len(files_dict[0][dataset_features][0]): ', len(files_dict[0]['dataset_features'][0]))
df_lab = dataset_file.select(self.dtype+'/labels', start=0, stop=total_rows)
files_dict[0]['dataset_labels'].extend(df_lab.values)
del df_lab
self._total_num_examples += total_rows
print(file_path, ' loaded in RAM')
if (total_rows == max_rows):
break
files_dict[0]['nrows'] = self._total_num_examples
files_dict[0]['init_index'] = 0
files_dict[0]['end_index'] = self._total_num_examples
class_weights = np.sum(files_dict[0]['dataset_labels'], axis=0)
print('class_weights', class_weights)
class_weights = np.round(class_weights/np.float32(self._total_num_examples),decimals=3)
# 1-weights approach:
class_weights = np.subtract([1]*len(class_weights), class_weights)
#normalizing 1-weights approach:
#sumcw = np.sum(class_weights)
#class_weights = np.round(class_weights/np.float32(sumcw),decimals=3)
print('class_weights', class_weights)
files_dict[0]['class_weights'] = class_weights
return files_dict
except Exception as e:
raise ValueError('Error in retrieving the METADATA object: ' + str(e))
# def get_metadata_dataset_repeats(self, repeats):
# try:
# files_dict = {}
# index = 0
# for z in range(repeats):
# for file_path in self.all_files:
# dataset_file = pd.HDFStore(file_path) # the first file of the path
# dataset_features = dataset_file.select(self.dtype+'/features', start=0, stop=500000).values # , stop=5000000
# nrows = dataset_features.shape[0] # dataset_file.get_storer(self.dtype + '/features').nrows
# dataset_labels = dataset_file.select(self.dtype+'/labels', start=0, stop=nrows).values
# files_dict[index] = {'path': file_path, 'nrows': nrows,
# 'init_index': self._total_num_examples, 'end_index': self._total_num_examples + nrows,
# 'dataset' : dataset_file, 'dataset_features' : dataset_features, 'dataset_labels': dataset_labels}
# self._total_num_examples += nrows
# print('dict: ', files_dict[index], ' total rows: ', self._total_num_examples)
# index += 1
# # if dataset.is_open: dataset.close()
# return files_dict
# except Exception as e:
# raise ValueError('Error in retrieving the METADATA object: ' + str(e))
# this method batches the training set in lots of size batch_size, if it reaches the end, concatenates the tail with the front and continues until the num_epoch.
def next_batch(self, batch_size):
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('batch_size has to be of int type.')
# if self._file_index == 0:
# self.sample()
# print('self._file_index: ', self._file_index)
# print('self._file_index end: ', self._file_index + batch_size)
if self._file_index + batch_size <= self._current_num_examples:
temp_features = self.features[self._file_index:
self._file_index + batch_size, :]
temp_labels = self.labels[self._file_index:
self._file_index + batch_size]
self._file_index += batch_size
# if _global_index has become _num_examples, we need to reset it to
# zero. Otherwise, we don't change it. The following line does this.
self._file_index = self._file_index % self._current_num_examples
else:
temp_end = self._file_index + batch_size - self._current_num_examples
temp_features = np.concatenate(
(self.features[self._file_index:, :],
self.features[:temp_end, :]),
axis=0)
temp_labels = np.concatenate(
(self.labels[self._file_index:], self.labels[:temp_end]),
axis=0)
self._file_index = temp_end
# self.shuffle()
self._file_index = 0
return temp_features, temp_labels, np.array(
[1.0], dtype=np.dtype('float32')) # temp_weights
def next_sequential_batch_period(self, batch_size):
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('DataBatch: batch_size has to be of int type.')
if (self.dataset==None):
raise ValueError('DataBatch: The file_dataset was not loaded!')
if self._file_index + batch_size <= self._current_num_examples:
temp_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index, stop=self._file_index + batch_size)
temp_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index, stop=self._file_index + batch_size)
self._file_index += batch_size
else:
temp_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index)
temp_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]), start=self._file_index)
self._file_index = 0
self.dataset_index += 1
self.dataset.close()
self.dataset = pd.HDFStore(self.all_files[self.dataset_index]) # the next file of the path
self._current_num_examples = self.dataset.get_storer('features').nrows
return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32')) # temp_weights
def next_sequential_batch(self, batch_size):
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('DataBatch: batch_size has to be of int type.')
if (self._dict==None):
raise ValueError('DataBatch: The dataset was not loaded!')
if self._file_index + batch_size <= self._dict[self.dataset_index]['nrows']:
# temp_features = pd.read_hdf(self._dict[self.dataset_index]['dataset'], self.dtype+'/features', start=self._file_index, stop=self._file_index + batch_size)
# temp_labels = pd.read_hdf(self._dict[self.dataset_index]['dataset'], self.dtype+'/labels', start=self._file_index, stop=self._file_index + batch_size)
# temp_features = self._dict[self.dataset_index]['dataset'].select(self.dtype+'/features', start=self._file_index, stop=self._file_index + batch_size)
temp_features = np.array(self._dict[self.dataset_index]['dataset_features'][self._file_index: self._file_index + batch_size])
temp_labels = np.array(self._dict[self.dataset_index]['dataset_labels'][self._file_index: self._file_index + batch_size])
self._file_index += batch_size
else:
# temp_features = pd.read_hdf(self._dict[self.dataset_index]['dataset'], self.dtype+'/features', start=self._file_index)
# temp_labels = pd.read_hdf(self._dict[self.dataset_index]['dataset'], self.dtype+'/labels', start=self._file_index)
temp_features = np.array(self._dict[self.dataset_index]['dataset_features'][self._file_index :])
temp_labels = np.array(self._dict[self.dataset_index]['dataset_labels'][self._file_index :])
# hdf = pd.read_hdf('storage.h5', 'd1', where=['A>.5'], columns=['A','B'])
self._file_index = 0
#self.dataset_index += 1
#if (self.dataset_index >= len(self.all_files)):
# self.dataset_index = 0
# self.dataset.close()
# self.dataset = pd.HDFStore(self.all_files[self.dataset_index]) # the next file of the path
# self._current_num_examples = self.dataset.get_storer(self.dtype+'/features').nrows
transitions = temp_features[:, self.idx_transitions]
return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32')), transitions # temp_weights
def next_random_batch_perfiles(self, batch_size): # pending!!
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('DataBatch: batch_size has to be of int type.')
if (self.h5_path==None):
raise ValueError('DataBatch: The file_dataset was not loaded!')
# all_files = glob.glob(os.path.join(self.h5_path, "*.h5"))
records_per_file = math.ceil(np.float32(batch_size / len(self.all_files)))
#period_range = set(range(self.period_range[0], self.period_range[1]+1))
features_list = self.dataset.get_storer('features').attrs.non_index_axes[0][1][3:]
temp_features = pd.DataFrame(None,columns=features_list)
labels_list = self.dataset.get_storer('labels').attrs.non_index_axes[0][1][3:]
temp_labels = pd.DataFrame(None,columns=labels_list)
for file_path in self.all_files:
# if self.dataset.is_open: self.dataset.close()
self.dataset = pd.HDFStore(file_path) # the first file of the path
self._current_num_examples = self.dataset.get_storer('features').nrows
self._num_columns = self.dataset.get_storer('features').ncols - len(self.dataset.get_storer('features').attrs.data_columns)
self._num_classes = self.dataset.get_storer('labels').ncols - len(self.dataset.get_storer('labels').attrs.data_columns)
# random_loan= np.random.sample(range(self._num_examples), k=records_per_file) # if one is after the training dates?
period_random = np.random.RandomState()
for i in range(records_per_file):
while True:
try:
random_loan = self._loan_random.randint(self._current_num_examples)
loan_id = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]),start=random_loan, stop=random_loan+1).index.get_level_values(0)[0]
if str(loan_id):
df_features = self.dataset.select('features', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]) + ' & LOAN_ID=' + str(loan_id))
df_labels = self.dataset.select('labels', "PERIOD>=" + str(self.period_range[0]) + ' & PERIOD<=' + str(self.period_range[1]) + ' & LOAN_ID=' + str(loan_id))
# df_features = self.dataset['features'].loc[(loan_id, slice(None), slice(None)), :]
# df_labels = self.dataset['labels'].loc[(loan_id, slice(None), slice(None)), :]
if (df_features.shape[0] > 0):
r_period = period_random.randint(df_features.shape[0])
temp_features = pd.concat([temp_features, df_features.iloc[r_period, :].to_frame().T], ignore_index=True, copy=False)
temp_labels = pd.concat([temp_labels, df_labels.iloc[r_period, :].to_frame().T], ignore_index=True, copy=False)
break
except Exception as e:
print('Invalid Loan: ' + str(e))
print('temp_features')
self.dataset.close()
return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32')) # temp_weights
def next_random_batch(self, batch_size): # pending!! --_exp
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('DataBatch: batch_size has to be of int type.')
if (self.h5_path==None):
raise ValueError('DataBatch: The file_dataset was not loaded!')
temp_features = [] #np.empty((batch_size,len(self.features_list)))
temp_labels = [] #np.zeros((batch_size,len(self.labels_list)))
random_batch = np.array(list(self._loan_random.get_batch(batch_size)))
#print(len(random_batch), random_batch)
orb_size = 0
for k, v in self._dict.items():
try:
records_per_file = np.logical_and(random_batch>=v['init_index'], random_batch<(v['end_index']))
orb = np.sort(random_batch[records_per_file]) - v['init_index']
#print(len(orb), orb)
assert(len(orb)==batch_size)
#print(len(orb), orb)
if (len(orb)>0):
temp_features.extend(np.array([v['dataset_features'][index] for index in orb]))
temp_labels.extend(np.array([v['dataset_labels'][index] for index in orb]))
#print('temp ready!!')
orb_size += len(orb)
except Exception as e:
print('Invalid Range: ' + str(e))
assert(np.where(np.sum(temp_labels, axis=1)==0)[0].size == 0)
temp_features = np.array(temp_features)
temp_labels = np.array(temp_labels)
#print('shapes: ', temp_features.shape, temp_labels.shape)
assert(temp_features.shape[0]==batch_size)
assert(temp_labels.shape[0]==batch_size)
# the same permutation:
permutation = np.random.permutation(len(temp_features))
temp_features = temp_features[permutation, :]
temp_labels = temp_labels[permutation, :]
transitions = temp_features[:, self.idx_transitions]
return temp_features, temp_labels, transitions #, np.array([1.0], dtype=np.dtype('float32')) # temp_weights
def next_random_batch_ind_access(self, batch_size): # pending!! --_ind_access
"""Get the next batch of the data with the given batch size."""
if not isinstance(batch_size, int):
raise TypeError('DataBatch: batch_size has to be of int type.')
if (self.h5_path==None):
raise ValueError('DataBatch: The file_dataset was not loaded!')
features_list = self.dataset.get_storer('features').attrs.non_index_axes[0][1][self.index_length:]
temp_features = pd.DataFrame(None,columns=features_list)
labels_list = self.dataset.get_storer('labels').attrs.non_index_axes[0][1][self.index_length:]
temp_labels = pd.DataFrame(None,columns=labels_list)
random_batch = self._loan_random.get_batch(batch_size)
startTime = datetime.now()
for i in random_batch:
try:
startTime1 = datetime.now()
partial_number = 0
values_list = list(self._dict.values())
for e in values_list:
partial_number += e['nrows']
if partial_number >= i:
break
if self.dataset.is_open: self.dataset.close()
self.dataset = pd.HDFStore(e['path']) # the first file of the path
self._current_num_examples = self.dataset.get_storer('features').nrows
self._num_columns = self.dataset.get_storer('features').ncols - len(self.dataset.get_storer('features').attrs.data_columns)
self._num_classes = self.dataset.get_storer('labels').ncols - len(self.dataset.get_storer('labels').attrs.data_columns)
true_loan = self._current_num_examples - (partial_number - i)
df_features = self.dataset.select('features', start=true_loan, stop=true_loan+1)
df_labels = self.dataset.select('labels', start=true_loan, stop=true_loan+1)
temp_features = pd.concat([temp_features, df_features], ignore_index=True, copy=False)
temp_labels = pd.concat([temp_labels, df_labels], ignore_index=True, copy=False)
print('Time for Getting one element: ', datetime.now() - startTime1)
# self.dataset.close()
except Exception as e:
print('Invalid Loan: ' + str(e))
print('Time for Getting' + str(batch_size) +' random elements: ', datetime.now() - startTime)
return temp_features, temp_labels, np.array([1.0], dtype=np.dtype('float32')) # temp_weights
def shuffle(self):
"""Reshuffle the dataset and its corresponding labels."""
permutation = np.random.permutation(self._current_num_examples)
self.features = self.features[permutation, :]
self.labels = self.labels[permutation]
return
def shuffle(self, data, labels):
"""Reshuffle the dataset data and its corresponding labels."""
rows = np.shape(data)[0]
permutation = np.random.permutation(rows)
data = data[permutation, :]
labels = labels[permutation]
return
def sample(self):
"""Sample with replacement."""
probs = self.weights / self.weights.sum()
gamma = 0 # .8
probs = gamma * probs + (1 - gamma) / self._current_num_examples
indices = np.random.choice(
self._current_num_examples, size=self._current_num_examples, replace=True, p=probs)
self.features = self.orig.features[indices, :]
self.labels = self.orig.labels[indices]
# self.weights = self.weights_orig[indices]
def total_num_batch(self, batch_size):
total_batch = 0
values_list = list(self._dict.values())
for e in values_list:
total_batch += math.ceil(np.float32( e['nrows'] / batch_size))
return total_batch
@property
def total_num_examples(self):
"""Get the number of examples in the dataset."""
return self._total_num_examples
@property
def num_classes(self):
"""Get the number of examples in the dataset."""
return self._num_classes
@property
def num_columns(self):
"""Get the number of examples in the dataset."""
return self._num_columns
@property
def class_weights(self):
return self._dict[0]['class_weights']
class Data(object):
"""ABC."""
def __init__(self, in_tuple=None):
if in_tuple !=None:
if in_tuple[0].shape[0] != in_tuple[1].shape[0]:
raise ValueError('Sizes should match!')
self.features, self.labels = in_tuple
self._num_examples, self._num_classes = self.labels.shape
@property
def num_examples(self):
"""Get the number of examples in the dataset."""
return self._num_examples
@property
def num_classes(self):
"""Get the number of examples in the dataset."""
return self._num_classes
class Dataset(object):
"""A new class to represent learning datasets."""
def __init__(self, architecture, train_tuple=None, valid_tuple=None, test_tuple=None, feature_columns=None, train_path=None, valid_path=None, test_path=None,
train_period=[121, 279], valid_period=[280,285], test_period=[286, 304], cols=None, remainder=False):
if (train_tuple!=None and valid_tuple!=None and test_tuple!=None):
self.train = DataBatch(train_tuple, train_period, cols=cols)
self.validation = Data(valid_tuple)
self.test = Data(test_tuple)
self.feature_columns = feature_columns
elif (train_path==None and valid_path==None and test_path==None):
raise ValueError('DataBatch: The path for at least one set was not loaded!')
else:
self.train = DataBatch(architecture, train_path, train_period, dtype='train', cols=cols, remainder=remainder)
self.validation = DataBatch(architecture, valid_path, valid_period, dtype='valid', cols=cols, remainder=remainder) # Data((h5_dataset.get('valid/features'), h5_dataset.get('valid/labels')))
self.test = DataBatch(architecture, test_path, test_period, dtype='test', cols=cols, remainder=remainder) # Data((h5_dataset.get('test/features'), h5_dataset.get('test/labels'))) #if it gives some trouble, it will be loaded at the end.
def get_weights(labels):
"""Get the weights per class."""
# weights = np.ones_like(self.labels[1, :])
weights = labels.shape[0] / (1e-8 + labels.sum(axis=0))
# print(weights)
# weights = np.array(
# [
# 5.561735, 2.349348, 6.397953, 2.575793, 0.056791, 2.591479,
# 94.966762
# ],
# dtype=self.labels.dtype)
return weights
| 58.498355 | 247 | 0.575196 | 4,215 | 35,567 | 4.59573 | 0.076868 | 0.035207 | 0.030871 | 0.019617 | 0.697486 | 0.666357 | 0.639926 | 0.616798 | 0.588973 | 0.581539 | 0 | 0.015046 | 0.310484 | 35,567 | 607 | 248 | 58.594728 | 0.774833 | 0.194562 | 0 | 0.523364 | 0 | 0 | 0.086701 | 0.009919 | 0 | 0 | 0 | 0 | 0.009346 | 1 | 0.058411 | false | 0 | 0.018692 | 0.002336 | 0.133178 | 0.077103 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c5bcfb620d91c04a78b6a0659fdd5357ae64bd7 | 402 | py | Python | ch9/place.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | 4 | 2020-05-18T05:25:44.000Z | 2021-07-30T01:02:39.000Z | ch9/place.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | null | null | null | ch9/place.py | chunhua2017/pythonprogrammingdemo | 64e4ac2b33c54cde4671291a6203e94cd96de4ba | [
"MIT"
] | 2 | 2021-09-15T05:41:05.000Z | 2022-01-25T05:44:43.000Z | from tkinter import * #导入tkinter模块
window = Tk() #创建主窗口对象
window.title('Place Example') #设置窗口标题
window.geometry('300x200') #设置窗口大小与位置
colors = ['red', 'green', 'light blue', 'yellow']
#Place放置效果
[Label(window, font="Arial 12",text='place(80,%d),anchor=NW' % (20 + i * 40),
bg=colors[i]).place(x=40, y=20 + i * 40, width=200, height=30)
for i in range(4)
]
#进入Tk事件循环
window.mainloop() | 30.923077 | 77 | 0.646766 | 59 | 402 | 4.40678 | 0.779661 | 0.023077 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 0.159204 | 402 | 13 | 78 | 30.923077 | 0.692308 | 0.124378 | 0 | 0 | 0 | 0 | 0.213256 | 0.063401 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c5e3aae9b20d433324b1ee0506da0c93c47e6f1 | 5,110 | py | Python | satflow/models/hub.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null | satflow/models/hub.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null | satflow/models/hub.py | lewtun/satflow | 6a675e4fa921b4dd023361b55cc2a5fa25b8f8ed | [
"MIT"
] | null | null | null | """
Originally Taken from https://github.com/rwightman/pytorch-image-models/blob/acd6c687fd1c0507128f0ce091829b233c8560b9/timm/models/hub.py
"""
import json
import logging
import os
from functools import partial
from typing import Union, Optional
import pytorch_lightning
import torch
try:
from torch.hub import get_dir
except ImportError:
from torch.hub import _get_torch_home as get_dir
from satflow import __version__
try:
from huggingface_hub import hf_hub_url
from huggingface_hub import cached_download
cached_download = partial(cached_download, library_name="satflow", library_version=__version__)
except ImportError:
hf_hub_url = None
cached_download = None
_logger = logging.getLogger(__name__)
def get_cache_dir(child_dir=""):
"""
Returns the location of the directory where models are cached (and creates it if necessary).
"""
hub_dir = get_dir()
child_dir = () if not child_dir else (child_dir,)
model_dir = os.path.join(hub_dir, "checkpoints", *child_dir)
os.makedirs(model_dir, exist_ok=True)
return model_dir
def has_hf_hub(necessary=False):
if hf_hub_url is None and necessary:
# if no HF Hub module installed and it is necessary to continue, raise error
raise RuntimeError(
"Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`."
)
return hf_hub_url is not None
def hf_split(hf_id):
rev_split = hf_id.split("@")
assert (
0 < len(rev_split) <= 2
), "hf_hub id should only contain one @ character to identify revision."
hf_model_id = rev_split[0]
hf_revision = rev_split[-1] if len(rev_split) > 1 else None
return hf_model_id, hf_revision
def load_cfg_from_json(json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def _download_from_hf(model_id: str, filename: str):
hf_model_id, hf_revision = hf_split(model_id)
url = hf_hub_url(hf_model_id, filename, revision=hf_revision)
return cached_download(url, cache_dir=get_cache_dir("hf"))
def load_model_config_from_hf(model_id: str):
assert has_hf_hub(True)
cached_file = _download_from_hf(model_id, "config.json")
default_cfg = load_cfg_from_json(cached_file)
default_cfg[
"hf_hub"
] = model_id # insert hf_hub id for pretrained weight load during model creation
model_name = default_cfg.get("architecture")
return default_cfg, model_name
def load_state_dict_from_hf(model_id: str):
assert has_hf_hub(True)
cached_file = _download_from_hf(model_id, "pytorch_model.pth")
state_dict = torch.load(cached_file, map_location="cpu")
return state_dict
def cache_file_from_hf(model_id: str):
assert has_hf_hub(True)
cached_file = _download_from_hf(model_id, "pytorch_model.pth")
return cached_file
def load_pretrained(
model,
default_cfg: Optional[dict] = None,
in_chans: int = 12,
strict: bool = True,
) -> Union[torch.nn.Module, pytorch_lightning.LightningModule]:
"""Load pretrained checkpoint
Taken from https://github.com/rwightman/pytorch-image-models/blob/acd6c687fd1c0507128f0ce091829b233c8560b9/timm/models/helpers.py
Args:
model (nn.Module) : PyTorch model module, or LightningModule
default_cfg (Optional[Dict]): default configuration for pretrained weights / target dataset
in_chans (int): in_chans for model
strict (bool): strict load of checkpoint
"""
is_lightning_module = issubclass(model, pytorch_lightning.LightningModule)
default_cfg = default_cfg or getattr(model, "default_cfg", None) or {}
pretrained_path = default_cfg.pop("checkpoint_path", None)
hf_hub_id = default_cfg.pop("hf_hub", None)
if in_chans != default_cfg.get("input_channels", None):
strict = False
_logger.warning(
f"Unable to convert pretrained weights because of mismatch in input channels, using random init for first layer."
)
if not is_lightning_module:
# The model is passed uninitialized, so if not having to do the PL thing, should initialize here
model = model(**default_cfg)
if not pretrained_path and not hf_hub_id:
_logger.warning("No pretrained weights exist for this model. Using random initialization.")
return model
if hf_hub_id and has_hf_hub(necessary=not pretrained_path):
_logger.info(f"Loading pretrained weights from Hugging Face hub ({hf_hub_id})")
if is_lightning_module:
checkpoint = cache_file_from_hf(hf_hub_id)
model.load_from_checkpoint(checkpoint, strict=strict, **default_cfg)
return model
state_dict = load_state_dict_from_hf(hf_hub_id)
else:
if is_lightning_module:
model.load_from_checkpoint(pretrained_path, strict=strict, **default_cfg)
return model
state_dict = torch.load(pretrained_path, map_location="cpu")
model.load_state_dict(state_dict, strict=strict)
return model
| 35.241379 | 136 | 0.722505 | 725 | 5,110 | 4.798621 | 0.248276 | 0.030181 | 0.028456 | 0.026157 | 0.193446 | 0.148606 | 0.148606 | 0.148606 | 0.124461 | 0.124461 | 0 | 0.015093 | 0.196086 | 5,110 | 144 | 137 | 35.486111 | 0.831792 | 0.171429 | 0 | 0.15 | 0 | 0 | 0.131018 | 0 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.09 | false | 0 | 0.14 | 0 | 0.35 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c616179b40409b6df7a7b9997dac34dc5c2f054 | 1,568 | py | Python | VM/__main__.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 75 | 2017-09-22T22:36:13.000Z | 2022-03-20T16:18:27.000Z | VM/__main__.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 7 | 2019-05-10T19:15:08.000Z | 2021-08-24T16:03:34.000Z | VM/__main__.py | djtech-dev/PyVM | 1edda436ce7073d0cecbf16f5cab2509895d953c | [
"MIT"
] | 14 | 2018-07-02T02:49:46.000Z | 2022-02-22T15:24:47.000Z | import argparse
import shlex
import logging
import sys
from . import VMKernel, ExecutionStrategy
parser = argparse.ArgumentParser()
parser.add_argument('command', help='The command to be executed')
parser.add_argument(
'-t', '--type',
default=ExecutionStrategy.ELF, type=lambda s: ExecutionStrategy[s.upper()],
help='Executable type (elf, flat)'
)
parser.add_argument('-m', '--memory', default=10_000, type=int, help='The amount of memory to give to the VM (bytes)')
parser.add_argument('-d', '--debug', action='store_true', default=False, help='Enable debug output')
parser.add_argument('-v', '--verbose', action='store_true', default=False)
args = parser.parse_args()
if args.verbose:
print(f'Initializing VM with {args.memory:,d} bytes of memory...')
if args.debug:
logging.basicConfig(
stream=sys.stdout,
level=logging.DEBUG,
format='%(message)s'
)
vm = VMKernel(args.memory)
cmd, *cmd_args = shlex.split(args.command)
if args.type == ExecutionStrategy.ELF:
if args.verbose:
print(f'Running ELF executable {cmd!r} with arguments {cmd_args}...')
vm.execute(args.type, cmd, cmd_args)
elif args.type == ExecutionStrategy.FLAT:
if cmd_args:
raise ValueError(f'Running flat binaries with arguments is not supported yet! Arguments: {cmd_args}')
if args.verbose:
print(f'Running flat executable {cmd!r}...')
vm.execute(args.type, cmd)
else:
raise ValueError(f'Invalid executable type: {args.type}')
if args.verbose:
print(f'Command {args.command!r} executed!')
| 30.745098 | 118 | 0.698342 | 215 | 1,568 | 5.027907 | 0.353488 | 0.033303 | 0.078631 | 0.066605 | 0.177613 | 0.07308 | 0 | 0 | 0 | 0 | 0 | 0.003788 | 0.158163 | 1,568 | 50 | 119 | 31.36 | 0.815152 | 0 | 0 | 0.1 | 0 | 0 | 0.314413 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c62285051765df8785d97d4cb963aaaad64edf9 | 2,224 | py | Python | imomoe_client/japanese_anime_page.py | xiaoland/DuerosSkill_ImomoeService | e94e74f9c1939cca80e0592d8d1f2f4d2520bb04 | [
"MIT"
] | 5 | 2020-06-15T01:43:07.000Z | 2021-02-08T03:01:53.000Z | imomoe_client/japanese_anime_page.py | xiaoland/DuerosSkill_ImomoeService | e94e74f9c1939cca80e0592d8d1f2f4d2520bb04 | [
"MIT"
] | null | null | null | imomoe_client/japanese_anime_page.py | xiaoland/DuerosSkill_ImomoeService | e94e74f9c1939cca80e0592d8d1f2f4d2520bb04 | [
"MIT"
] | 2 | 2020-06-15T01:43:17.000Z | 2021-02-08T03:00:17.000Z | # coding=utf-8
import requests
from bs4 import BeautifulSoup as bs
class ImomoeClientJapaneseAnimePage(object):
def __init__(self):
self.base_url = "http://www.imomoe.in"
r = requests.get(self.base_url + "/list/2.html")
self.jp_html = r.content
self.soup = bs(self.jp_html, "lxml")
self.all_div = self.soup.find_all("div")
self.focus_div = self.all_div[13]
self.classic_div = self.all_div[22]
self.movie_div = self.all_div[24]
self.ova_div = self.all_div[25]
def get_focus_list(self):
"""
获取热门日本番剧列表
"""
focus = self.focus_div.select("li")
focus_result = []
for i in focus:
result = {}
result["title"] = i.p.a["title"]
result["href"] = self.base_url + i.p.a["href"]
result["img"] = i.img["src"]
result["info"] = i.select("p")[1].string
focus_result.append(result)
return focus_result
def get_classic_list(self):
"""
获取经典日本番剧列表
"""
classic = self.classic_div.select("li")
classic_result = []
for i in classic:
result = {}
result["title"] = i.p.a["title"]
result["href"] = self.base_url + i.p.a["href"]
result["img"] = i.img["src"]
classic_result.append(result)
return classic_result
def get_movie_list(self):
"""
获取日本剧场版动漫列表
"""
movie = self.movie_div.select("li")
movie_result = []
for i in movie:
result = {}
result["title"] = i.p.a["title"]
result["href"] = self.base_url + i.p.a["href"]
result["img"] = i.img["src"]
movie_result.append(result)
return movie_result
def get_ova_list(self):
"""
获取日本OVA版动漫列表
"""
ova = self.ova_div.select("li")
ova_result = []
for i in ova:
result = {}
result["title"] = i.p.a["title"]
result["href"] = self.base_url + i.p.a["href"]
result["img"] = i.img["src"]
ova_result.append(result)
return ova_result
| 26.164706 | 58 | 0.513939 | 272 | 2,224 | 4.033088 | 0.227941 | 0.014585 | 0.021878 | 0.047402 | 0.251595 | 0.251595 | 0.251595 | 0.251595 | 0.251595 | 0.251595 | 0 | 0.008197 | 0.341727 | 2,224 | 84 | 59 | 26.47619 | 0.74112 | 0.026978 | 0 | 0.296296 | 0 | 0 | 0.071463 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.092593 | false | 0 | 0.037037 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c688d37744f8919869471727e3b542ad0e4c131 | 940 | py | Python | star-track/testprog.py | valentinp72/star-track | 0537f1a9494297d6de5025a945abc41ca4a40738 | [
"MIT"
] | null | null | null | star-track/testprog.py | valentinp72/star-track | 0537f1a9494297d6de5025a945abc41ca4a40738 | [
"MIT"
] | null | null | null | star-track/testprog.py | valentinp72/star-track | 0537f1a9494297d6de5025a945abc41ca4a40738 | [
"MIT"
] | null | null | null | import time
from logic.axis import Axis
e = Axis.azimuth()
#e.motor.set_dist(209)
#e.move_angle(degrees=-90)
#time.sleep(1)
#e.move_angle(seconds=340)
from skyfield.api import load
from skyfield.api import Topos
ts = load.timescale()
planets = load('de421.bsp')
earth = planets["earth"]
moon = planets["moon"]
jupiter = planets["jupiter barycenter"]
stations_url = 'http://celestrak.com/NORAD/elements/stations.txt'
satellites = load.tle(stations_url)
satellite = satellites['ISS (ZARYA)']
target = jupiter
#target = earth + satellite
print(target)
here = earth + Topos('47.827435 N', '-0.397186 W')
while True:
t = ts.now()
astrometric = here.at(t).observe(target)
alt, az, d = astrometric.apparent().altaz()
# print(alt, az)
d, m, s = az.dms(warn=False)
d, m, s = int(d), int(m), int(s)
#d, m, s = 90, 0, 0
print(d, m, s)
e.move_angle(degrees=d, minutes=m, seconds=s)
time.sleep(1)
| 19.583333 | 65 | 0.661702 | 147 | 940 | 4.190476 | 0.496599 | 0.012987 | 0.019481 | 0.055195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.041131 | 0.17234 | 940 | 47 | 66 | 20 | 0.750643 | 0.152128 | 0 | 0 | 0 | 0 | 0.148101 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.16 | 0 | 0.16 | 0.08 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c697a310f0427f6467e603fb1ffba4ed0518bf0 | 717 | py | Python | DFRobot_MAX31855.py | Red-Hide/ZeroP_Software | 8cc1b39966bb69870efabfc47c08aac7af1090c5 | [
"MIT"
] | null | null | null | DFRobot_MAX31855.py | Red-Hide/ZeroP_Software | 8cc1b39966bb69870efabfc47c08aac7af1090c5 | [
"MIT"
] | null | null | null | DFRobot_MAX31855.py | Red-Hide/ZeroP_Software | 8cc1b39966bb69870efabfc47c08aac7af1090c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import sys
import pigpio as GPIO
MAX31855_ADDR = 0x10
pi = GPIO.pi()
class DFRobot_MAX31855:
def __init__(self):
self.i2c = pi.i2c_open(1,MAX31855_ADDR)
def readData(self):
a = pi.i2c_read_byte_data(self.i2c.handle, 0x00)
b = pi.i2c_read_byte_data(self.i2c.handle, 0x01)
# c = pi.i2c_read_byte_data(self.i2c.handle, 0x02)
d = pi.i2c_read_byte_data(self.i2c.handle, 0x03)
return a,b,d
def readCelsius(self):
a,b,d = self.readData()
if(d&0x7):
return False
if(a&0x80):
a = 0xff - a
b = 0xff - b
temp = -((((a << 8) | b) >> 2)+1)*0.25
return temp
temp = (((a << 8) | b) >> 2)*0.25
return temp
| 21.727273 | 53 | 0.595537 | 119 | 717 | 3.420168 | 0.378151 | 0.085995 | 0.088452 | 0.127764 | 0.334152 | 0.29484 | 0.29484 | 0.29484 | 0 | 0 | 0 | 0.111524 | 0.249651 | 717 | 32 | 54 | 22.40625 | 0.644981 | 0.101813 | 0 | 0.083333 | 0 | 0 | 0 | 0 | 0 | 0 | 0.048362 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c6a85a9d1b092254c2132467b21aaf79fda4140 | 1,060 | py | Python | student-project-management-devv/code.py | deveshk72/dsmp-pre-work | f7bdcd976bcff6c93819f35fb5ce013a0d2b9a10 | [
"MIT"
] | null | null | null | student-project-management-devv/code.py | deveshk72/dsmp-pre-work | f7bdcd976bcff6c93819f35fb5ce013a0d2b9a10 | [
"MIT"
] | null | null | null | student-project-management-devv/code.py | deveshk72/dsmp-pre-work | f7bdcd976bcff6c93819f35fb5ce013a0d2b9a10 | [
"MIT"
] | null | null | null | # --------------
# Code starts here
class_1 = ['Geoffrey Hinton','Andrew Ng','Sebastian Raschka','Yoshua Bengio']
class_2 = ['Hilary Mason','Carla Gentry','Corinna Cortes']
new_class = class_1+class_2
print(new_class)
new_class.append('Peter Warden')
print(new_class)
new_class.remove('Carla Gentry')
print(new_class)
# Code ends here
# --------------
# Code starts here
courses = {'Math':65,'English':70,'History':80,'French':70,'Science':60}
print(courses)
total = sum(courses.values())
print(total)
percentage = total/500 * 100
print(percentage)
# Code ends here
# --------------
# Code starts here
mathematics = {'Geoffrey Hinton':78,'Andrew Ng':95,'Sebastian Raschka':65,
'Yoshua Benjo':50,'Hilary Mason':70}
topper = max(mathematics,key = mathematics.get)
print(topper)
# Code ends here
# --------------
# Given string
topper = 'andrew ng'
# Code starts here
first_name,last_name = topper.split()
full_name=last_name+' '+first_name
print(full_name)
certificate_name = full_name.upper()
print(certificate_name)
# Code ends here
| 16.825397 | 77 | 0.683019 | 144 | 1,060 | 4.895833 | 0.423611 | 0.068085 | 0.079433 | 0.04539 | 0.133333 | 0.073759 | 0 | 0 | 0 | 0 | 0 | 0.032468 | 0.128302 | 1,060 | 62 | 78 | 17.096774 | 0.730519 | 0.190566 | 0 | 0.125 | 0 | 0 | 0.263658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c6c408433b03a9fe094016964841137491b8945 | 812 | py | Python | src/paper/management/commands/recalc_discussion_count.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 18 | 2021-05-20T13:20:16.000Z | 2022-02-11T02:40:18.000Z | src/paper/management/commands/recalc_discussion_count.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 109 | 2021-05-21T20:14:23.000Z | 2022-03-31T20:56:10.000Z | src/paper/management/commands/recalc_discussion_count.py | ResearchHub/ResearchHub-Backend-Open | d36dca33afae2d442690694bb2ab17180d84bcd3 | [
"MIT"
] | 4 | 2021-05-17T13:47:53.000Z | 2022-02-12T10:48:21.000Z | '''
Recalculates paper discussion count
'''
from django.core.management.base import BaseCommand
from paper.models import Paper
class Command(BaseCommand):
def handle(self, *args, **options):
papers = Paper.objects.iterator()
count = Paper.objects.count()
print('Recalculating paper discussion count')
for i, paper in enumerate(papers):
try:
print(f'Paper: {paper.id} - {i + 1}/{count}')
new_count = paper.get_discussion_count()
paper.discussion_count = new_count
paper.save()
except Exception as e:
print(
f'Error updating discussion count for paper: {paper.id}', e
)
print('Finished recalculating paper discussion count')
| 30.074074 | 79 | 0.587438 | 86 | 812 | 5.488372 | 0.488372 | 0.190678 | 0.169492 | 0.139831 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001802 | 0.316502 | 812 | 26 | 80 | 31.230769 | 0.848649 | 0.043103 | 0 | 0 | 0 | 0 | 0.219766 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.222222 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c6dee994fb01185ef6915184567b795aa87c303 | 2,291 | py | Python | ccc/py/ccc05j5.py | tylertian123/CompSciSolutions | 33769a20ea613439f92055b40deeac4927cb0a91 | [
"MIT"
] | null | null | null | ccc/py/ccc05j5.py | tylertian123/CompSciSolutions | 33769a20ea613439f92055b40deeac4927cb0a91 | [
"MIT"
] | null | null | null | ccc/py/ccc05j5.py | tylertian123/CompSciSolutions | 33769a20ea613439f92055b40deeac4927cb0a91 | [
"MIT"
] | null | null | null | """
ccc05j5.py: Python solution to CCC '05 J5 (Bananas)
"""
# Create these sets to hold the known valid and invalid A-words to save time
# We already know that 'A' is a valid A-word and an empty string is always invalid
known_awords = set(['A'])
known_nonawords = set([''])
def is_aword(word):
# First try looking into the known sets
if word in known_awords:
return True
if word in known_nonawords:
return False
# Otherwise see if the word starts with a 'B' and ends with an 'S'
# Since 'A' is already handled
# if the word's length is less than 3 it cannot be valid
if len(word) < 3 or not (word[0] == 'B' and word[-1] == 'S'):
known_nonawords.add(word)
return False
# If yes then get the word in between the 'B' and the 'S'
inner_word = word[1:-1]
# See if the inner word is a monkey word
if is_monkey(inner_word):
known_awords.add(word)
return True
else:
known_nonawords.add(word)
return False
# Create sets to hold known results like above
known_words = set()
known_nonwords = set([''])
def is_monkey(word):
# Check known sets
if word in known_words:
return True
if word in known_nonwords:
return False
# First check if the word itself is an A word
if is_aword(word):
known_words.add(word)
return True
else:
# If not then see if it's two monkey words joined together with an N
for i, c in enumerate(word):
# For every single occurrence of N try splitting
if c == 'N':
try:
word1 = word[0:i]
word2 = word[i + 1:]
# See if both parts of the string are monkey words
if is_monkey(word1) and is_monkey(word2):
known_words.add(word)
return True
# Catch the possible IndexError with Ns in the beginning or end of the string
except IndexError:
pass
# If that did not return then the word is not a monkey word
known_nonwords.add(word)
return False
while True:
word = input()
if word == 'X':
break
print("YES" if is_monkey(word) else "NO")
| 32.728571 | 93 | 0.588826 | 342 | 2,291 | 3.877193 | 0.333333 | 0.031674 | 0.058824 | 0.039216 | 0.175716 | 0.156863 | 0 | 0 | 0 | 0 | 0 | 0.011858 | 0.337407 | 2,291 | 69 | 94 | 33.202899 | 0.86166 | 0.389786 | 0 | 0.355556 | 0 | 0 | 0.007273 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0.022222 | 0 | 0 | 0.266667 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c6f489d7f4ddd337cd5c3487bd38ef583744941 | 1,004 | py | Python | 07/p1_test.py | en0/aoc2021 | 14c74f872319f023b7ee4293009445dab315716f | [
"Unlicense"
] | null | null | null | 07/p1_test.py | en0/aoc2021 | 14c74f872319f023b7ee4293009445dab315716f | [
"Unlicense"
] | null | null | null | 07/p1_test.py | en0/aoc2021 | 14c74f872319f023b7ee4293009445dab315716f | [
"Unlicense"
] | null | null | null | from unittest import TestCase, main
from aocfw import TestCaseMixin
from p1 import Solution
class SolutionTests(TestCase, TestCaseMixin):
solution = Solution
source = "sample.txt"
given = 37
def test_find_target(self):
data = self.get_parsed_data()
self.assertEqual(Solution().get_target(data), 2)
def test_get_fuel_cost_2(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 2)
self.assertEqual(ans, 37)
def test_get_fuel_cost_1(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 1)
self.assertEqual(ans, 41)
def test_get_fuel_cost_3(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 3)
self.assertEqual(ans, 39)
def test_get_fuel_cost_10(self):
data = self.get_parsed_data()
ans = Solution().get_fuel_cost(data, 10)
self.assertEqual(ans, 71)
if __name__ == "__main__":
main()
| 25.74359 | 56 | 0.656375 | 137 | 1,004 | 4.481752 | 0.270073 | 0.091205 | 0.143322 | 0.12215 | 0.490228 | 0.372964 | 0.332248 | 0.332248 | 0.332248 | 0.332248 | 0 | 0.028758 | 0.238048 | 1,004 | 38 | 57 | 26.421053 | 0.773856 | 0 | 0 | 0.178571 | 0 | 0 | 0.017928 | 0 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.178571 | false | 0 | 0.107143 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c6f61323eed68703a3ed669efbf6f92ee935246 | 2,569 | py | Python | server.py | telminov/prometheus-rabbitmq-exporter | d13f67102c60853132dc3efcb8a2d54b1bd2e2ac | [
"MIT"
] | null | null | null | server.py | telminov/prometheus-rabbitmq-exporter | d13f67102c60853132dc3efcb8a2d54b1bd2e2ac | [
"MIT"
] | null | null | null | server.py | telminov/prometheus-rabbitmq-exporter | d13f67102c60853132dc3efcb8a2d54b1bd2e2ac | [
"MIT"
] | null | null | null | #! /usr/bin/env python
import argparse
import yaml
from aiohttp import web, ClientSession, TCPConnector, BasicAuth
import async_timeout
parser = argparse.ArgumentParser(description='Prometheus rabbitmq exporter.')
parser.add_argument('-c', '--config', dest='config', default='config.yml',
help='Path to configuration yaml-file. Default config.yml')
parser.add_argument('--host', dest='host', default='0.0.0.0',
help='HTTP server host. Default 0.0.0.0')
parser.add_argument('-p', '--port', dest='port', default=9125, type=int,
help='HTTP server port. Default 9125')
args = parser.parse_args()
def create_app() -> web.Application:
app = web.Application()
app.router.add_get('/', index)
app.router.add_get('/metrics', metrics)
return app
def get_config() -> dict:
config_path = args.config
with open(config_path) as f:
config_data = yaml.load(f)
return config_data
async def get_queues(target: dict) -> list:
try:
queues = []
target_url = target['url']
auth = BasicAuth(login=target['login'], password=target['password'])
connector = TCPConnector(verify_ssl=False)
async with ClientSession(connector=connector) as session:
url = target_url + '/api/queues'
with async_timeout.timeout(10):
async with session.get(url, auth=auth) as response:
result = await response.json()
for item in result:
queues.append({
'name': item['name'],
'messages': item['messages']
})
return queues
except Exception as ex:
print(ex)
return []
async def index(request):
return web.Response(text='<h1>RabbitMQ exporter</h1><p><a href="/metrics">Metrics</a><p>', content_type='text/html')
async def metrics(request):
config = get_config()
result = '# HELP rabbitmq_queues_messages Displays queue messages count\n'
result += '# TYPE rabbitmq_queues_messages gauge\n'
for target in config.get('targets', []):
queues = await get_queues(target=target)
for queue in queues:
result += 'rabbitmq_queues_messages{target="%s",name="%s",queue="%s"} %s\n' % (
target['url'], target['name'], queue['name'], queue['messages']
)
return web.Response(text=result)
if __name__ == '__main__':
app = create_app()
web.run_app(app, host=args.host, port=args.port)
| 33.802632 | 120 | 0.606462 | 309 | 2,569 | 4.92233 | 0.343042 | 0.00789 | 0.00789 | 0.017094 | 0.019724 | 0.019724 | 0 | 0 | 0 | 0 | 0 | 0.010493 | 0.258077 | 2,569 | 75 | 121 | 34.253333 | 0.787513 | 0.008174 | 0 | 0 | 0 | 0.017241 | 0.207303 | 0.053396 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0.017241 | 0.068966 | 0 | 0.206897 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7148885a4bcc7103f891edb01fd7a7ba508c40 | 1,705 | py | Python | olea/core/errors/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 2 | 2020-06-18T03:25:52.000Z | 2020-06-18T07:33:45.000Z | olea/core/errors/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | 15 | 2021-01-28T07:11:04.000Z | 2021-05-24T07:11:37.000Z | olea/core/errors/__init__.py | Pix-00/olea | 98bee1fd8866a3929f685a139255afb7b6813f31 | [
"Apache-2.0"
] | null | null | null | __all__ = [
'AccessDenied', 'AccountDeactivated', 'BaseError', 'DoesNotMeetRequirements',
'DuplicatedRecord', 'FileExist', 'FileVerConflict', 'InvalidAccessToken', 'InvalidCredential',
'InvalidRefreshToken', 'InvalidReply', 'InvalidSource', 'NotQualifiedToPick',
'PermissionDenied', 'PitStatusLocked', 'ProjMetaLocked', 'RecordNotFound', 'RoleIsTaken',
'WeekPwd', 'register_error_handlers'
]
from .auth_fail import (AccessDenied, AccountDeactivated, InvalidAccessToken, InvalidCredential,
InvalidRefreshToken, PermissionDenied)
from .bad_opt import InvalidReply, InvalidSource, PitStatusLocked, ProjMetaLocked, RoleIsTaken
from .base_error import BaseError
from .data_conflict import DuplicatedRecord, FileExist, FileVerConflict, RecordNotFound
from .quality_control import DoesNotMeetRequirements, NotQualifiedToPick, WeekPwd
def register_error_handlers(app):
from flask_json import json_response
# - - - - - - - - - - - - - - - - - - - - - - -
@app.errorhandler(BaseError)
def handle_olea_exceptions(e: BaseError):
return json_response(status_=e.http_code, data_=e)
# - - - - - - - - - - - - - - - - - - - - - - -
from sentry_sdk import init as sentry_init
from sentry_sdk.integrations import flask, redis, sqlalchemy
if not app.config.get('IGNORE_ERRORS', False):
sentry_init(dsn=app.config['SENTRY_DSN'],
integrations=[
flask.FlaskIntegration(),
sqlalchemy.SqlalchemyIntegration(),
redis.RedisIntegration(),
],
traces_sample_rate=0.2)
| 46.081081 | 99 | 0.651613 | 134 | 1,705 | 8.074627 | 0.529851 | 0.055453 | 0.073937 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001542 | 0.239296 | 1,705 | 36 | 100 | 47.361111 | 0.832691 | 0.053372 | 0 | 0 | 0 | 0 | 0.204444 | 0.029206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.285714 | 0.035714 | 0.392857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7771b8e689f85c44bb0476f8c6fdb2d2047022 | 3,592 | py | Python | encodeAudio.py | TimelessParadise/encodeAudio | a17403628699e2ff08a02e60f8bfb93c786c4bb6 | [
"MIT"
] | null | null | null | encodeAudio.py | TimelessParadise/encodeAudio | a17403628699e2ff08a02e60f8bfb93c786c4bb6 | [
"MIT"
] | null | null | null | encodeAudio.py | TimelessParadise/encodeAudio | a17403628699e2ff08a02e60f8bfb93c786c4bb6 | [
"MIT"
] | null | null | null | import subprocess as sp
import shlex
import os
import argparse
import glob
import sys
extensionsTuple = (".m2ts", ".wav", ".flac")
def wavEncode(filePath):
sp.run(
shlex.split(
f"eac3to \"{filePath}\" -log=NUL \"{os.path.splitext(filePath)[0]}.wav\""
)
)
def wavEncode2(filePath, trackNumber):
sp.run(
shlex.split(
f"eac3to \"{filePath}\" -log=NUL {trackNumber}:\"{os.path.splitext(filePath)[0]}_Track{trackNumber}.wav\""
)
)
def flacEncode(filePath):
sp.run(
shlex.split(
f"eac3to \"{filePath}\" -log=NUL \"{os.path.splitext(filePath)[0]}.flac\""
)
)
def aacEncode(filePath):
sp.run(
shlex.split(
f"ffmpeg -i \"{filePath}\" -loglevel panic \"{os.path.splitext(filePath)[0]}.wav\""
)
)
sp.run(
shlex.split(
f"qaac \"{os.path.splitext(filePath)[0]}.wav\" -V 127 --no-delay -o \"{os.path.splitext(filePath)[0]}.m4a\""
)
)
if os.path.exists(f"{os.path.splitext(filePath)[0]}.wav"):
os.remove(f"{os.path.splitext(filePath)[0]}.wav")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-R", "--recursive", action="store_true", default=False, help="Check files recurcively if your path is a folder.")
parser.add_argument("-W", "--wav", action="store_true", default=False, help="Encode a PCM file, use this with .m2ts files.")
parser.add_argument("-T", "--track", action="store", type=int, default=False, help="Track number to encode.")
parser.add_argument("-F", "--flac", action="store_true", default=False, help="Enable FLAC encoding.")
parser.add_argument("-A", "--aac", action="store_true", default=False, help="Enable AAC encoding.")
parser.add_argument("path", metavar="path", type=str, nargs="?", help="Path of the file/folder you want to use")
args = parser.parse_args()
if args.path == None:
print(f"[WARNING] Usage: python {sys.argv[0]} -ARG/--arg path\n[INFO] Setting path to the current directory.")
args.path = os.getcwd()
if args.wav:
if os.path.isfile(args.path):
if args.track:
wavEncode2(args.path, args.track)
else:
wavEncode(args.path)
else:
if args.recursive:
fileList = glob.glob(f"{args.path}/**/*", recursive=True)
else:
fileList = glob.glob(f"{args.path}/*")
for audioFile in fileList:
if audioFile.endswith(extensionsTuple[0]):
if args.track:
wavEncode2(audioFile, args.track)
else:
wavEncode(audioFile)
if args.flac:
if os.path.isfile(args.path):
flacEncode(args.path)
else:
if args.recursive:
fileList = glob.glob(f"{args.path}/**/*", recursive=True)
else:
fileList = glob.glob(f"{args.path}/*")
for audioFile in fileList:
if audioFile.endswith(extensionsTuple):
flacEncode(audioFile)
if args.aac:
if os.path.isfile(args.path):
aacEncode(args.path)
else:
if args.recursive:
fileList = glob.glob(f"{args.path}/**/*", recursive=True)
else:
fileList = glob.glob(f"{args.path}/*")
for audioFile in fileList:
if audioFile.endswith(extensionsTuple):
aacEncode(audioFile)
| 35.92 | 138 | 0.560412 | 419 | 3,592 | 4.756563 | 0.260143 | 0.060211 | 0.056197 | 0.088309 | 0.510286 | 0.479177 | 0.375314 | 0.311089 | 0.311089 | 0.293026 | 0 | 0.008557 | 0.284243 | 3,592 | 99 | 139 | 36.282828 | 0.766628 | 0 | 0 | 0.41573 | 0 | 0.011236 | 0.195991 | 0.019488 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044944 | false | 0 | 0.067416 | 0 | 0.11236 | 0.011236 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7898a236d93a0f71c8b6e1ef252d0e74cabda8 | 12,285 | py | Python | augmentation/methods/robust/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | 28 | 2020-08-19T02:59:37.000Z | 2022-03-17T18:10:24.000Z | augmentation/methods/robust/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | null | null | null | augmentation/methods/robust/utils.py | SaraR-1/model-patching | 97b30bad4bb4575a5f3a4cc23fbd333b10a057a8 | [
"Apache-2.0"
] | 3 | 2021-01-29T10:20:14.000Z | 2021-11-15T17:06:27.000Z | import tensorflow as tf
import wandb
import yaml
import subprocess
from augmentation.utilities.visualize import gallery
from augmentation.utilities.wandb import *
from augmentation.utilities.checkpoint import load_tf_optimizer_state
def rewrite_config_for_resumption(config):
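    """Record the current run's identifiers in the config so the run can be resumed later,
    write the updated config back to its YAML file, and commit/push the change to git."""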
config.prev_wandb_entity = config.wandb_entity
config.prev_wandb_project = config.wandb_project
config.prev_wandb_run_id = wandb.run.id
config.resume = True
yaml.dump(config.__dict__, open(config._config_path, 'w'))
# Push the change for this config
for cmd in [['git', 'add', config._config_path],
['git', 'commit', '-m', f'cfg_update_{wandb.run.id}'],
['git', 'pull'],
['git', 'push']]:
subprocess.run(cmd)
return config
def reload_run(model,
optimizer,
robust_loss_calc,
wandb_run_id,
wandb_project,
wandb_entity,
wandb_ckpt_path,
resume_epoch=-1,
continue_training=True):
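    """Restore state from a previous wandb run: reload the model weights and, if training
    is being continued, the optimizer state and the group-DRO adversarial distribution
    from the checkpoints logged by that run.

    Returns the (start_epoch, start_step) pair to resume training from, or (0, 0)
    if no checkpoint could be reloaded.
    """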
# By default, we start at the beginning
start_epoch, start_step = 0, 0
# Load up the previous run
prev_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
step_extractor = particular_checkpoint_step_extractor(resume_epoch,
lambda fname: fname.split(".")[-2].split("_")[-1])
    # If the previous run crashed, wandb_ckpt_path should be '' (the typical use case),
    # though this behavior should be cleaned up in the future
_, loaded_epoch = load_most_recent_keras_model_weights(model, prev_run,
model_name='ckpt',
exclude='generator',
step_extractor=step_extractor,
wandb_ckpt_path=wandb_ckpt_path)
# If we're continuing training AND if we reloaded a model
# - load up the optimizer and DRO state
# - set the start epoch and start step
if continue_training and loaded_epoch is not None:
start_epoch = loaded_epoch
for line in prev_run.history():
if 'epochs' in line and line['epochs'] == start_epoch:
start_step = line['train_step/step']
break
# Reloading the optimizer states from that epoch
opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=wandb_ckpt_path,
model_name='optimizer',
step_extractor=particular_checkpoint_step_extractor(start_epoch))
load_tf_optimizer_state(optimizer, opt_ckpt.name)
# Reloading the state of GDRO from that epoch
gdro_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=wandb_ckpt_path,
model_name='gdro',
step_extractor=particular_checkpoint_step_extractor(start_epoch))
robust_loss_calc._adv_prob_logits = tf.convert_to_tensor(np.load(gdro_ckpt.name))
print(f"Loaded epoch {loaded_epoch} from {wandb_run_id}. Starting from step {start_step} and epoch {start_epoch}.",
flush=True)
return start_epoch, start_step
def log_robust_train_step_to_wandb(group_aliases, group_batches, group_targets, group_predictions, group_losses,
robust_loss, consistency_loss, consistency_penalty_weight,
irm_losses, irm_penalty_weight,
gradients, model, optimizer,
robust_loss_calc, step, log_images=False, log_weights_and_grads=False):
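    """Log statistics for a single robust training step to wandb: per-group targets,
    predictions, losses and IRM penalties; optionally the group minibatch images and the
    model weights/gradients; the group-DRO adversarial probabilities; and the aggregate
    robust/consistency losses, penalty weights and learning rate."""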
# Loop over the data from each group
for (alias, batch, targets, predictions, loss, irm) in zip(group_aliases, group_batches, group_targets,
group_predictions, group_losses, irm_losses):
# Log data generated in this train step
wandb.log({f'train_step/{alias}/targets': targets.numpy(),
f'train_step/{alias}/predictions': wandb.Histogram(predictions.numpy()),
f'train_step/{alias}/argmax_predictions': tf.argmax(predictions, axis=-1).numpy(),
f'train_step/{alias}/loss': loss.numpy(),
f'train_step/{alias}/irm': irm.numpy()},
step=step)
# Optionally, log the minibatch of images
if log_images:
wandb.log({f'train_step/{alias}/images': wandb.Image(gallery(batch.numpy()))}, step=step)
    # Optionally, log all the gradients and weights (gated by the log_weights_and_grads flag)
if log_weights_and_grads:
wandb.log({f'gradients/{v.name}': g.numpy() for v, g in zip(model.trainable_variables, gradients)}, step=step)
wandb.log({f'weights/{v.name}': v.numpy() for v in model.trainable_variables}, step=step)
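    # Log the group-DRO adversarial distribution, one scalar per group alias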
for prob, alias in zip(tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy().reshape(-1),
robust_loss_calc._aliases):
wandb.log({f'train_step/gdro_adv_prob.{alias}': prob}, step=step)
wandb.log({'train_step/irm_penalty_weight': irm_penalty_weight,
'train_step/consistency_penalty_weight': consistency_penalty_weight,
# 'train_step/gdro_adv_probs': tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy(),
'train_step/robust_loss': robust_loss.numpy(),
'train_step/consistency_loss': consistency_loss.numpy(),
'train_step/global_gradient_norm': tf.linalg.global_norm(gradients).numpy(),
'train_step/learning_rate': optimizer._decayed_lr(tf.float32).numpy(),
'train_step/step': step}, step=step)
def consistency_penalty(predictions_orig, predictions_1, predictions_2, consistency_type, scale=1.0):
# CAMEL consistency: JS-Divergence of augmentations, plus KL between original and average augmentation
if consistency_type == 'camel':
avg_predictions = (predictions_1 + predictions_2) / 2.0
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) * 0.5 +
tf.keras.losses.KLD(predictions_1, avg_predictions) * 0.25 +
tf.keras.losses.KLD(predictions_2, avg_predictions) * 0.25)) * scale
# JS-Divergence between original and both augmentations (as in AugMix)
elif consistency_type == 'triplet-js':
avg_predictions = (predictions_orig + predictions_1 + predictions_2) / 3.0
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) +
tf.keras.losses.KLD(predictions_1, avg_predictions) +
tf.keras.losses.KLD(predictions_2, avg_predictions)) / 3.0) * scale
# KL divergence between original and each augmentation
elif consistency_type == 'kl':
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, predictions_1) +
tf.keras.losses.KLD(predictions_orig, predictions_2)) * scale * 0.5)
elif consistency_type == 'reverse-kl':
return tf.reduce_mean((tf.keras.losses.KLD(predictions_1, predictions_orig) +
tf.keras.losses.KLD(predictions_2, predictions_orig)) * scale * 0.5)
elif consistency_type == 'none':
return tf.convert_to_tensor(0.)
else:
        raise ValueError(f'consistency_type {consistency_type} not supported')
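# Illustrative usage (hypothetical tensors): all three inputs are expected to be
# probability distributions over the same classes, e.g. softmax outputs.
# preds = tf.nn.softmax(model(batch), axis=-1)
# preds_1 = tf.nn.softmax(model(augment(batch)), axis=-1)
# preds_2 = tf.nn.softmax(model(augment(batch)), axis=-1)
# loss = consistency_penalty(preds, preds_1, preds_2, 'triplet-js', scale=10.0)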
def irm_penalty_explicit(targets, pred_logits, penalty_weight):
""" Computes the IRM penalty grad_{w} |_{w=1.0} crossent(targets, w*logits) explicitly """
if penalty_weight == 0.:
return tf.convert_to_tensor(0.)
xent = tf.keras.losses.sparse_categorical_crossentropy(targets, pred_logits, from_logits=True)
    sparse_logit = tf.reduce_logsumexp(pred_logits, axis=-1) - xent  # equivalent to grabbing the logit indexed by target
grad = sparse_logit - tf.reduce_sum(pred_logits * tf.nn.softmax(pred_logits, axis=-1), axis=-1)
return tf.reduce_sum(grad ** 2) * penalty_weight
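# A minimal numpy sanity check of the closed form above (illustrative only): for
# one example the dummy-scale gradient is sum_i z_i * softmax(z)_i - z_y, and the
# penalty is its square (the sign is irrelevant once squared).
# z = np.array([2.0, -1.0, 0.5])       # hypothetical logits
# y = 0                                # hypothetical target index
# p = np.exp(z) / np.exp(z).sum()      # softmax probabilities
# penalty = ((z * p).sum() - z[y]) ** 2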
def irm_penalty_gradient(targets, pred_logits, penalty_weight, tape):
""" Computes IRM penalty as formulated in the paper
Currently does not work: tf does not support second order gradients of cross entropy
"""
if penalty_weight == 0.:
return 0.
# Taken from https://github.com/facebookresearch/InvariantRiskMinimization/blob/6aad47e689913b9bdad05880833530a5edac389e/code/colored_mnist/main.py#L107
scale = tf.convert_to_tensor(1.)
tape.watch(scale)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)(targets, pred_logits * scale)
grad = tape.gradient(loss, scale)
return tf.reduce_sum(grad ** 2) * penalty_weight
def consistency_penalty_scheduler(step, n_anneal_steps, base_penalty_weight):
"""
Schedule the consistency penalty.
"""
if base_penalty_weight == 0:
return 0.
if step >= n_anneal_steps:
return base_penalty_weight
return 0.0
def irm_penalty_scheduler(step, n_anneal_steps=100, base_penalty_weight=10000.):
"""
Schedule the IRM penalty weight using a step function as done by
https://github.com/facebookresearch/InvariantRiskMinimization
If the penalty weight is 0. (IRM disabled), just return 0.
"""
if base_penalty_weight == 0.:
return 0.
if step >= n_anneal_steps:
return base_penalty_weight
# return 1.0
return 0.0 # train with no irm at first
def irm_loss_rescale(total_loss, irm_penalty_weight):
"""
Rescale the total loss by the IRM penalty weight as done by
https://github.com/facebookresearch/InvariantRiskMinimization
"""
if irm_penalty_weight > 1.0:
return total_loss / irm_penalty_weight
return total_loss
class GDROLoss:
def __init__(self, group_aliases, group_counts, superclass_ids, adj_coef, step_size):
"""
group_counts: list of integer sizes of the groups
adj_coef: scalar coefficient of the generalization gap penalty
step_size: robust learning rate for the "mixture of expert" probabilities
"""
assert len(group_aliases) == len(group_counts) == len(superclass_ids)
group_counts = tf.cast(tf.stack(group_counts), tf.float32)
print(f"GDROLoss: Group counts {group_counts}")
self._adj = adj_coef * 1. / tf.math.sqrt(group_counts)
print("adj_coef", adj_coef)
print("total adjustment", self._adj)
self._step_size = step_size
self._adv_probs = tf.ones(len(group_counts)) / len(group_counts)
        # _adv_prob_logits must always exist, since it is logged to wandb
self._adv_prob_logits = tf.zeros_like(group_counts)
self._aliases = group_aliases
# For now, assume superclass_ids are 0, 1, -1
superclass_idxs_ = {}
for i in set(superclass_ids):
superclass_idxs_[i] = [idx for idx, j in enumerate(superclass_ids) if j == i]
superclass_freqs_ = {i: len(idxs) / len(group_aliases) for i, idxs in superclass_idxs_.items()}
self.superclass_idxs = superclass_idxs_.values()
self.superclass_freqs = superclass_freqs_.values()
print("GDROLoss: superclass indices, freqs", self.superclass_idxs, self.superclass_freqs)
def compute_loss(self, losses):
""" losses: list of losses (scalars) """
if len(losses) == 0: return tf.convert_to_tensor(0.0)
losses = tf.stack(losses, axis=-1) + self._adj
self._adv_prob_logits += self._step_size * losses
loss = tf.convert_to_tensor(0.)
for idxs, freq in zip(self.superclass_idxs, self.superclass_freqs):
adv_probs = tf.nn.softmax(tf.gather(self._adv_prob_logits, idxs), axis=-1)
loss = loss + tf.reduce_sum(adv_probs * tf.gather(losses, idxs), axis=-1) * freq
return loss
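# Illustrative construction (hypothetical group setup): two superclasses with two
# groups each; compute_loss then expects one scalar loss per group, in order.
# gdro = GDROLoss(group_aliases=['a0', 'a1', 'b0', 'b1'],
#                 group_counts=[900, 100, 800, 200],
#                 superclass_ids=[0, 0, 1, 1],
#                 adj_coef=1.0, step_size=0.01)
# robust_loss = gdro.compute_loss([loss_a0, loss_a1, loss_b0, loss_b1])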
| 48.944223 | 156 | 0.641758 | 1,533 | 12,285 | 4.87606 | 0.204827 | 0.041739 | 0.02087 | 0.021405 | 0.301672 | 0.247893 | 0.185552 | 0.162943 | 0.125485 | 0.097926 | 0 | 0.013639 | 0.265934 | 12,285 | 250 | 157 | 49.14 | 0.815258 | 0.173871 | 0 | 0.115152 | 0 | 0 | 0.0807 | 0.039 | 0 | 0 | 0 | 0 | 0.012121 | 1 | 0.066667 | false | 0 | 0.042424 | 0 | 0.236364 | 0.030303 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c794db4e212c6cf7d9537acd92f512aa6cdb0cf | 1,046 | py | Python | main.py | Jodagito/Pandomit | 19278aee951d1238272a18473ea1581380f437d7 | [
"MIT"
] | null | null | null | main.py | Jodagito/Pandomit | 19278aee951d1238272a18473ea1581380f437d7 | [
"MIT"
] | null | null | null | main.py | Jodagito/Pandomit | 19278aee951d1238272a18473ea1581380f437d7 | [
"MIT"
] | null | null | null | import os
import parser
import pandas as pd
def file_converter(filename, expected_format):
"""Given a file returns a converted file to a preferred format"""
read_methods = [method for method in dir(pd) if method[:4] == 'read']
i = 0
    while os.path.exists("converted filename {}.".format(i) + expected_format.replace("to_", "")):
i += 1
try:
for method in read_methods[1:]:
try:
df = getattr(pd, method)(filename)
df_converted = getattr(pd.DataFrame, expected_format)(df)
if df_converted:
with open("converted filename {}.".format(i) + expected_format.replace("to_", "") + "", 'w') as converted_file:
converted_file.write(df_converted)
break
            except Exception:
continue
except ValueError:
print("This format can't be converted.")
if __name__ == "__main__":
args = parser.arguments_parser()
file_converter(args.filename, args.expectedformat)
| 33.741935 | 131 | 0.58891 | 121 | 1,046 | 4.892562 | 0.454545 | 0.094595 | 0.037162 | 0.081081 | 0.158784 | 0.158784 | 0.158784 | 0.158784 | 0 | 0 | 0 | 0.005427 | 0.295411 | 1,046 | 30 | 132 | 34.866667 | 0.797829 | 0.056405 | 0 | 0.083333 | 0 | 0 | 0.095821 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0.041667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7a952effb826e1fbf8fc0f5e9663289f251cf5 | 4,317 | py | Python | apps/run_command_line.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | apps/run_command_line.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | apps/run_command_line.py | MattSegal/AuTuMN | 49d78d9c07ea3825ac31682a4d124eab9d3365ce | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | """
Runs AuTuMN apps
You can access this script from your CLI by running:
python -m apps --help
"""
import os
import click
from . import covid_19, marshall_islands, mongolia, sir_example
from .marshall_islands.calibration import run_calibration_chain as run_rmi_calibration_chain
from .mongolia.calibration import run_calibration_chain as run_mongolia_calibration_chain
from .covid_19.calibration.victoria import (
run_vic_calibration_chain as run_victoria_covid_calibration_chain,
)
from .covid_19.calibration.malaysia import (
run_mys_calibration_chain as run_malaysia_covid_calibration_chain,
)
from .covid_19.calibration.philippines import (
run_phl_calibration_chain as run_philippines_covid_calibration_chain,
)
from autumn.db.models import create_power_bi_outputs, collate_outputs_powerbi
from autumn.plots.database_plots import plot_from_database
@click.group()
def cli():
"""AuTuMN CLI"""
@click.group()
def db():
"""Database utilities"""
@db.command("plot")
@click.argument("model_run_path", type=str)
def plot_database(model_run_path):
"""Re-plot data from a model run folder"""
plot_from_database(model_run_path)
@db.command("powerbi")
@click.argument("src_db_path", type=str)
@click.argument("dest_db_path", type=str)
def powerbi_convert(src_db_path, dest_db_path):
"""Convert model outputs into PowerBI format"""
assert os.path.isfile(src_db_path), f"{src_db_path} must be a file"
create_power_bi_outputs(src_db_path, dest_db_path)
@db.command("powerbi-collate")
@click.argument("src_db_dir", type=str)
@click.argument("dest_db_path", type=str)
@click.argument("max_size_mb", type=int)
def powerbi_collate(src_db_dir, dest_db_path, max_size_mb):
"""Collate MCMC databases and then convert model outputs into PowerBI format"""
assert os.path.isdir(src_db_dir), f"{src_db_dir} must be a folder"
src_db_paths = [
os.path.join(src_db_dir, fname) for fname in os.listdir(src_db_dir) if fname.endswith(".db")
]
collate_outputs_powerbi(src_db_paths, dest_db_path, max_size_mb)
@click.group()
def run():
"""Run a model"""
@run.command("covid")
@click.argument("country", type=click.Choice(covid_19.COUNTRY_RUNNERS))
def run_covid(country):
"""Run the COVID model for some country"""
runner = getattr(covid_19, country)
runner.run_model()
@run.command("sir_example")
@click.argument("country", type=click.Choice(sir_example.COUNTRY_RUNNERS))
def run_sir_example(country):
"""Run the SIR model for some country"""
runner = getattr(sir_example, country)
runner.run_model()
@run.command("rmi")
def run_rmi():
"""Run the Marshall Islands TB model"""
marshall_islands.run_model()
@run.command("mongolia")
def run_mongolia():
"""Run the Mongolia TB model"""
mongolia.run_model()
@click.group()
def calibrate():
"""
Calibrate a model
"""
@calibrate.command("rmi")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def rmi_calibration(max_seconds, run_id):
"""Run Marshall Islands model calibration."""
marshall_islands.calibration.run_calibration_chain(max_seconds, run_id)
@calibrate.command("mongolia")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def mongolia_calibration(max_seconds, run_id):
"""Run Mongolia model calibration."""
mongolia.calibration.run_calibration_chain(max_seconds, run_id)
@calibrate.command("victoria")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def victoria_calibration(max_seconds, run_id):
"""Run Victoria COVID model calibration."""
run_victoria_covid_calibration_chain(max_seconds, run_id)
@calibrate.command("malaysia")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def malaysia_calibration(max_seconds, run_id):
"""Run Malaysia COVID model calibration."""
run_malaysia_covid_calibration_chain(max_seconds, run_id)
@calibrate.command("philippines")
@click.argument("max_seconds", type=int)
@click.argument("run_id", type=int)
def philippines_calibration(max_seconds, run_id):
"""Run Malaysia COVID model calibration."""
run_philippines_covid_calibration_chain(max_seconds, run_id)
cli.add_command(run)
cli.add_command(calibrate)
cli.add_command(db)
cli()
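# Illustrative invocations (command names come from the click groups above):
#   python -m apps run covid <country>
#   python -m apps db powerbi <src_db_path> <dest_db_path>
#   python -m apps calibrate victoria <max_seconds> <run_id>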
| 28.401316 | 100 | 0.756544 | 623 | 4,317 | 4.956661 | 0.163724 | 0.075777 | 0.042098 | 0.048575 | 0.490609 | 0.462435 | 0.32772 | 0.261658 | 0.261658 | 0.172927 | 0 | 0.003158 | 0.119759 | 4,317 | 151 | 101 | 28.589404 | 0.809474 | 0.145471 | 0 | 0.202247 | 0 | 0 | 0.089136 | 0 | 0 | 0 | 0 | 0 | 0.022472 | 1 | 0.179775 | false | 0 | 0.11236 | 0 | 0.292135 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7b170332c963d2c748af8230525d7348d1ce37 | 1,851 | py | Python | Toolkits/Discovery/meta/searx/searx/engines/translated.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | Toolkits/Discovery/meta/searx/searx/engines/translated.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | searx/engines/translated.py | xu1991/open | 5398dab4ba669b3ca87d9fe26eb24431c45f153e | [
"CC0-1.0"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | """
MyMemory Translated
@website https://mymemory.translated.net/
@provide-api yes (https://mymemory.translated.net/doc/spec.php)
@using-api yes
@results JSON
@stable yes
@parse url, title, content
"""
import re
from sys import version_info
from searx.utils import is_valid_lang
if version_info[0] == 3:
unicode = str
categories = ['general']
url = u'http://api.mymemory.translated.net/get?q={query}&langpair={from_lang}|{to_lang}{key}'
web_url = u'http://mymemory.translated.net/en/{from_lang}/{to_lang}/{query}'
weight = 100
parser_re = re.compile(u'.*?([a-z]+)-([a-z]+) (.{2,})$', re.I)
api_key = ''
def request(query, params):
m = parser_re.match(unicode(query, 'utf8'))
if not m:
return params
from_lang, to_lang, query = m.groups()
from_lang = is_valid_lang(from_lang)
to_lang = is_valid_lang(to_lang)
if not from_lang or not to_lang:
return params
if api_key:
key_form = '&key=' + api_key
else:
key_form = ''
params['url'] = url.format(from_lang=from_lang[1],
to_lang=to_lang[1],
query=query,
key=key_form)
params['query'] = query
params['from_lang'] = from_lang
params['to_lang'] = to_lang
return params
def response(resp):
results = []
results.append({
'url': web_url.format(
from_lang=resp.search_params['from_lang'][2],
to_lang=resp.search_params['to_lang'][2],
query=resp.search_params['query']),
'title': '[{0}-{1}] {2}'.format(
resp.search_params['from_lang'][1],
resp.search_params['to_lang'][1],
resp.search_params['query']),
'content': resp.json()['responseData']['translatedText']
})
return results
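# Illustrative query format (as matched by parser_re above):
#   "en-it hello world" -> from_lang="en", to_lang="it", query="hello world"
# producing a request URL of the form:
#   http://api.mymemory.translated.net/get?q=hello world&langpair=en|it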
| 26.826087 | 93 | 0.590492 | 247 | 1,851 | 4.222672 | 0.303644 | 0.099712 | 0.067114 | 0.053691 | 0.141898 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011594 | 0.254457 | 1,851 | 68 | 94 | 27.220588 | 0.744203 | 0.116153 | 0 | 0.065217 | 0 | 0.021739 | 0.192474 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.065217 | 0 | 0.195652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7c7bbed183e6691077921479d848eb10529f59 | 4,849 | py | Python | core/python_src/test_protobuf.py | kellpossible/libgdx-atc-sim | b469ccc3245004d36a9e0f6d1d27651182ba2962 | [
"MIT"
] | null | null | null | core/python_src/test_protobuf.py | kellpossible/libgdx-atc-sim | b469ccc3245004d36a9e0f6d1d27651182ba2962 | [
"MIT"
] | 1 | 2019-07-03T04:28:57.000Z | 2019-07-03T04:28:57.000Z | core/python_src/test_protobuf.py | kellpossible/libgdx-atc-sim | b469ccc3245004d36a9e0f6d1d27651182ba2962 | [
"MIT"
] | 2 | 2019-05-06T14:54:26.000Z | 2021-02-16T03:33:16.000Z | import DebugDataFeedServe_pb2 as NetworkInterfacePacket
import socket
import sys
import time
import math
from google.protobuf.internal import encoder
from google.protobuf.internal import decoder
from threading import Thread
from Queue import Queue
def build_test_packet(dt):
t = int(round(time.time() * 1000))
system_state = NetworkInterfacePacket.SystemStateMessage()
system_state.time = t
# assert not system_state.HasField("time")
aircraft_state = system_state.aircraftState.add()
aircraft_state.aircraftID = "7C1468"
aircraft_state.time = t
aircraft_state.heading = 81.0
speed = 0.01
deg_to_rad = 0.0174533
position = aircraft_state.position
position.altitude = 2278.38
position.latitude = math.radians(-37.7549 + speed*(dt/1000.0))
position.longitude = math.radians(144.6835)
velocity = aircraft_state.velocity
velocity.dr = 0.0
velocity.dtheta = 0.0
velocity.dphi = math.radians(speed)
return system_state
# I had to implement this because the tools in google.protobuf.internal.decoder
# read from a buffer, not from a file-like object
def readRawVarint32(stream):
mask = 0x80 # (1 << 7)
raw_varint32 = []
while 1:
b = stream.read(1)
# eof
if b == "":
break
raw_varint32.append(b)
if not (ord(b) & mask):
# we found a byte starting with a 0, which means it's the last byte
# of this varint
break
return raw_varint32
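# Worked example of the varint framing (standard protobuf varint encoding):
# 300 = 0b10_0101100 is emitted 7 bits at a time, low bits first, with the top
# bit of each byte flagging continuation, so encoder._VarintBytes(300) gives
# b'\xac\x02'; readRawVarint32 collects those two bytes back off the stream and
# decoder._DecodeVarint32 turns them into 300 again.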
# These methods are from here: http://stackoverflow.com/questions/2340730/are-t
# here-c-equivalents-for-the-protocol-buffers-delimited-i-o-functions-in-ja/3453
# 9706#34539706
def writeDelimitedTo(message, connection):
message_str = message.SerializeToString()
delimiter = encoder._VarintBytes(len(message_str))
connection.send(delimiter + message_str)
def readDelimitedFrom(MessageType, stream):
raw_varint32 = readRawVarint32(stream)
message = None
if raw_varint32:
size, _ = decoder._DecodeVarint32(raw_varint32, 0)
data = stream.read(size)
if len(data) < size:
raise Exception("Unexpected end of file")
message = MessageType()
message.ParseFromString(data)
return message
# In place version that takes an already built protobuf object
# In my tests, this is around 20% faster than the other version
# of readDelimitedFrom()
def readDelimitedFrom_inplace(message, stream):
raw_varint32 = readRawVarint32(stream)
if raw_varint32:
size, _ = decoder._DecodeVarint32(raw_varint32, 0)
data = stream.read(size)
if len(data) < size:
raise Exception("Unexpected end of file")
message.ParseFromString(data)
return message
else:
return None
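# Illustrative client-side read loop (hypothetical socket setup):
# sock = socket.create_connection(('localhost', 6989))
# stream = sock.makefile('rb')
# message = NetworkInterfacePacket.SystemStateMessage()
# while readDelimitedFrom_inplace(message, stream) is not None:
#     print(message.time)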
class ServerThread(Thread):
def __init__(self, message_queue):
Thread.__init__(self)
self.daemon = True
self.message_queue = message_queue
self.running = False
def run(self):
self.start_server('localhost', 6989)
def start_server(self, address, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print("starting server on {} port {}".format(address, port))
sock.bind((address, port))
print("socket has been bound")
# listen for incoming connections
sock.listen(1)
while True:
# wait for a connection
connection, client_address = sock.accept()
print("connection initiated from {}", client_address)
self.running = True
while True:
message = self.message_queue.get(True, None)
writeDelimitedTo(message, connection)
def is_running(self):
return self.running and self.is_alive()
class SimulationThread(Thread):
def __init__(self, message_queue, server_thread):
Thread.__init__(self)
self.daemon = True
self.message_queue = message_queue
self.server_thread = server_thread
def run(self):
start_time = int(round(time.time() * 1000))
while True:
current_time = int(round(time.time() * 1000))
dt = current_time - start_time
message = build_test_packet(dt)
if self.server_thread.is_running():
self.message_queue.put(message, True, 2.0)
print(self.message_queue.qsize())
else:
print("waiting until server is running...")
time.sleep(0.5)
if __name__ == "__main__":
# print(build_test_packet(0))
message_queue = Queue(10)
server_thread = ServerThread(message_queue)
server_thread.start()
sim_thread = SimulationThread(message_queue, server_thread)
sim_thread.start()
while True:
time.sleep(1)
| 28.028902 | 80 | 0.655599 | 585 | 4,849 | 5.270085 | 0.360684 | 0.046708 | 0.036328 | 0.015569 | 0.226727 | 0.1518 | 0.117418 | 0.117418 | 0.117418 | 0.117418 | 0 | 0.037579 | 0.253661 | 4,849 | 172 | 81 | 28.19186 | 0.814313 | 0.135698 | 0 | 0.280702 | 0 | 0 | 0.042895 | 0 | 0 | 0 | 0.000959 | 0 | 0 | 1 | 0.096491 | false | 0 | 0.078947 | 0.008772 | 0.245614 | 0.04386 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c7ef61a5d394fa533e6ad33c697d5d4aacf0bf9 | 645 | py | Python | scripts/annotation/extract_cds_by_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 10 | 2015-04-28T14:15:04.000Z | 2021-03-15T00:07:38.000Z | scripts/annotation/extract_cds_by_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | null | null | null | scripts/annotation/extract_cds_by_gff.py | mahajrod/MAVR | 4db74dff7376a2ffe4426db720b241de9198f329 | [
"MIT"
] | 6 | 2017-03-16T22:38:41.000Z | 2021-08-11T00:22:52.000Z | #!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
from RouToolPa.Tools.Expression import Gffread
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input gff with annotation")
parser.add_argument("-g", "--genome", action="store", dest="genome", required=True,
help="Fasta with genome")
parser.add_argument("-o", "--output", action="store", dest="output", required=True,
help="Output file to write cds")
args = parser.parse_args()
Gffread.extract_cds(args.input, args.genome, args.output)
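# Illustrative invocation:
#   python extract_cds_by_gff.py -i annotation.gff -g genome.fasta -o cds.fasta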
| 33.947368 | 83 | 0.665116 | 80 | 645 | 5.25 | 0.525 | 0.064286 | 0.121429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176744 | 645 | 18 | 84 | 35.833333 | 0.79096 | 0.031008 | 0 | 0 | 0 | 0 | 0.229167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c826613b4eb9967c2381828b12ff8fbe1540c84 | 6,879 | py | Python | demo/python/picovoice_demo_mic.py | soltrinox/picovoice | 2fb47389c7031c3a365eca40edd67cef1dc152c5 | [
"Apache-2.0"
] | null | null | null | demo/python/picovoice_demo_mic.py | soltrinox/picovoice | 2fb47389c7031c3a365eca40edd67cef1dc152c5 | [
"Apache-2.0"
] | null | null | null | demo/python/picovoice_demo_mic.py | soltrinox/picovoice | 2fb47389c7031c3a365eca40edd67cef1dc152c5 | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2020-2021 Picovoice Inc.
#
# You may not use this file except in compliance with the license. A copy of the license is located in the "LICENSE"
# file accompanying this source.
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import argparse
import os
import sys
import struct
import wave
from threading import Thread
import numpy as np
from picovoice import Picovoice
from pvrecorder import PvRecorder
class PicovoiceDemo(Thread):
def __init__(
self,
access_key,
audio_device_index,
keyword_path,
context_path,
porcupine_library_path=None,
porcupine_model_path=None,
porcupine_sensitivity=0.5,
rhino_library_path=None,
rhino_model_path=None,
rhino_sensitivity=0.5,
require_endpoint=True,
output_path=None):
super(PicovoiceDemo, self).__init__()
self._picovoice = Picovoice(
access_key=access_key,
keyword_path=keyword_path,
wake_word_callback=self._wake_word_callback,
context_path=context_path,
inference_callback=self._inference_callback,
porcupine_library_path=porcupine_library_path,
porcupine_model_path=porcupine_model_path,
porcupine_sensitivity=porcupine_sensitivity,
rhino_library_path=rhino_library_path,
rhino_model_path=rhino_model_path,
rhino_sensitivity=rhino_sensitivity,
require_endpoint=require_endpoint)
self.audio_device_index = audio_device_index
self.output_path = output_path
@staticmethod
def _wake_word_callback():
print('[wake word]\n')
@staticmethod
def _inference_callback(inference):
if inference.is_understood:
print('{')
print(" intent : '%s'" % inference.intent)
print(' slots : {')
for slot, value in inference.slots.items():
print(" %s : '%s'" % (slot, value))
print(' }')
print('}\n')
else:
print("Didn't understand the command.\n")
def run(self):
recorder = None
wav_file = None
try:
recorder = PvRecorder(device_index=self.audio_device_index, frame_length=self._picovoice.frame_length)
recorder.start()
if self.output_path is not None:
wav_file = wave.open(self.output_path, "w")
wav_file.setparams((1, 2, 16000, 512, "NONE", "NONE"))
print(f"Using device: {recorder.selected_device}")
print('[Listening ...]')
while True:
pcm = recorder.read()
if wav_file is not None:
wav_file.writeframes(struct.pack("h" * len(pcm), *pcm))
self._picovoice.process(pcm)
except KeyboardInterrupt:
sys.stdout.write('\b' * 2)
print('Stopping ...')
finally:
if recorder is not None:
recorder.delete()
if wav_file is not None:
wav_file.close()
self._picovoice.delete()
@classmethod
def show_audio_devices(cls):
devices = PvRecorder.get_audio_devices()
for i in range(len(devices)):
print(f'index: {i}, device name: {devices[i]}')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--access_key',
help='AccessKey obtained from Picovoice Console (https://picovoice.ai/console/)',
required=True)
parser.add_argument('--keyword_path', help="Absolute path to a Porcupine keyword file.")
parser.add_argument('--context_path', help="Absolute path to a Rhino context file.")
parser.add_argument('--porcupine_library_path', help="Absolute path to Porcupine's dynamic library.", default=None)
parser.add_argument('--porcupine_model_path', help="Absolute path to Porcupine's model file.", default=None)
parser.add_argument(
'--porcupine_sensitivity',
help="Sensitivity for detecting wake word. Each value should be a number within [0, 1]. A higher sensitivity " +
"results in fewer misses at the cost of increasing the false alarm rate.",
type=float,
default=0.5)
parser.add_argument('--rhino_library_path', help="Absolute path to Rhino's dynamic library.", default=None)
parser.add_argument('--rhino_model_path', help="Absolute path to Rhino's model file.", default=None)
parser.add_argument(
'--rhino_sensitivity',
help="Inference sensitivity. It should be a number within [0, 1]. A higher sensitivity value results in fewer" +
"misses at the cost of (potentially) increasing the erroneous inference rate.",
type=float,
default=0.5)
parser.add_argument(
'--require_endpoint',
help="If set to `False`, Rhino does not require an endpoint (chunk of silence) before finishing inference.",
default='True',
choices=['True', 'False'])
parser.add_argument('--audio_device_index', help='index of input audio device', type=int, default=-1)
parser.add_argument('--output_path', help='Absolute path to recorded audio for debugging.', default=None)
parser.add_argument('--show_audio_devices', action='store_true')
args = parser.parse_args()
if args.require_endpoint.lower() == 'false':
require_endpoint = False
else:
require_endpoint = True
if args.show_audio_devices:
PicovoiceDemo.show_audio_devices()
else:
if not args.keyword_path:
raise ValueError("Missing path to Porcupine's keyword file.")
if not args.context_path:
raise ValueError("Missing path to Rhino's context file.")
PicovoiceDemo(
access_key=args.access_key,
audio_device_index=args.audio_device_index,
keyword_path=args.keyword_path,
context_path=args.context_path,
porcupine_library_path=args.porcupine_library_path,
porcupine_model_path=args.porcupine_model_path,
porcupine_sensitivity=args.porcupine_sensitivity,
rhino_library_path=args.rhino_library_path,
rhino_model_path=args.rhino_model_path,
rhino_sensitivity=args.rhino_sensitivity,
require_endpoint=require_endpoint,
output_path=os.path.expanduser(args.output_path) if args.output_path is not None else None).run()
if __name__ == '__main__':
main()
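# Illustrative invocations (file paths are hypothetical):
#   python picovoice_demo_mic.py --show_audio_devices --access_key ${ACCESS_KEY}
#   python picovoice_demo_mic.py --access_key ${ACCESS_KEY} \
#       --keyword_path picovoice.ppn --context_path coffee_maker.rhn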
| 35.096939 | 120 | 0.643989 | 814 | 6,879 | 5.218673 | 0.259214 | 0.027542 | 0.052024 | 0.032957 | 0.327919 | 0.221281 | 0.129473 | 0.102166 | 0.037665 | 0.019303 | 0 | 0.006328 | 0.264864 | 6,879 | 195 | 121 | 35.276923 | 0.833696 | 0.07065 | 0 | 0.104167 | 0 | 0.020833 | 0.21921 | 0.014886 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.0625 | 0 | 0.111111 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c862c8e739f0576851e5f917cdfc1912121180e | 700 | py | Python | algorithms_and_data_structures/algorithms/sorting/insertion_sort/insertion_sort_naive.py | JCPedroza/algorithms-and-data-structures-py | e8532060e82bb7f56d667c587469dea2921117df | [
"MIT"
] | 2 | 2022-01-14T01:33:24.000Z | 2022-01-14T03:23:41.000Z | algorithms_and_data_structures/algorithms/sorting/insertion_sort/insertion_sort_naive.py | JCPedroza/algorithms-and-data-structures-py | e8532060e82bb7f56d667c587469dea2921117df | [
"MIT"
] | 1 | 2022-01-14T03:26:58.000Z | 2022-01-14T03:26:58.000Z | algorithms_and_data_structures/algorithms/sorting/insertion_sort/insertion_sort_naive.py | JCPedroza/algorithms-and-data-structures-py | e8532060e82bb7f56d667c587469dea2921117df | [
"MIT"
] | 1 | 2022-01-14T03:23:45.000Z | 2022-01-14T03:23:45.000Z | def insertion_sort(nums: list[float]) -> list[float]:
"""Sorts a list in-place using the insertion sort approach.
This version does more comparisons and moves more data than necessary, so
it is sub-optimal.
Time complexity: O(n) best O(n^2) worst O(n^2) average.
Space complexity: O(n) total O(1) auxiliary.
Args:
nums: A list of numbers.
    Returns:
The sorted list.
"""
for target in range(1, len(nums)):
swap = target
while swap > 0 and nums[swap - 1] > nums[swap]:
nums[swap - 1], nums[swap] = nums[swap], nums[swap - 1]
swap -= 1
return nums
algorithm = insertion_sort
name = 'in-place naive'
| 24.137931 | 77 | 0.607143 | 104 | 700 | 4.067308 | 0.538462 | 0.132388 | 0.06383 | 0.113475 | 0.120567 | 0.099291 | 0.099291 | 0 | 0 | 0 | 0 | 0.017928 | 0.282857 | 700 | 28 | 78 | 25 | 0.824701 | 0.455714 | 0 | 0 | 0 | 0 | 0.04142 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c86d2ba40619a738f5d8ad8d925e6ddb561978c | 6,010 | py | Python | drf_localize/serializers.py | ebs-integrator/DRF-Localize | 85201157027770ff3859842f04ce8a3e302ab072 | [
"MIT"
] | 3 | 2022-03-10T12:34:18.000Z | 2022-03-14T08:52:22.000Z | drf_localize/serializers.py | ebs-integrator/drf-localize | 85201157027770ff3859842f04ce8a3e302ab072 | [
"MIT"
] | null | null | null | drf_localize/serializers.py | ebs-integrator/drf-localize | 85201157027770ff3859842f04ce8a3e302ab072 | [
"MIT"
] | null | null | null | from django.utils.translation import ugettext_lazy as _
from rest_framework.serializers import (
Serializer,
JSONField,
ModelSerializer,
)
from rest_framework.utils.serializer_helpers import BindingDict
from rest_framework.exceptions import ValidationError
from rest_framework.fields import empty
from django.db import models
from django.utils.functional import cached_property
# Import your package here.
from drf_localize.core import (
localize,
localize_key_type
)
# Create your serializers here.
class I18N(Serializer):
context: dict = None
localize_namespace: bool = False
localize_model: models.Model = None
localize_translate: list = []
def __init__(self, **kwargs):
self.localize_model = kwargs.pop('model', None)
self.context = kwargs.pop('context', None)
self.localize_namespace = kwargs.pop('namespace', False)
self.localize_translate, self.localize_field, self.localize_auto_update = localize._model_set(model=self.localize_model) # noqa
localize._signal(model=self.localize_model) # noqa
super(I18N, self).__init__(**kwargs)
def to_representation(self, instance):
# Not evaluating non-request context
if 'request' not in self.context:
return {}
response = {}
request = self.context.get('request', {})
data = getattr(request, 'data', {})
i18n = data.get(self.localize_field, {})
languages = localize.get_languages(request=request)
        # Make sure the request's own language key exists in the response, e.g. response['en'] = {}
if language := request.LANGUAGE_CODE:
response[language] = {}
# Take i18n field from request body
if i18n and isinstance(i18n, dict):
keys = list(i18n.keys())
# Check if i18n object has valid language keys
if difference := list(set(keys) - set(languages)):
raise ValidationError({
self.localize_field: [_('Unknown language keys "%(key)s".') % {'key': ','.join(difference)}]
})
# Attach language keys with values
for language in languages:
response[language] = {}
value = i18n.get(language, '')
value_string = value if isinstance(value, str) else ''
# Model based field translation
if self.localize_model and self.localize_translate:
for field in self.localize_translate:
keyed_data = data.get(field, '')
keyed = i18n.get(language, {})
if not isinstance(keyed, dict):
keyed = {}
# Retrieve language field value, if set
keyed = keyed.get(field, '')
value_string = keyed if keyed and isinstance(keyed, str) else ''
# Defaulting to internal body key value
value_string = keyed_data if not value_string else value_string
# Update language code key value
response[language].update({field: value_string})
# We are skipping the rest, because model based translation is already in use
continue
# Blank string if value is not string, and non-model
response[language] = value_string
# Namespacing keys, means each language is allowed to have 2nd level keys, non-model
if self.localize_namespace:
response[language] = {}
if not isinstance(value, dict):
continue
for key, value in value.items():
# Skipping if value is not string
if not isinstance(value, str):
continue
# Attach 2nd level value
response[language].update({key: value})
return response
def to_internal_value(self, data):
return {self.localize_field: data}
def update(self, instance, validated_data):
pass
def create(self, validated_data):
pass
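# Illustrative request payload consumed by to_representation above (field and
# language names are hypothetical; the outer key is the model's LOCALIZE_FIELD):
# {
#     "title": "Hello",
#     "i18n": {"en": {"title": "Hello"}, "ro": {"title": "Salut"}}
# }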
class I18NModelSerializer(ModelSerializer):
def __init__(self, instance=None, data=empty, **kwargs):
self.localize_model = self.Meta.model # noqa
self.localize_field = getattr(self.localize_model, 'LOCALIZE_FIELD', None)
super(I18NModelSerializer, self).__init__(instance=instance, data=data, **kwargs)
@cached_property
def fields(self):
"""
A dictionary of {field_name: field_instance}.
"""
# `fields` is evaluated lazily. We do this to ensure that we don't
# have issues importing modules that use ModelSerializers as fields,
# even if Django's app-loading stage has not yet run.
fields = BindingDict(self)
for key, value in self.get_fields().items():
fields[key] = value
if self.localize_field:
fields.update({
self.localize_field: JSONField(
required=False, default={}
)
})
return fields
def _i18n(self, validated_data):
typing = validated_data.get('type', None)
serializer = I18N(
data=self,
context=self.context,
model=self.localize_model,
namespace=typing == localize_key_type.KEY_NAMESPACE
)
serializer.is_valid(raise_exception=True)
# In case model does not have i18n field
if self.localize_field:
validated_data.update({self.localize_field: serializer.data})
return validated_data
def create(self, validated_data):
validated_data = self._i18n(validated_data)
return super(I18NModelSerializer, self).create(validated_data)
def update(self, instance, validated_data):
validated_data = self._i18n(validated_data)
return super(I18NModelSerializer, self).update(instance, validated_data)
| 35.56213 | 136 | 0.612313 | 656 | 6,010 | 5.458841 | 0.233232 | 0.073722 | 0.042726 | 0.018431 | 0.098297 | 0.062832 | 0.062832 | 0.045239 | 0.045239 | 0.045239 | 0 | 0.010989 | 0.303494 | 6,010 | 168 | 137 | 35.77381 | 0.844482 | 0.159401 | 0 | 0.165138 | 0 | 0 | 0.018578 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.091743 | false | 0.018349 | 0.073395 | 0.009174 | 0.284404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c8fb708a31f92aa30ed34d57058f849faa1abff | 488 | py | Python | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | 1 | 2021-05-20T19:33:37.000Z | 2021-05-20T19:33:37.000Z | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | S4/S4 Decompiler/Old Libraries/xdis/opcodes/opcode_23.py | NeonOcean/Environment | ca658cf66e8fd6866c22a4a0136d415705b36d26 | [
"CC-BY-4.0"
] | null | null | null | # (C) Copyright 2017, 2019 by Rocky Bernstein
"""
CPython 2.3 bytecode opcodes
This is a like Python 2.3's opcode.py with some classification
of stack usage.
"""
import xdis.opcodes.opcode_2x as opcode_2x
from xdis.opcodes.base import (
finalize_opcodes,
format_extended_arg,
init_opdata,
update_pj2,
)
version = 2.3
l = locals()
init_opdata(l, opcode_2x, version)
update_pj2(globals(), l)
opcode_arg_fmt = {"EXTENDED_ARG": format_extended_arg}
finalize_opcodes(l)
| 18.074074 | 62 | 0.741803 | 76 | 488 | 4.552632 | 0.592105 | 0.017341 | 0.098266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046455 | 0.161885 | 488 | 26 | 63 | 18.769231 | 0.799511 | 0.313525 | 0 | 0 | 0 | 0 | 0.036697 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c8fd73e664627880e0ceebfd049cc8bfc1a9308 | 2,302 | py | Python | prodj/pdblib/track.py | beauburrows/python-prodj-link | 1cc6b6c19e38ac09fadb91420e45adbe2c9691bb | [
"Apache-2.0"
] | 66 | 2018-01-16T09:25:30.000Z | 2022-03-24T14:58:44.000Z | prodj/pdblib/track.py | beauburrows/python-prodj-link | 1cc6b6c19e38ac09fadb91420e45adbe2c9691bb | [
"Apache-2.0"
] | 25 | 2018-05-16T12:17:11.000Z | 2021-02-06T11:09:03.000Z | prodj/pdblib/track.py | beauburrows/python-prodj-link | 1cc6b6c19e38ac09fadb91420e45adbe2c9691bb | [
"Apache-2.0"
] | 18 | 2018-03-15T13:54:40.000Z | 2022-03-24T20:49:43.000Z | from construct import Struct, Int8ul, Int16ul, Int32ul, Array, Const, Tell, Default
from .piostring import PioString, IndexedPioString
TRACK_ENTRY_MAGIC = 0x24
Track = Struct(
"entry_start" / Tell,
"magic" / Const(TRACK_ENTRY_MAGIC, Int16ul),
"index_shift" / Int16ul, # the index inside the page <<5 (0x00, 0x20, 0x40, ...)
"bitmask" / Int32ul,
"sample_rate" / Int32ul,
"composer_index" / Int32ul,
"file_size" / Int32ul,
"u1" / Int32ul, # some id?
"u2" / Int16ul, # always 19048?
"u3" / Int16ul, # always 30967?
"artwork_id" / Int32ul,
"key_id" / Int32ul, # not sure
"original_artist_id" / Int32ul,
"label_id" / Int32ul,
"remixer_id" / Int32ul,
"bitrate" / Int32ul,
"track_number" / Int32ul,
"bpm_100" / Int32ul,
"genre_id" / Int32ul,
"album_id" / Int32ul, # album artist is set in album entry
"artist_id" / Int32ul,
"id" / Int32ul, # the rekordbox track id
"disc_number" / Int16ul,
"play_count" / Int16ul,
"year" / Int16ul,
"sample_depth" / Int16ul, # not sure
"duration" / Int16ul,
"u4" / Int16ul, # always 41?
"color_id" / Int8ul,
"rating" / Int8ul,
"u5" / Default(Int16ul, 1), # always 1?
"u6" / Int16ul, # alternating 2 or 3
"str_idx" / Array(21, Int16ul),
"str_u1" / IndexedPioString(0), # empty
"texter" / IndexedPioString(1),
"str_u2" / IndexedPioString(2), # thought tracknumber -> wrong!
"str_u3" / IndexedPioString(3), # strange strings, often zero length, sometimes low binary values 0x01/0x02 as content
"str_u4" / IndexedPioString(4), # strange strings, often zero length, sometimes low binary values 0x01/0x02 as content
"message" / IndexedPioString(5),
"kuvo_public" / IndexedPioString(6), # "ON" or empty
"autoload_hotcues" / IndexedPioString(7), # "ON" or empty
"str_u5" / IndexedPioString(8), # 8
"str_u6" / IndexedPioString(9), # empty
"date_added" / IndexedPioString(10),
"release_date" / IndexedPioString(11),
"mix_name" / IndexedPioString(12),
"str_u7" / IndexedPioString(13), # empty
"analyze_path" / IndexedPioString(14),
"analyze_date" / IndexedPioString(15),
"comment" / IndexedPioString(16),
"title" / IndexedPioString(17),
"str_u8" / IndexedPioString(18), # always empty; only in newer versions?
"filename" / IndexedPioString(19),
"path" / IndexedPioString(20)
)
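# Illustrative parse (``data`` is a hypothetical bytes object positioned at a
# track row; construct consumes the fields in declaration order):
# entry = Track.parse(data)
# print(entry.id, entry.title, entry.bpm_100 / 100.0)  # bpm_100 stores BPM * 100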
| 37.129032 | 120 | 0.682016 | 278 | 2,302 | 5.503597 | 0.485612 | 0.052941 | 0.019608 | 0.030065 | 0.091503 | 0.091503 | 0.091503 | 0.091503 | 0.091503 | 0.091503 | 0 | 0.084211 | 0.174631 | 2,302 | 61 | 121 | 37.737705 | 0.721053 | 0.213727 | 0 | 0 | 0 | 0 | 0.233221 | 0 | 0 | 0 | 0.002237 | 0 | 0 | 1 | 0 | false | 0 | 0.033898 | 0 | 0.033898 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c92a88457ea25794c1ceb7149e058a6f314e651 | 1,301 | py | Python | core/wakeup/SnowboyWakeup.py | aibittek/WallERobot | 956f47ce91cb8e89d67c7a3df23e1a7014ffc1e5 | [
"MIT"
] | 1 | 2021-07-06T04:13:56.000Z | 2021-07-06T04:13:56.000Z | core/wakeup/SnowboyWakeup.py | aibittek/WallERobot | 956f47ce91cb8e89d67c7a3df23e1a7014ffc1e5 | [
"MIT"
] | null | null | null | core/wakeup/SnowboyWakeup.py | aibittek/WallERobot | 956f47ce91cb8e89d67c7a3df23e1a7014ffc1e5 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import os
import sys
import wakeup
import snowboydetect
class SnowboyWakeup(wakeup.Wakeup):
def __init__(self, args):
print(args)
        # Normalize scalar arguments into lists so the setup below is uniform
        if not isinstance(args['models'], list):
            args['models'] = [args['models']]
        if not isinstance(args['sensitivity'], list):
            args['sensitivity'] = [args['sensitivity']]
model_str = ",".join(args['models'])
sensitivity = args['sensitivity']
self.detector = snowboydetect.SnowboyDetect(
resource_filename=args['resource'].encode(), model_str=model_str.encode())
self.detector.SetAudioGain(args['audio_gain'])
self.detector.ApplyFrontend(args['apply_frontend'])
self.num_hotwords = self.detector.NumHotwords()
if len(sensitivity) != 0:
assert self.num_hotwords == len(sensitivity), \
"number of hotwords in decoder_model (%d) and sensitivity " \
"(%d) does not match" % (self.num_hotwords, len(sensitivity))
sensitivity_str = ",".join([str(t) for t in sensitivity])
if len(sensitivity) != 0:
self.detector.SetSensitivity(sensitivity_str.encode())
def start(self, audio_data):
return self.detector.RunDetection(audio_data)
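# Illustrative configuration (file paths are hypothetical):
# detector = SnowboyWakeup({'models': 'snowboy.umdl', 'sensitivity': 0.5,
#                           'resource': 'common.res', 'audio_gain': 1.0,
#                           'apply_frontend': False})
# result = detector.start(frame_bytes)  # index > 0 when a hotword is detected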
| 36.138889 | 86 | 0.617218 | 145 | 1,301 | 5.413793 | 0.4 | 0.09172 | 0.057325 | 0.033121 | 0.073885 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003071 | 0.249039 | 1,301 | 35 | 87 | 37.171429 | 0.800409 | 0.015373 | 0 | 0.068966 | 0 | 0 | 0.139171 | 0 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.068966 | false | 0 | 0.137931 | 0.034483 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c92ee282d9ec36a7c22cb593bc6dbc705b019f8 | 637 | py | Python | graycode.py | pritam19798/BPCS-Steganography | 3e0025f34bc42d4c5de1a84177a130ff33d3e35e | [
"MIT"
] | null | null | null | graycode.py | pritam19798/BPCS-Steganography | 3e0025f34bc42d4c5de1a84177a130ff33d3e35e | [
"MIT"
] | null | null | null | graycode.py | pritam19798/BPCS-Steganography | 3e0025f34bc42d4c5de1a84177a130ff33d3e35e | [
"MIT"
] | null | null | null |
import cv2
def grayCode(n):
return n ^ (n >> 1)
def inversegrayCode(n):
inv = 0;
while(n):
inv = inv ^ n;
n = n >> 1;
return inv;
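# Worked example: grayCode(5) == 5 ^ 2 == 7, and inversegrayCode(7) unwinds the
# prefix XORs: inv goes 7 -> 7 ^ 3 == 4 -> 4 ^ 1 == 5, recovering the original 5.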
def image_grayCode(image):
row,col,channel=image.shape
for r in range(row):
for c in range(col):
for ch in range(channel):
image[r][c][ch]=grayCode(image[r][c][ch])
return image
def image_inversegrayCode(image):
row,col,channel=image.shape
for r in range(row):
for c in range(col):
for ch in range(channel):
image[r][c][ch]=inversegrayCode(image[r][c][ch])
return image
| 19.30303 | 64 | 0.549451 | 94 | 637 | 3.702128 | 0.234043 | 0.12069 | 0.08046 | 0.103448 | 0.597701 | 0.597701 | 0.482759 | 0.482759 | 0.482759 | 0.482759 | 0 | 0.009174 | 0.315542 | 637 | 32 | 65 | 19.90625 | 0.788991 | 0 | 0 | 0.434783 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.173913 | false | 0 | 0.043478 | 0.043478 | 0.391304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c973fe5744d6e662b689c47a3285c65fec2e1df | 6,768 | py | Python | WRN-backbone-32/utils/utils_my.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | 5 | 2021-05-23T13:05:45.000Z | 2022-02-13T21:40:59.000Z | WRN-backbone-32/utils/utils_my.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | null | null | null | WRN-backbone-32/utils/utils_my.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | 3 | 2021-08-11T03:23:31.000Z | 2021-11-17T01:48:52.000Z | from nested_dict import nested_dict
from functools import partial
import torch
from torch.nn.init import kaiming_normal_
from torch.nn.parallel._functions import Broadcast
from torch.nn.parallel import scatter, parallel_apply, gather
import torch.nn.functional as F
from torch.distributions import Normal, Independent, kl
import pdb
import argparse  # explicit import: str2bool below raises argparse.ArgumentTypeError
import numpy as np
import math
import cv2
from utils.config import *
def str2bool(v):
"""
codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
"""
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def cuda(tensor, is_cuda):
if is_cuda : return tensor.cuda()
else : return tensor
def postprocess_prediction(prediction, size=None, print_info=True, ostu_th=False):
"""
Postprocess saliency maps by resizing and applying gaussian blurringself.
args:
prediction: numpy array with saliency postprocess_prediction
size: original (H,W) of the image
returns:
numpy array with saliency map normalized 0-255 (int8)
"""
if print_info:
print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
prediction = prediction - np.min(prediction)
# prediction = prediction - np.mean(prediction)
# prediction[prediction<0] = 0
# print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
if np.max(prediction) != 0:
saliency_map = (prediction/np.max(prediction) * 255).astype(np.uint8)
else:
saliency_map = prediction.astype(np.uint8)
if size is None:
size = MNIST_RESIZE
# resize back to original size
saliency_map = cv2.GaussianBlur(saliency_map, (7, 7), 0)
saliency_map = cv2.resize(saliency_map, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
# clip again
# saliency_map = np.clip(saliency_map, 0, 255)
if np.max(saliency_map)!=0:
saliency_map = saliency_map.astype('float') / np.max(saliency_map) * 255.
else:
print('Zero saliency map.')
if ostu_th:
_, th2 = cv2.threshold(saliency_map, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
# ret2, th2 = cv2.threshold(saliency_map, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return th2
return saliency_map
def distillation(y, teacher_scores, labels, T, alpha):
p = F.log_softmax(y/T, dim=1)
q = F.softmax(teacher_scores/T, dim=1)
# l_kl = F.kl_div(p, q, size_average=False) * (T**2) / y.shape[0]
l_kl = F.kl_div(p, q, reduction='sum') * (T**2) / y.shape[0]
l_ce = F.cross_entropy(y, labels)
return l_kl * alpha + l_ce * (1. - alpha)
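# Illustrative call (hypothetical logits tensors of shape [batch, classes]):
# loss = distillation(student_logits, teacher_logits, labels, T=4.0, alpha=0.9)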
def distillation_my(y, teacher_scores, labels, T, alpha):
p = F.log_softmax(y/T, dim=1)
q = F.softmax(teacher_scores/T, dim=1)
# l_kl = F.kl_div(p, q, size_average=False) * (T**2) / y.shape[0]
l_kl = F.kl_div(p, q, reduction='sum') * (T**2) / y.shape[0]
    l_ce = F.cross_entropy(y, labels).div(math.log(2))  # divide by log(2) to convert nats to bits
return l_kl * alpha + l_ce * (1. - alpha)
def at_my(x):
return F.normalize(x.pow(2).mean(1))
def at(x):
return F.normalize(x.pow(2).mean(1).view(x.size(0), -1))
def at_loss(x, y):
# pdb.set_trace()
if y.size()[-2:] != x.size()[-2:]:
y = F.interpolate(y, x.size()[-2:])
# y = y.view(y.size(0), -1)
return (at(x) - at(y)).pow(2).mean()
def at_loss_my_new(x, y):
# pdb.set_trace()
if y.size()[-2:] != x.size()[-2:]:
y = F.interpolate(y, x.size()[-2:])
return (x - y).pow(2).mean()
# def kl_divergence(self, latent_space1, latent_space2):
# kl_div = kl.kl_divergence(latent_space1, latent_space2)
# return kl_div
def at_loss_my_dist(s, t):
return torch.mean(kl.kl_divergence(s, t))
def at_loss_my(x, y):
# pdb.set_trace()
if y.size()[-2:] != x.size()[-2:]:
y = F.interpolate(y, x.size()[-2:])
y = y.view(y.size(0), -1)
# y = (y-y.min()) / (y.max()+1e-8)
# tmp_x = at(x)
# tmp_x = (tmp_x-tmp_x.min()) / (tmp_x.max()+1e-8) # _norm
# return (tmp_x - y).pow(2).mean()
return (x - y).pow(2).mean()
# def at_loss_my(x, y):
# # pdb.set_trace()
# if y.size()[-2:] != x.size()[-2:]:
# y = F.interpolate(y, x.size()[-2:])
# y = y.view(y.size(0), -1)
# y = y * 0.25 # _d4
# return (at(x) - y).pow(2).mean()
def cast(params, dtype='float'):
if isinstance(params, dict):
return {k: cast(v, dtype) for k,v in params.items()}
else:
return getattr(params.cuda() if torch.cuda.is_available() else params, dtype)()
def conv_params(ni, no, k=1):
return kaiming_normal_(torch.Tensor(no, ni, k, k))
def linear_params(ni, no):
return {'weight': kaiming_normal_(torch.Tensor(no, ni)), 'bias': torch.zeros(no)}
def bnparams(n):
return {'weight': torch.rand(n),
'bias': torch.zeros(n),
'running_mean': torch.zeros(n),
'running_var': torch.ones(n)}
def data_parallel(f, input, params, mode, device_ids, output_device=None):
device_ids = list(device_ids)
if output_device is None:
output_device = device_ids[0]
if len(device_ids) == 1:
return f(input, params, mode)
params_all = Broadcast.apply(device_ids, *params.values())
params_replicas = [{k: params_all[i + j*len(params)] for i, k in enumerate(params.keys())}
for j in range(len(device_ids))]
replicas = [partial(f, params=p, mode=mode)
for p in params_replicas]
inputs = scatter([input], device_ids)
outputs = parallel_apply(replicas, inputs)
return gather(outputs, output_device)
def flatten(params):
return {'.'.join(k): v for k, v in nested_dict(params).items_flat() if v is not None}
def batch_norm(x, params, base, mode):
# pdb.set_trace()
return F.batch_norm(x, weight=params[base + '.weight'],
bias=params[base + '.bias'],
running_mean=params[base + '.running_mean'],
running_var=params[base + '.running_var'],
training=mode)
def print_tensor_dict(params):
kmax = max(len(key) for key in params.keys())
for i, (key, v) in enumerate(params.items()):
print(str(i).ljust(5), key.ljust(kmax + 3), str(tuple(v.shape)).ljust(23), torch.typename(v), v.requires_grad)
def set_requires_grad_except_bn_(params):
for k, v in params.items():
if not k.endswith('running_mean') and not k.endswith('running_var'):
v.requires_grad = True
| 31.924528 | 130 | 0.621602 | 1,048 | 6,768 | 3.879771 | 0.22042 | 0.045991 | 0.011805 | 0.012051 | 0.283817 | 0.27816 | 0.252336 | 0.245942 | 0.23881 | 0.21274 | 0 | 0.024801 | 0.219563 | 6,768 | 211 | 131 | 32.075829 | 0.744983 | 0.218824 | 0 | 0.169492 | 0 | 0 | 0.038055 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169492 | false | 0 | 0.110169 | 0.067797 | 0.457627 | 0.050847 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c9cd4d2ff71a7b5c5769ea7aac45585efe9e300 | 913 | py | Python | src/main.py | ChiaCatPool/ChiaSignature | 114cce3b1e811183c85ef745e21f564b9a6e718c | [
"MIT"
] | 2 | 2021-05-27T09:36:54.000Z | 2021-10-12T08:03:08.000Z | src/main.py | Pow-Duck/ChiaSignature | 114cce3b1e811183c85ef745e21f564b9a6e718c | [
"MIT"
] | null | null | null | src/main.py | Pow-Duck/ChiaSignature | 114cce3b1e811183c85ef745e21f564b9a6e718c | [
"MIT"
] | null | null | null | from fastapi import FastAPI, Response
from pydantic import BaseModel
import src.option as option
import src.plot as plot
app = FastAPI()
@app.get("/")
async def root():
return "Chia Signature Version: 0.0.1 https://github.com/Pow-Duck/ChiaSignature"
class InputDataModel(BaseModel):
farmer_public_key: str
pool_key: str
@app.post("/signature", status_code=200)
async def signature(input_data: InputDataModel, response: Response):
try:
        (plot_id1, plot_memo2) = plot.create_plots(input_data.farmer_public_key, input_data.pool_key)
return option.api_return(plot_id1, plot_memo2, True, None)
except Exception as e:
response.status_code = 500
print("err: ", e)
return option.api_return(None, None, False, "Failed to generate, please verify that the parameters are correct")
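# Illustrative request (key values are hypothetical; default uvicorn port assumed):
#   curl -X POST http://localhost:8000/signature \
#        -H 'Content-Type: application/json' \
#        -d '{"farmer_public_key": "...", "pool_key": "..."}'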
| 31.482759 | 120 | 0.731654 | 130 | 913 | 4.953846 | 0.5 | 0.069876 | 0.069876 | 0.062112 | 0.170807 | 0.170807 | 0.170807 | 0.170807 | 0.170807 | 0.170807 | 0 | 0.017173 | 0.170865 | 913 | 28 | 121 | 32.607143 | 0.833554 | 0 | 0 | 0 | 0 | 0 | 0.166484 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.190476 | 0 | 0.47619 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5c9e7f1f755ab1c025d13ac796c3080931f4af5f | 9,720 | py | Python | notifandhelpline.py | deepthiinduri/TRACK_THE_COVID | c1f3773c5d7e9f41fe464786c6e29bb419ab78e0 | [
"Apache-2.0"
] | 1 | 2021-07-21T08:13:37.000Z | 2021-07-21T08:13:37.000Z | notifandhelpline.py | deepthiinduri/TRACK_THE_COVID | c1f3773c5d7e9f41fe464786c6e29bb419ab78e0 | [
"Apache-2.0"
] | null | null | null | notifandhelpline.py | deepthiinduri/TRACK_THE_COVID | c1f3773c5d7e9f41fe464786c6e29bb419ab78e0 | [
"Apache-2.0"
] | 1 | 2022-02-28T13:05:29.000Z | 2022-02-28T13:05:29.000Z | from tkinter import *
from PIL import ImageTk, Image ,ImageDraw, ImageFont, ImageFilter
import json
import html5lib
import plyer
import urllib.request
import imageio
import webbrowser
import requests
import bs4
def notif_and_helplines():
url = "https://www.mohfw.gov.in/"
html_data = requests.get(url)
bs = bs4.BeautifulSoup(html_data.text,'html.parser')
newWindow = Toplevel()
newWindow.title("NOTIFICATIONS, HELPLINES AND ADVISORIES")
newWindow.state('zoomed')
newWindow.iconbitmap(r'Images\coronavirus_image_UXL_icon.ico')
def shift():
x1,y1,x2,y2 = canvas.bbox("marquee")
if(x2<0 or y1<0):
x1 = canvas.winfo_width()
y1 = canvas.winfo_height()//2
canvas.coords("marquee",x1,y1)
else:
canvas.move("marquee", -2, 0)
canvas.after(1000//fps,shift)
labe1 = Label(newWindow, text = " LATEST NOTIFICATIONS " , font = "Times 28 bold roman" , pady = 10, padx = 20 ,fg = "#EC4D37", bg = "black").pack()
labe2 = Label(newWindow, text = " " , font = "Times 15 bold roman").pack()
canvas = Canvas(newWindow,bg = '#EC4D37')
canvas.pack(fill = BOTH, expand = 1)
text_var = bs.find("span" ,class_ = "tested").get_text()
text = canvas.create_text(0,-2000, text = text_var, font = ('Times New Roman',20,'bold'),fill = 'black',tags = ("marquee",),anchor = 'w')
x1,y1,x2,y2 = canvas.bbox("marquee")
width = x2-x1
height = y2-y1
canvas['width'] = width
canvas['height'] = height
fps = 45
shift()
def labe3_open():
webbrowser.open_new('https://cdn.s3waas.gov.in/s30777d5c17d4066b82ab86dff8a46af6f/uploads/2020/05/2020050898.pdf')
labe3 = Label(newWindow, text = " For any technical enquiry with respect to COVID-19, you may kindly email on technicalquery.covid19@gov.in Aarogya Setu IVRS ✆ 1921 ",
font = "Times 15 normal roman" , pady = 3, padx = 170 ,fg = "red", bg = "gray13", cursor = "hand2")
labe3.bind("<Button-1>", lambda e: labe3_open())
labe3.pack()
labe4 = Label(newWindow,text = " Helpline Number : +91-11-23978046 Toll Free : 1075 Helpline Email ID : ncov2019@gov.in ", font = "Times 13 normal roman" ,fg = "black", bg = "yellow",padx = 420 ).pack()
def labe5_open():
url2 = "https://www.mohfw.gov.in/pdf/StatewiseCovidHospitalslink19062020.pdf"
webbrowser.open_new(url2)
labe5 = Label(newWindow,text = " COVID-19 Facilities in States & Union Territories ",font = "Times 12 bold roman" ,fg = "blue",
bg = "yellow", cursor = "hand2", padx = 620 )
labe5.bind("<Button-1>", lambda e: labe5_open())
labe5.pack()
frame = Frame(newWindow,width = 900,height = 900)
frame.pack(expand = True, fill = BOTH)
canvas1 = Canvas(frame,width = 900, height = 900,scrollregion = (0,0,1000,1000))
hbar = Scrollbar(frame, orient = HORIZONTAL)
hbar.pack(side = BOTTOM,fill = X)
hbar.config(command = canvas1.xview)
vbar = Scrollbar(frame,orient = VERTICAL)
vbar.pack(side = LEFT,fill = Y)
vbar.config(command = canvas1.yview)
canvas1.config(width = 900,height = 900)
canvas1.config(xscrollcommand = hbar.set, yscrollcommand = vbar.set)
canvas1.pack(side=LEFT,expand = True,fill = BOTH)
info_div1 = bs.find("div" , class_ = "main-body-content").find("section" ,class_ = "site-update").find("div" , class_ = "container").find("div" , class_ = "row").find_all("div" , class_ = "update-box")
info_div2 = bs.find("div" , class_ = "main-body-content").find_all("section" ,class_ = "site-update")[4].find("div" , class_ = "container").find("div" , class_ = "row").find("div" , class_ = "site-faq").find("div" , class_ = "faq-content")
def Button_1_open():
webbrowser.open_new(info_div1[0].find("a").get('href'))
def Button_2_open():
webbrowser.open_new(info_div1[1].find("a").get('href'))
def Button_3_open():
webbrowser.open_new(info_div1[2].find("a").get('href'))
def Button_4_open():
webbrowser.open_new(info_div1[3].find("a").get('href'))
def Button_5_open():
webbrowser.open_new(info_div1[4].find("a").get('href'))
def Button_6_open():
webbrowser.open_new(info_div1[5].find("a").get('href'))
def Button_7_open():
webbrowser.open_new(info_div2.find("a").get('href'))
render = ImageTk.PhotoImage(Image.open ("Images/coronavirus3.png").resize((300,40) , Image.ANTIALIAS))
covid_img = Label(canvas1)
covid_img.image = render
canvas1.create_image(180, 45,image = render)
f1 = ('Bookman Old Style', "25", "bold roman")
text_1 = Label(canvas1, text = " Updates ",fg = "gray20" , font = f1)
canvas_text1 = canvas1.create_window(415, 45, window = text_1)
# text = info_div1[0].find("a").get_text().strip()
button_1 = Button(canvas1, text = " COVID-19 Vaccination of Pregnant Women PosterEnglish " ,wraplength = 300,command = Button_1_open , cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
canvas_button1 = canvas1.create_window(250, 150, window = button_1)
button_2 = Button(canvas1, text = " Counseling booklet for Frontline workers and Vaccinators " ,wraplength = 300,command = Button_2_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
canvas_button2 = canvas1.create_window(250, 250, window = button_2)
button_3 = Button(canvas1, text = info_div1[2].find("a").get_text().strip() ,wraplength = 300,command = Button_3_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
canvas_button3 = canvas1.create_window(250, 350, window = button_3)
button_4 = Button(canvas1, text = " Toolkit for Youth Campaign on COVID Appropriate Behaviour, Vaccination drive and Psychosocial well-being " ,wraplength = 300,command = Button_4_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
canvas_button4 = canvas1.create_window(250, 450, window = button_4)
button_5 = Button(canvas1, text = info_div1[4].find("a").get_text().strip() ,wraplength = 300,command = Button_5_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman", padx = 4, pady = 4,height = 5,width = 57)
canvas_button5 = canvas1.create_window(250, 550, window = button_5)
button_6 = Button(canvas1, text = info_div1[5].find("a").get_text().strip() ,wraplength = 300,command = Button_6_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
canvas_button6 = canvas1.create_window(250, 650, window = button_6)
text_2 = Label(canvas1, text = " FAQ's ", fg = "gray20" , font = "Times 25 bold roman")
canvas_text2 = canvas1.create_window(80, 755, window = text_2)
button_7 = Button(canvas1, text = info_div2.get_text() ,wraplength = 500,command = Button_7_open, cursor = "hand2", fg = "blue" , font = "serif 10 normal roman" , padx = 4, pady = 4,height = 5,width = 57)
canvas_button7 = canvas1.create_window(250, 855, window = button_7)
text_3 = Label(canvas1, text = " source: " , font = "Times 15 bold roman")
canvas_text2 = canvas1.create_window(160, 970, window = text_3)
def call_back(event):
webbrowser.open_new(event.widget.cget("text"))
lbl = Label(canvas1, text = r"www.mohfw.gov.in", fg = "blue" , cursor = "hand2",font = "Times 13 bold roman")
canvas_lbl = canvas1.create_window(280, 970, window = lbl)
lbl.bind("<Button-1>", call_back)
render2 = ImageTk.PhotoImage(Image.open ("Images/vaccination.png").resize((570,550) , Image.ANTIALIAS))
img2 = Label(frame, image = render2)
img2.image = render2
img2.pack(side = RIGHT)
url2 = "https://www.worldometers.info/coronavirus/"
html_data2 = requests.get(url2)
bs2 = bs4.BeautifulSoup(html_data2.text,'html.parser')
info_data = bs2.find("div" , class_ = "content-inner").find_all("div" , id = "maincounter-wrap")
f = ("Times", "20", "bold italic")
text1 = Label(canvas1, text = " Worldwide " , font = "Times 25 bold roman" , width = 17)
canvas1.create_window(750, 45, window = text1)
text2 = Label(canvas1, text = info_data[0].get_text() , font = f , bg = "light blue", height = 4, width = 17,borderwidth = 1, relief = "solid")
canvas1.create_window(750, 150, window = text2)
text3 = Label(canvas1, text = info_data[1].get_text() , font = f , bg = "tomato", height = 4, width = 17,borderwidth = 1, relief = "solid")
canvas1.create_window(750, 300, window = text3)
text4 = Label(canvas1, text = info_data[2].get_text() , font = f , bg = "light green", height = 4, width = 17,borderwidth = 1, relief = "solid")
canvas1.create_window(750, 450, window = text4)
info_data2 = bs2.find("div" , class_ = "content-inner").find_all("div" , class_ = "col-md-6")
text5 = Label(canvas1, text = " Active Cases " + "\n" + "────────────────────" + "\n\n" + info_data2[0].find("div" , class_ = "number-table-main").get_text() + "\n" + "currently infected patients" + "\n" , font = "Times 19 bold italic" , bg = "gray85", height = 6, width = 24,borderwidth = 1, relief = "solid")
canvas1.create_window(750, 650, window = text5)
text6 = Label(canvas1, text = " Closed Cases " + "\n" + "────────────────────" + "\n\n" + info_data2[1].find("div" , class_ = "number-table-main").get_text() + "\n" + "cases which had an outcome" + "\n" , font = "Times 19 bold italic" , bg = "gray85", height = 6, width = 24,borderwidth = 1, relief = "solid")
canvas1.create_window(750, 880, window = text6)
| 66.575342 | 315 | 0.648045 | 1,347 | 9,720 | 4.586488 | 0.244989 | 0.037876 | 0.052282 | 0.027193 | 0.362901 | 0.303173 | 0.240693 | 0.207996 | 0.197313 | 0.140175 | 0 | 0.074192 | 0.188786 | 9,720 | 145 | 316 | 67.034483 | 0.704122 | 0 | 0 | 0.014925 | 0 | 0.022388 | 0.222326 | 0.011484 | 0.014925 | 0 | 0 | 0 | 0 | 1 | 0.089552 | false | 0 | 0.074627 | 0 | 0.164179 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ca16157b5f09400ffca7155b0bd7515dc8fa4bf | 790 | py | Python | tests/test_mean_std.py | rdemaria/xfields | 0f1a984c4dda7cd5dadd199e748fb2b584a096c9 | [
"MIT"
] | null | null | null | tests/test_mean_std.py | rdemaria/xfields | 0f1a984c4dda7cd5dadd199e748fb2b584a096c9 | [
"MIT"
] | null | null | null | tests/test_mean_std.py | rdemaria/xfields | 0f1a984c4dda7cd5dadd199e748fb2b584a096c9 | [
"MIT"
] | null | null | null | import numpy as np
import xobjects as xo
import xfields as xf
def test_mean_and_std():
for CTX in xo.ContextCpu, xo.ContextPyopencl, xo.ContextCupy:
if CTX not in xo.context.available:
continue
print(f"Test {CTX}")
ctx = CTX()
n_x=100
a_host = np.array(np.random.rand(n_x))
a_dev = ctx.nparray_to_context_array(a_host)
mm, ss = xf.mean_and_std(a_dev)
assert np.isclose(mm, np.mean(a_host))
assert np.isclose(ss, np.std(a_host))
weights_host = np.zeros_like(a_host)+.2
weights_dev = ctx.nparray_to_context_array(weights_host)
mm, ss = xf.mean_and_std(a_dev, weights=weights_dev)
assert np.isclose(mm, np.mean(a_host))
assert np.isclose(ss, np.std(a_host))
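# Note: the weights above are uniform (all 0.2), so the weighted mean and std
# must coincide with NumPy's unweighted results -- hence the same asserts.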
| 28.214286 | 65 | 0.640506 | 129 | 790 | 3.689922 | 0.348837 | 0.073529 | 0.12605 | 0.063025 | 0.451681 | 0.451681 | 0.338235 | 0.338235 | 0.338235 | 0.243697 | 0 | 0.00678 | 0.253165 | 790 | 27 | 66 | 29.259259 | 0.8 | 0 | 0 | 0.2 | 0 | 0 | 0.012658 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.2 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ca225f5afe43c654b35619caef218fd0f7a679c | 3,286 | py | Python | promort/predictions_manager/serializers.py | lucalianas/ProMort | 63702e1b573025e1f956f7d7a0e829f655e728f9 | [
"MIT"
] | 3 | 2016-12-28T08:12:51.000Z | 2020-07-08T21:03:48.000Z | promort/predictions_manager/serializers.py | lucalianas/ProMort | 63702e1b573025e1f956f7d7a0e829f655e728f9 | [
"MIT"
] | 37 | 2016-11-11T09:57:45.000Z | 2022-03-31T16:04:53.000Z | promort/predictions_manager/serializers.py | lucalianas/ProMort | 63702e1b573025e1f956f7d7a0e829f655e728f9 | [
"MIT"
] | 4 | 2016-04-22T07:49:40.000Z | 2021-09-22T08:09:44.000Z | # Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
try:
import simplejson as json
except ImportError:
import json
from rest_framework import serializers
from predictions_manager.models import Prediction, TissueFragmentsCollection, TissueFragment
from slides_manager.serializers import SlideSerializer
class PredictionSerializer(serializers.ModelSerializer):
class Meta:
model = Prediction
fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
read_only_fields = ('id', 'creation_date')
def validate_provenance(self, value):
try:
json.loads(value)
return value
except ValueError:
raise serializers.ValidationError('Not a valid JSON in \'provenance\' field')
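# Usage sketch (hypothetical values, not part of the ProMort test suite):
#   PredictionSerializer().validate_provenance('{"model": "seg-v1"}')  # returns the string
#   PredictionSerializer().validate_provenance('not-json')             # raises ValidationError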
class PredictionDetailsSerializer(serializers.ModelSerializer):
slide = SlideSerializer(many=False, read_only=True)
class Meta:
model = Prediction
fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
read_only_fields = ('id', 'label', 'creation_date', 'slide', 'type', 'omero_id', 'provenance')
class TissueFragmentsCollectionSerializer(serializers.ModelSerializer):
class Meta:
model = TissueFragmentsCollection
fields = ('id', 'prediction', 'creation_date')
read_only_fields = ('id', 'creation_date')
class TissueFragmentSerializer(serializers.ModelSerializer):
class Meta:
model = TissueFragment
fields = ('id', 'collection', 'shape_json', 'creation_date')
read_only_fields = ('id', 'creation_date')
def validate_shape_json(self, value):
try:
json.loads(value)
return value
except ValueError:
raise serializers.ValidationError('Not a valid JSON in \'shape_json\' field')
class TissueFragmentsCollectionDetailsSerializer(serializers.ModelSerializer):
fragments = TissueFragmentSerializer(many=True, read_only=True)
prediction = PredictionSerializer(many=False, read_only=True)
class Meta:
model = TissueFragmentsCollection
fields = ('id', 'prediction', 'creation_date', 'fragments')
read_only_fields = ('id', 'creation_date')
| 38.209302 | 102 | 0.714242 | 378 | 3,286 | 6.119048 | 0.391534 | 0.034587 | 0.030264 | 0.034587 | 0.35668 | 0.316904 | 0.304799 | 0.304799 | 0.242542 | 0.182879 | 0 | 0.0019 | 0.19933 | 3,286 | 85 | 103 | 38.658824 | 0.877233 | 0.322276 | 0 | 0.510638 | 0 | 0 | 0.158133 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.12766 | 0 | 0.489362 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cac943593c7fe65cc7bacbda5a3b4ec95fe7dc4 | 3,764 | py | Python | research/im2txt/im2txt/gradcam_wrapper.py | dorazhao99/women-snowboard | 9cb2569d7a3cbb846d10aabae825ead9a6e1de29 | [
"Apache-2.0"
] | 19 | 2018-09-26T03:52:59.000Z | 2021-08-19T08:41:06.000Z | research/im2txt/im2txt/gradcam_wrapper.py | dorazhao99/women-snowboard | 9cb2569d7a3cbb846d10aabae825ead9a6e1de29 | [
"Apache-2.0"
] | 13 | 2020-06-29T03:53:45.000Z | 2022-03-11T23:28:19.000Z | research/im2txt/im2txt/gradcam_wrapper.py | dorazhao99/women-snowboard | 9cb2569d7a3cbb846d10aabae825ead9a6e1de29 | [
"Apache-2.0"
] | 6 | 2018-09-19T17:07:00.000Z | 2021-03-21T14:20:25.000Z | """Model wrapper class for performing GradCam visualization with a ShowAndTellModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from im2txt import show_and_tell_model
from im2txt.inference_utils import inference_wrapper_base
import numpy as np
import matplotlib
# Fix to run remotely (with no display)
# matplotlib.use('agg')
import tensorflow as tf
import PIL.Image
from matplotlib import pylab as P
import pickle
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.colors as mcolors
import os
import os.path as osp
slim=tf.contrib.slim
import scipy
import sys
sys.path.append('gradcam')
def transparent_cmap(cmap, N=255):
"Copy colormap and set alpha values"
mycmap = cmap
mycmap._init()
mycmap._lut[:,-1] = np.linspace(0, 0.8, N+4)
return mycmap
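# Usage sketch: overlaying a heatmap without fully hiding the underlying image.
# `plt.cm.jet` is just an example colormap; any matplotlib colormap works.
#   mycmap = transparent_cmap(plt.cm.jet)
#   ax.contourf(x, y, heatmap, 15, cmap=mycmap)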
class GradCamWrapper(inference_wrapper_base.InferenceWrapperBase):
"""Model wrapper class for performing inference with a ShowAndTellModel."""
def __init__(self):
super(GradCamWrapper, self).__init__()
def build_model(self, model_config):
model = show_and_tell_model.ShowAndTellModel(model_config, mode="gradcam")
model.build()
return model
def process_image(self, sess, encoded_image, input_feed, filename, vocab, word_index=1, word_id=None, save_path=None):
graph = tf.get_default_graph()
softmax = sess.run(fetches=["softmax:0"], feed_dict={"image_feed:0": encoded_image, "input_feed:0": input_feed})
logits = graph.get_tensor_by_name('softmax:0')
neuron_selector = tf.placeholder(tf.int32)
neuron_pred = logits[0,word_index][neuron_selector]
pred_max = np.argmax(softmax[0][0][word_index])
if word_id is not None:
print('%s\tpredicted: %s with prob %f , given: %s with prob %.10f' % (filename, vocab.id_to_word(pred_max), np.max(softmax[0][0][word_index]), vocab.id_to_word(word_id), softmax[0][0][word_index][word_id]))
pred_max = word_id
from grad_cam import GradCam
grad_cam = GradCam(graph, sess, neuron_pred, graph.get_tensor_by_name('concat:0'), conv_layer = graph.get_tensor_by_name('InceptionV3/InceptionV3/Mixed_7c/concat:0'))
input_image = PIL.Image.open(filename)
input_image = input_image.convert('RGB')
im = np.asarray(input_image)
im_resized = scipy.misc.imresize(im, (299, 299), interp='bilinear', mode=None)
im_resized = im_resized / 127.5 - 1.0
grad_mask_2d = grad_cam.GetMask(im_resized, feed_dict = {neuron_selector: pred_max, "input_feed:0": input_feed}, should_resize = False, three_dims = False)
# if np.min(grad_mask_2d) == np.max(grad_mask_2d): grad_mask_2d[0,0]=1.0000001 # Fix for a bug that happens very rarely
mycmap = transparent_cmap(plt.cm.jet)
w = im_resized.shape[0]
h = im_resized.shape[1]
y, x = np.mgrid[0:h, 0:w]
grad_mask_2d_norm = grad_mask_2d / np.max(grad_mask_2d)
grad_mask_2d_upscaled = scipy.misc.imresize(grad_mask_2d_norm, (w, h), interp='bilinear', mode='F')
percentile = 99
vmax = np.percentile(grad_mask_2d_upscaled, percentile)
vmin = np.min(grad_mask_2d_upscaled)
mask_grayscale_upscaled = np.clip((grad_mask_2d_upscaled - vmin) / (vmax - vmin), 0, 1)
fig, ax = plt.subplots(1, 1)
plt.axis('off')
ax.imshow( ((im_resized + 1.0) * 127.5)/255.0)
cb = ax.contourf(x, y, mask_grayscale_upscaled, 15, cmap=mycmap)
if save_path is not None and save_path != '':
np.save(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.npy', grad_mask_2d)
plt.savefig(save_path + osp.basename(filename)[0:-4] + '_' + vocab.id_to_word(pred_max) + '.jpg', bbox_inches='tight')
plt.close()
else:
plt.show()
| 36.192308 | 212 | 0.7144 | 578 | 3,764 | 4.385813 | 0.32699 | 0.041026 | 0.051282 | 0.020513 | 0.16568 | 0.074162 | 0.066272 | 0.066272 | 0.066272 | 0.066272 | 0 | 0.029477 | 0.161796 | 3,764 | 103 | 213 | 36.543689 | 0.77401 | 0.096706 | 0 | 0 | 0 | 0 | 0.073121 | 0.011992 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.253521 | 0 | 0.352113 | 0.028169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cb1d1c1b2d0620a0d048a8f6d7b4fcc7668f049 | 1,851 | py | Python | tests/tcn/test_keras_onnx.py | ggardiakos/timemachines | 845001fc6ca3005d3612ef8f44040f5d1e15d9b8 | [
"MIT"
] | null | null | null | tests/tcn/test_keras_onnx.py | ggardiakos/timemachines | 845001fc6ca3005d3612ef8f44040f5d1e15d9b8 | [
"MIT"
] | null | null | null | tests/tcn/test_keras_onnx.py | ggardiakos/timemachines | 845001fc6ca3005d3612ef8f44040f5d1e15d9b8 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: Apache-2.0
from timemachines.skaters.tcn.tcninclusiontraining import using_tcntraining
if using_tcntraining:
from onnxruntime import InferenceSession
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, Input
def test_keras_onnx_runtime():
"""
:return: test if onnx and keras seem to be working
"""
# adapted from https://github.com/microprediction/tensorflow-onnx/blob/master/examples/end2end_tfkeras.py
# Creates the model.
model = keras.Sequential()
model.add(Input((4, 4)))
model.add(layers.SimpleRNN(8))
model.add(layers.Dense(2))
print(model.summary())
input_names = [n.name for n in model.inputs]
output_names = [n.name for n in model.outputs]
print('inputs:', input_names)
print('outputs:', output_names)
########################################
# Training
# ....
# Skipped.
########################################
# Testing the model.
input_data = np.random.randn(2, 4, 4).astype(np.float32)  # avoid shadowing the built-in `input`
expected = model.predict(input_data)
print(expected)
########################################
# Serialize but do not save the model
from tf2onnx.keras2onnx_api import convert_keras
onnx_model = convert_keras(model=model,name='example')
onnx_model_as_byte_string = onnx_model.SerializeToString()
########################################
# Runs onnxruntime.
session = InferenceSession(onnx_model_as_byte_string)
got = session.run(None, {'input_1': input_data})
print(got[0])
########################################
# Measures the differences.
assert (np.abs(got[0] - expected).max())<1e-5
| 34.277778 | 113 | 0.558617 | 196 | 1,851 | 5.153061 | 0.505102 | 0.035644 | 0.027723 | 0.025743 | 0.083168 | 0.041584 | 0.041584 | 0 | 0 | 0 | 0 | 0.013591 | 0.244733 | 1,851 | 53 | 114 | 34.924528 | 0.70887 | 0.179363 | 0 | 0 | 0 | 0 | 0.022551 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.038462 | false | 0 | 0.230769 | 0 | 0.269231 | 0.192308 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cb4f3d7bb829612fae0f449b3de61ecc0409094 | 1,476 | py | Python | software/Opal/spud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-11T02:39:46.000Z | 2020-05-11T03:08:38.000Z | software/multifluids_icferst/libspud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | null | null | null | software/multifluids_icferst/libspud/diamond/build/lib.linux-x86_64-2.7/diamond/databuttonswidget.py | msc-acse/acse-9-independent-research-project-Wade003 | cfcba990d52ccf535171cf54c0a91b184db6f276 | [
"MIT"
] | 2 | 2020-05-21T22:50:19.000Z | 2020-10-28T17:16:31.000Z | #!/usr/bin/env python
# This file is part of Diamond.
#
# Diamond is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diamond is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diamond. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
class DataButtonsWidget(gtk.HBox):
__gsignals__ = { "revert" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ()),
"store" : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE, ())}
def __init__(self):
gtk.HBox.__gobject_init__(self)
revertButton = gtk.Button()
revertButton.set_label("Revert data")
revertButton.connect("clicked", self._revert)
storeButton = gtk.Button()
storeButton.set_label("Store data")
storeButton.connect("clicked", self._store)
self.pack_start(revertButton)
self.pack_end(storeButton)
return
def _revert(self, widget = None):
self.emit("revert")
def _store(self, widget = None):
self.emit("store")
gobject.type_register(DataButtonsWidget)
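# Usage sketch (hypothetical handlers; Diamond wires these to its own data model):
#   buttons = DataButtonsWidget()
#   buttons.connect("store", lambda widget: save_current_data())
#   buttons.connect("revert", lambda widget: reload_stored_data())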
| 30.75 | 79 | 0.70664 | 199 | 1,476 | 5.100503 | 0.517588 | 0.014778 | 0.038424 | 0.056158 | 0.193103 | 0.124138 | 0.068966 | 0 | 0 | 0 | 0 | 0.000845 | 0.197832 | 1,476 | 47 | 80 | 31.404255 | 0.856419 | 0.45935 | 0 | 0 | 0 | 0 | 0.072797 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.095238 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cb5891ba04d0b3c481ca3b5fc5261fc95bd5a1e | 5,274 | py | Python | metapose/launch_iterative_solver.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
] | null | null | null | metapose/launch_iterative_solver.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
] | null | null | null | metapose/launch_iterative_solver.py | dumpmemory/google-research | bc87d010ab9086b6e92c3f075410fa6e1f27251b | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch script for running a full probabilistic iterative solver baseline."""
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_datasets as tfds
from metapose import data_utils
from metapose import inference_time_optimization as inf_opt
_INPUT_PATH = flags.DEFINE_string(
'input_path', '',
'path to an folder containing a tfrec file and a features.json file')
_OUTPUT_PATH = flags.DEFINE_string(
'output_path', None,
'path to the output a dataset with refined 3d poses')
_N_STEPS = flags.DEFINE_integer('n_steps', 100, 'optimizer (adam) steps')
_DEBUG_FIRST_N = flags.DEFINE_integer(
'debug_first_n', None, 'read only first n records')
_LEARNING_RATE = flags.DEFINE_float(
'learning_rate', 1e-2, 'optimizer (adam) learning rate')
_REPORT_N_APPROX = flags.DEFINE_integer(
'report_n_approx', 50,
'number of intermediate optimization results to report')
_CAM_SUBSET = flags.DEFINE_list(
'cam_subset', list(map(str, range(4))),
'comma-separated list of camera ids to use, e.g. 3,4,5')
_GT_HEATMAPS = flags.DEFINE_bool(
'gt_heatmaps', False,
'whether to replace heatmaps with fake ground truth heatmaps')
_FAKE_GT_HT_STD = flags.DEFINE_float(
'fake_gt_ht_std', 0.0,
'how much noise to add to positions of means of fake gt heatmaps')
_USE_WEAK_REPR = flags.DEFINE_bool(
'use_weak_repr', False,
'whether to use weak projection to get ground truth heatmaps')
_FAKE_GT_INIT = flags.DEFINE_bool(
'fake_gt_init', False,
'whether to use ground truth instead of monocular 3d predictions')
_RANDOM_INIT = flags.DEFINE_bool(
'random_init', False,
'whether to use random noise instead of monocular 3d predictions')
_EDGE_LENS_LAMBDA = flags.DEFINE_float(
'edge_lens_lambda', 0.0,
'weight of the normalized limb length loss during refinement')
flags.mark_flag_as_required('output_path')
def main(_):
cam_subset = list(map(int, _CAM_SUBSET.value))
n_cam = len(cam_subset)
report_n = (
_N_STEPS.value // (_N_STEPS.value // (_REPORT_N_APPROX.value - 1)) + 1)
output_shape_dtype = {
# optimization results
'loss': ([report_n], tf.float32),
'iters': ([report_n], tf.int32),
'pose3d_opt_preds': ([report_n, 17, 3], tf.float32),
'cam_rot_opt_preds': ([report_n, n_cam, 3, 3], tf.float32),
'scale_opt_preds': ([report_n, n_cam], tf.float32),
'shift_opt_preds': ([report_n, n_cam, 3], tf.float32),
# metrics
'pose2d_opt_preds': ([report_n, n_cam, 17, 2], tf.float32),
'pose3d_gt_aligned_pred_3d_proj': ([report_n, n_cam, 17, 2], tf.float32),
'pose3d_pred_pmpjpe': ([report_n], tf.float32),
'pose2d_pred_err': ([report_n], tf.float32),
'pose2d_pred_vs_posenet_err': ([report_n], tf.float32),
'pose2d_gt_posenet_err_mean': ([], tf.float32),
'pose3d_gt_backaligned_pose2d_gt_err': ([report_n], tf.float32),
# input data
'pose3d': ([17, 3], tf.float64),
'cam_pose3d': ([n_cam, 3], tf.float64),
'cam_rot': ([n_cam, 3, 3], tf.float64),
'cam_intr': ([n_cam, 4], tf.float64),
'cam_kd': ([n_cam, 5], tf.float64),
'pose2d_gt': ([n_cam, 17, 2], tf.float64),
'pose2d_repr': ([n_cam, 17, 2], tf.float64),
'heatmaps': ([n_cam, 17, 4, 4], tf.float64),
# note! pose2d_pred is actually the "mean heatmap" 2D pred
'pose2d_pred': ([n_cam, 17, 2], tf.float64),
'keys': ([n_cam], tf.string),
'bboxes': ([n_cam, 4], tf.int32),
'pose3d_epi_pred': ([n_cam, 17, 3], tf.float32),
'cam_subset': ([n_cam], tf.int32),
}
output_spec = tfds.features.FeaturesDict({
k: tfds.features.Tensor(shape=s, dtype=d)
for k, (s, d) in output_shape_dtype.items()
})
ds = data_utils.read_tfrec_feature_dict_ds(_INPUT_PATH.value)
if _DEBUG_FIRST_N.value is not None:
ds = ds.take(_DEBUG_FIRST_N.value)
dataset = []
for _, data_rec in ds:
opt_stats = inf_opt.run_inference_optimization(
data_rec=data_rec,
opt_steps=_N_STEPS.value,
report_n_results=_REPORT_N_APPROX.value,
cam_subset=cam_subset,
edge_lens_lambda=_EDGE_LENS_LAMBDA.value,
fake_gt_heatmaps=_GT_HEATMAPS.value,
fake_gt_ht_std=_FAKE_GT_HT_STD.value,
fake_gt_init=_FAKE_GT_INIT.value,
random_init=_RANDOM_INIT.value,
recompute_weak_repr=_USE_WEAK_REPR.value,
learning_rate=_LEARNING_RATE.value)
print('pmpjpe', opt_stats['pose3d_pred_pmpjpe'][-1])
dataset.append(opt_stats)
data_utils.write_tfrec_feature_dict_ds(
dataset, output_spec, _OUTPUT_PATH.value)
if __name__ == '__main__':
app.run(main)
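# Example invocation (a sketch -- paths and flag values are illustrative only):
#   python launch_iterative_solver.py --input_path=/data/h36m_tfrec \
#     --output_path=/data/refined --n_steps=200 --cam_subset=0,1,2,3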
| 37.140845 | 79 | 0.695108 | 795 | 5,274 | 4.296855 | 0.300629 | 0.036885 | 0.012295 | 0.023419 | 0.147248 | 0.074063 | 0.028689 | 0.016979 | 0.016979 | 0 | 0 | 0.031591 | 0.183732 | 5,274 | 141 | 80 | 37.404255 | 0.761905 | 0.142207 | 0 | 0 | 0 | 0 | 0.269496 | 0.025994 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009804 | false | 0 | 0.058824 | 0 | 0.068627 | 0.009804 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cb679704e23a1b0acbe6405efd9aa5634185c0e | 3,450 | py | Python | models.py | RCSnyder/subreddit_scraper | 17062c585f2dc0136e6e4ecb914d1ff456c80069 | [
"MIT"
] | null | null | null | models.py | RCSnyder/subreddit_scraper | 17062c585f2dc0136e6e4ecb914d1ff456c80069 | [
"MIT"
] | null | null | null | models.py | RCSnyder/subreddit_scraper | 17062c585f2dc0136e6e4ecb914d1ff456c80069 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import nltk
nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
s = SentimentIntensityAnalyzer()
import flair
flair_sentiment = flair.models.TextClassifier.load('en-sentiment')
""" from azure.ai.textanalytics import TextAnalyticsClient
from azure.core.credentials import AzureKeyCredential
azureclient = TextAnalyticsClient(endpoint="https://textsentimentcheck.cognitiveservices.azure.com/", credential=AzureKeyCredential("")) """
# add an instance of your model to this once you have defined it
models = []
# all added sentiment analysis models must be wrapped
# in a class that inherits from this class to enforce
# a common api between different models
class baseSentimentModel(ABC):
def __init__(self, name, model):
self.name = name
self.model = model
# this is the only required method
# it should take the text and return the predicted
# sentiment as a number between [-1, 1] where
# 1 is maximally positive, 0 is nuetral, and -1 is maximally negative
@abstractmethod
def predict(self, texts):
pass
class nltkModel(baseSentimentModel):
def predict(self, texts):
return [self.parsePolarity(self.model.polarity_scores(text)) for text in texts]
def parsePolarity(self, polarity):
if polarity['neg'] > polarity['pos'] and polarity['neg'] > polarity['neu']:
return -1.0
elif polarity['pos'] > polarity['neg'] and polarity['pos'] > polarity['neu']:
return 1.0
return 0.0
models.append(nltkModel('nltkVader', s))
class flairModel(baseSentimentModel):
def __init__(self, name, model):
self.sentMapping = {'NEGATIVE' : -1.0, 'NEUTRAL': 0.0, 'POSITIVE': 1.0}
super().__init__(name, model)
def predict(self, texts):
sents = [flair.data.Sentence(text) for text in texts]
self.model.predict(sents)
result = []
for i, t in enumerate(sents):
try:
result.append(self.sentMapping[t.labels[0].value])
except:
print(texts[i])
return result
models.append(flairModel('flair', flair_sentiment))
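# Quick usage sketch (hypothetical texts; assumes the models above loaded):
#   for m in models:
#       print(m.name, m.predict(["I love this!", "This is terrible."]))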
""" class azureModel(baseSentimentModel):
def predict(self, texts):
responses = self.model.analyze_sentiment(documents=texts)
return list(map(self.parseResponses, responses))
def parseResponses(self, responses):
totals = [0.0, 0.0, 0.0]
for response in responses:
totals[0] += response.confidence_scores.positive
totals[1] += response.confidence_scores.neutral
totals[2] += response.confidence_scores.negative
max_idx = 0
if totals[1] > totals[0]:
max_idx = 1
if totals[2] > totals[max_idx]:
max_idx = 2
return 1.0 - max_idx # this returns 1.0 for pos, 0.0 for neutral, and -1.0 for negative
models.append(azureModel('azureModel', azureclient)) """
"""
example of this:
class myModel(baseSentimentModel):
# this example is a categorical model
# so the values must be converted to numbers
def predict(self, text):
pred = self.model.evaluateSentiment(text)
if pred == 'positive':
return 1.0
elif pred == 'nuetral':
return 0.0
else:
return -1.0
models.append(myModel('example model', somePackage.model))
"""
| 31.944444 | 140 | 0.655072 | 419 | 3,450 | 5.334129 | 0.334129 | 0.008054 | 0.03132 | 0.034004 | 0.09038 | 0.021477 | 0 | 0 | 0 | 0 | 0 | 0.017898 | 0.238841 | 3,450 | 107 | 141 | 32.242991 | 0.833206 | 0.116232 | 0 | 0.125 | 0 | 0 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0.025 | 0.1 | 0.025 | 0.45 | 0.025 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cc062591cfe12fa5a06316443ec900f3dbf315c | 1,139 | py | Python | sagemaker-pyspark-sdk/tests/wrapper_test.py | hyandell/sagemaker-spark | 0149cf0f52562008a1a163e455207bb6d00d3e4a | [
"Apache-2.0"
] | 261 | 2017-11-30T04:53:01.000Z | 2022-03-27T14:52:46.000Z | sagemaker-pyspark-sdk/tests/wrapper_test.py | hyandell/sagemaker-spark | 0149cf0f52562008a1a163e455207bb6d00d3e4a | [
"Apache-2.0"
] | 114 | 2017-12-15T23:10:09.000Z | 2022-01-07T18:52:30.000Z | sagemaker-pyspark-sdk/tests/wrapper_test.py | hyandell/sagemaker-spark | 0149cf0f52562008a1a163e455207bb6d00d3e4a | [
"Apache-2.0"
] | 127 | 2017-11-30T18:53:51.000Z | 2022-03-13T18:58:10.000Z | import os
import pytest
from pyspark import SparkConf, SparkContext
from sagemaker_pyspark import classpath_jars
from sagemaker_pyspark.wrapper import Option, ScalaMap, ScalaList
@pytest.fixture(autouse=True)
def with_spark_context():
os.environ['SPARK_CLASSPATH'] = ":".join(classpath_jars())
conf = (SparkConf()
.set("spark.driver.extraClassPath", os.environ['SPARK_CLASSPATH']))
if SparkContext._active_spark_context is None:
SparkContext(conf=conf)
yield SparkContext._active_spark_context
# TearDown
SparkContext.stop(SparkContext._active_spark_context)
def test_convert_dictionary():
dictionary = {"key": "value"}
map = ScalaMap(dictionary)._to_java()
assert map.apply("key") == "value"
def test_convert_list():
list = ["features", "label", "else"]
s_list = ScalaList(list)._to_java()
assert s_list.apply(0) == "features"
assert s_list.apply(1) == "label"
assert s_list.apply(2) == "else"
def test_convert_option():
list = ["features", "label", "else"]
option = Option(list)._to_java()
assert option.get().apply(0) == "features"
| 24.76087 | 79 | 0.697103 | 138 | 1,139 | 5.514493 | 0.384058 | 0.063075 | 0.09067 | 0.118265 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004228 | 0.169447 | 1,139 | 45 | 80 | 25.311111 | 0.800211 | 0.007024 | 0 | 0.071429 | 0 | 0 | 0.117803 | 0.023915 | 0 | 0 | 0 | 0 | 0.178571 | 1 | 0.142857 | false | 0 | 0.178571 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cc149375b44096dc121a4abf69408bc16d3f4e2 | 401 | py | Python | python/controls/datepicker/datepicker_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/datepicker/datepicker_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | python/controls/datepicker/datepicker_with_change_event.py | pglet/pglet-samples | ab47e797a4daccfa4779daa3d1fd1cc27d92e7f9 | [
"MIT"
] | null | null | null | from datetime import datetime
import pglet
from pglet import DatePicker, Text
with pglet.page("datepicker-with-change-event") as page:
def datepicker_changed(e):
t.value = f"DatePicker value changed to {dp.value}"
t.update()
now = datetime.now()
t = Text()
dp = DatePicker(label="Start date", value=now, width=150, on_change=datepicker_changed)
page.add(dp, t)
input() | 28.642857 | 89 | 0.698254 | 58 | 401 | 4.775862 | 0.5 | 0.101083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009146 | 0.182045 | 401 | 14 | 90 | 28.642857 | 0.835366 | 0 | 0 | 0 | 0 | 0 | 0.189055 | 0.069652 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cc5b7cbbc5f291340922f4de02d42c9001eb684 | 5,312 | py | Python | interactive_bots/test/test_commons/test_form_crawler.py | dmitrijbozhkov/emergenecy-medicine-data | 7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc | [
"Apache-2.0"
] | null | null | null | interactive_bots/test/test_commons/test_form_crawler.py | dmitrijbozhkov/emergenecy-medicine-data | 7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc | [
"Apache-2.0"
] | null | null | null | interactive_bots/test/test_commons/test_form_crawler.py | dmitrijbozhkov/emergenecy-medicine-data | 7fea6b2c76a180c5e4c145a7fa6c83ae3e7af7bc | [
"Apache-2.0"
] | null | null | null | """ Tests for form crawlers """
from unittest import TestCase, main
from unittest.mock import Mock
from functools import partial
from interactive_bots.commons.form_crawler import FormActionOptions, FormCrawler
class FormActionOptionsTestCase(TestCase):
""" Test case for FormActionOptions class """
def setUp(self):
self.driver_mock = Mock()
self.form_action = FormActionOptions(self.driver_mock)
self.navigate_mock = Mock()
self.action_mock = Mock()
self.data_mock = Mock()
def test_set_actions_should_set_navigate(self):
""" set_actions should take function for navigate and make partial with driver """
self.navigate_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x)
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
self.form_action.navigate()
def test_set_actions_should_set_data(self):
""" set_actions should take function for data and make partial with driver """
self.data_mock.side_effect = lambda x: self.assertTrue(self.driver_mock is x)
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
self.form_action.data()
def test_set_actions_should_set_action(self):
""" set_actions should take function for action and make partial with driver """
self.action_mock = lambda x: self.assertTrue(self.driver_mock is x)
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.action_mock)
self.form_action.action()
def test_reset_accumulator_should_set_acc_to_0(self):
""" reset_accumulator should set acc to 0 """
self.form_action.acc = 12
self.form_action.reset_accumulator()
self.assertEqual(self.form_action.acc, 0)
def test_iteration_should_stop_iteration_if_acc_is_False(self):
""" Iteration through actions should stop if accumulator passed from action is false """
self.navigate_mock.return_value = []
self.action_mock.return_value = False
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
self.assertRaises(StopIteration, partial(next, self.form_action))
def test_iteration_should_pass_acc_to_data(self):
""" acc should be passed to data if True """
acc = ["stuff"]
self.navigate_mock.return_value = [1]
self.action_mock.return_value = acc
self.data_mock.side_effect = lambda d, a: self.assertTrue(a is acc)
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
next(self.form_action)
def test_iteration_should_return_from_data(self):
""" Iteration through FormActionOptions should return wahtever data returned """
val = 1
self.navigate_mock.return_value = [1]
self.data_mock.return_value = val
self.form_action.set_actions(self.navigate_mock, self.action_mock, self.data_mock)
self.assertEqual(next(self.form_action), val)
class FormCrawlerTestCase(TestCase):
""" Test case for FormCrawler """
def setUp(self):
self.form_crawler = FormCrawler()
def test_add_action_should_add_action_to_list(self):
""" add_action method should append action to actions list """
act = Mock()
self.form_crawler.add_action(act)
self.assertTrue(act is self.form_crawler.actions[0])
def test_remove_action_should_remove_action(self):
""" remove_action should remove action from actions list by given index """
act = Mock()
self.form_crawler.add_action(act)
self.form_crawler.remove_action(0)
self.assertEqual(len(self.form_crawler.actions), 0)
def test_crawl_should_set_header(self):
""" crawl should call writeheader before writing anything else """
writer = Mock()
option = FormActionOptions(Mock())
option.set_actions(Mock(return_value=[]), Mock(return_value=False), Mock())
self.form_crawler.add_action(option)
self.form_crawler.crawl(writer)
writer.writeheader.assert_called_once()
def test_crawl_should_write_row_of_all_values(self):
""" crawl should write row from dictionary with all the fields passed by actions data function """
write_dict = {"foo": 1, "bar": 2}
writer = Mock()
writer.writerow = lambda d: self.assertEqual(d, write_dict)
def counter(d, l, a):
    return not a
option1 = FormActionOptions(Mock())
option2 = FormActionOptions(Mock())
option1.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value={"foo": write_dict["foo"]}))
option2.set_actions(Mock(return_value=[1]), Mock(side_effect=counter), Mock(return_value=[{"bar": write_dict["bar"]}]))
self.form_crawler.add_action(option1)
self.form_crawler.add_action(option2)
self.form_crawler.crawl(writer)
def test_crawl_should_throw_exception_if_actions_list_is_empty(self):
""" crawl should throw IndexError if actions is empty """
self.assertRaises(IndexError, partial(self.form_crawler.crawl, Mock()))
if __name__ == "__main__":
main()
| 46.191304 | 127 | 0.69823 | 703 | 5,312 | 5.002845 | 0.167852 | 0.063691 | 0.063691 | 0.040944 | 0.466022 | 0.388684 | 0.325562 | 0.260449 | 0.217799 | 0.19619 | 0 | 0.004974 | 0.205196 | 5,312 | 114 | 128 | 46.596491 | 0.828044 | 0.164157 | 0 | 0.202381 | 0 | 0 | 0.00713 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0.178571 | false | 0.011905 | 0.047619 | 0 | 0.27381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cc96bccafaee3453353f6c6ebd4f4f2e2c88027 | 1,828 | py | Python | tests/test_reader.py | hsuchristine/code_challenge | de82243a685e465a01445cd700f156cbf9b89572 | [
"MIT"
] | 1 | 2020-06-29T01:53:30.000Z | 2020-06-29T01:53:30.000Z | tests/test_reader.py | hsuchristine/code_challenge | de82243a685e465a01445cd700f156cbf9b89572 | [
"MIT"
] | 2 | 2022-01-13T01:54:57.000Z | 2022-03-12T00:07:09.000Z | tests/test_reader.py | hsuchristine/code_challenge | de82243a685e465a01445cd700f156cbf9b89572 | [
"MIT"
] | null | null | null | """Unit test for DataReader (public methods only)"""
import unittest
import numpy as np
import os
from dicom_data_preprocess import parsing
from dicom_data_preprocess.reader import DataReader
__author__ = 'Christine Hsu'
class TestReader(unittest.TestCase):
@classmethod
def setUpClass(TestReader):
TestReader.download_data_path = 'tests/data/sample-batchset/'
TestReader.data_basepath = 'tests/data/output_data/'
TestReader.logs_path = 'tests/logs/',
TestReader.plots_path = 'tests/plots/'
TestReader.contour_type = 'i-contours'
TestReader.save_plot = False
TestReader.dicoms_basepath = os.path.join(TestReader.download_data_path, 'dicoms')
TestReader.contours_basepath = os.path.join(TestReader.download_data_path, 'contourfiles')
TestReader.link_filepath = os.path.join(TestReader.download_data_path, 'link.csv')
link_tuples = DataReader._read_link(TestReader, TestReader.link_filepath)
TestReader.sample_tuples = DataReader._assemble_link(TestReader, link_tuples)
def test_load_samples(self):
print('\nTesting the loading of eight assembled samples...')
reader = DataReader(download_data_path=TestReader.download_data_path,
data_basepath=TestReader.data_basepath,
logs_path=TestReader.logs_path,
plots_path=TestReader.plots_path,
contour_type=TestReader.contour_type,
save_plot=TestReader.save_plot)
images, masks, metadata = reader.load_samples(TestReader.sample_tuples)
self.assertTrue(isinstance(images, list))
self.assertTrue(isinstance(masks, list))
self.assertTrue(isinstance(metadata, list))
self.assertTrue(isinstance(images[0], np.ndarray))
self.assertEqual(masks[0].dtype, np.bool)
self.assertTrue(isinstance(metadata[0], str))
reader.plot_samples(images, masks, metadata, 'test_load_samples.jpg')
if __name__ == "__main__":
unittest.main()
| 35.153846 | 92 | 0.791028 | 231 | 1,828 | 5.991342 | 0.341991 | 0.052023 | 0.069364 | 0.093931 | 0.089595 | 0.089595 | 0.089595 | 0.063584 | 0 | 0 | 0 | 0.001827 | 0.101751 | 1,828 | 51 | 93 | 35.843137 | 0.841048 | 0.025164 | 0 | 0 | 0 | 0 | 0.113739 | 0.039977 | 0 | 0 | 0 | 0 | 0.157895 | 1 | 0.052632 | false | 0 | 0.131579 | 0 | 0.210526 | 0.026316 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cc99818ef3d420ba12270f6fd7f4e8403fb924e | 4,064 | py | Python | lib/dashboard/logger.py | hexueyuan/Adanos | b35873fc88b61dabda49c85f0e2b2d126731d34f | [
"MIT"
] | null | null | null | lib/dashboard/logger.py | hexueyuan/Adanos | b35873fc88b61dabda49c85f0e2b2d126731d34f | [
"MIT"
] | 8 | 2020-07-17T01:49:53.000Z | 2022-02-17T22:58:31.000Z | lib/dashboard/logger.py | hexueyuan/Adanos | b35873fc88b61dabda49c85f0e2b2d126731d34f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import logging.config
import logging.handlers
class Logger:
_default_conf = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s",
"datefmt": "%d-%M-%Y %H:%M:%S"
}
},
"handlers": {
"defaultHandler": {
"class":"logging.StreamHandler",
"level":"DEBUG",
"formatter":"default",
"stream":"ext://sys.stdout"
}
},
"root": {
"level": "DEBUG",
"handlers": ['defaultHandler']
}
}
_current_conf = None
_logger = None
_register_loggers = ['root']
def __init__(self, conf=None):
if conf is not None and not getattr(conf, 'get'):
raise TypeError("conf has no get method")
self._current_conf = self._default_conf
if conf is not None:
self._current_conf['formatters'].update(conf.get('formatters', {}))
self._current_conf['handlers'].update(conf.get('handlers', {}))
self._current_conf['loggers'] = conf.get('loggers', {})
#set default propagate = 0
for logger in self._current_conf['loggers'].values():
logger['propagate'] = 0
try:
logging.config.dictConfig(self._current_conf)
except ValueError:
self._current_conf = self._default_conf
logging.config.dictConfig(self._current_conf)
logging.getLogger("defaultLogger").exception("logger config error.")
finally:
self._logger = logging.getLogger("defaultLogger")
for key in self._current_conf.get('loggers', {}).keys():
self._register_loggers.append(key)
def getLogger(self, name):
if name == "root":
return self._logger
if name in self._register_loggers:
return logging.getLogger(name)
else:
raise NameError("No this logger: {}".format(name))
if __name__ == "__main__":
conf = {
"formatters": {
"default": {
"format": "[%(asctime)s][%(name)s][%(levelname)s][%(filename)s:%(lineno)d]: %(message)s",
"datefmt": "%d-%M-%Y %H:%M:%S"
}
},
"handlers": {
"consoleHandler": {
"class":"logging.StreamHandler",
"level":"NOTSET",
"formatter":"default",
"stream":"ext://sys.stdout"
},
"fileHandler": {
"class": "logging.FileHandler",
"level": "NOTSET",
"formatter": "default",
"filename": "testHandler2.log"
}
},
"loggers": {
"testLogger1": {
"handlers": ["consoleHandler"],
"level": "INFO"
},
"testLogger2": {
"handlers": ["fileHandler"],
"level": "DEBUG"
}
}
}
loggerHome = Logger(conf)
#root = loggerHome.getLogger('root')
#root.debug('this is a debug message')
#root.info('this is a info message')
#root.warn('this is a warning message')
#root.error('this is a error message')
#root.fatal('this is a fatal message')
testLogger1 = loggerHome.getLogger('testLogger1')
testLogger1.debug('this is a debug message')
testLogger1.info('this is a info message')
testLogger1.warn('this is a warning message')
testLogger1.error('this is a error message')
testLogger1.fatal('this is a fatal message')
testLogger2 = loggerHome.getLogger('testLogger2')
testLogger2.debug('this is a debug message')
testLogger2.info('this is a info message')
testLogger2.warn('this is a warning message')
testLogger2.error('this is a error message')
testLogger2.fatal('this is a fatal message')
| 33.04065 | 105 | 0.532726 | 397 | 4,064 | 5.329975 | 0.239295 | 0.042533 | 0.049622 | 0.017013 | 0.365312 | 0.351134 | 0.086011 | 0.086011 | 0.086011 | 0.086011 | 0 | 0.007631 | 0.322835 | 4,064 | 122 | 106 | 33.311475 | 0.761265 | 0.065207 | 0 | 0.245098 | 0 | 0.019608 | 0.293668 | 0.051187 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019608 | false | 0 | 0.029412 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ccbceb319266f245b6042a34c87089c38999e11 | 846 | py | Python | FileFolder/tempFile.py | jwannebo3524/Attendence | e8ff3f7457337c0516b1e53f2918b9a87f3f1de4 | [
"Unlicense",
"MIT"
] | null | null | null | FileFolder/tempFile.py | jwannebo3524/Attendence | e8ff3f7457337c0516b1e53f2918b9a87f3f1de4 | [
"Unlicense",
"MIT"
] | 2 | 2021-09-17T16:56:28.000Z | 2021-11-02T00:57:32.000Z | FileFolder/tempFile.py | jwannebo3524/Attendence | e8ff3f7457337c0516b1e53f2918b9a87f3f1de4 | [
"Unlicense",
"MIT"
] | null | null | null | from tempfile import NamedTemporaryFile
import shutil
import csv
import datetime
#filename = 'tmpEmployeeDatabase.csv'
class tempFile:
    def __init__(self):
        today = datetime.date.today()
        self.filename = str(today.month) + str(today.day) + str(today.year - 2000) + "-WildStang_Attendance.csv"
        self.tempfile = None

    def createTemp(self):
        self.tempfile = NamedTemporaryFile('w+t', newline='', delete=False)

    def findID(self):
        if self.tempfile is None:
            self.createTemp()
        # Title-case the name column (index 1) of every row, writing the
        # result to the temp file, then swap it in for the original CSV.
        with open(self.filename, 'r', newline='') as csvFile, self.tempfile:
            reader = csv.reader(csvFile, delimiter=',', quotechar='"')
            writer = csv.writer(self.tempfile, delimiter=',', quotechar='"')
            for row in reader:
                row[1] = row[1].title()
                writer.writerow(row)
        shutil.move(self.tempfile.name, self.filename)
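# Usage sketch (assumes the dated attendance CSV already exists on disk):
#   updater = tempFile()
#   updater.findID()  # title-cases the name column in place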
| 26.4375 | 144 | 0.637116 | 93 | 846 | 5.741935 | 0.505376 | 0.061798 | 0.08427 | 0.104869 | 0.172285 | 0.172285 | 0.172285 | 0 | 0 | 0 | 0 | 0.010495 | 0.211584 | 846 | 31 | 145 | 27.290323 | 0.790105 | 0.042553 | 0 | 0.1 | 0 | 0 | 0.044499 | 0.030902 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.25 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ccfae5fa95146d589ab65e046f6f95b2dcb1775 | 3,205 | py | Python | imagingresponse/explore_layouts.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 6 | 2020-01-29T07:24:14.000Z | 2022-03-16T10:05:25.000Z | imagingresponse/explore_layouts.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 6 | 2020-07-03T00:31:10.000Z | 2021-09-10T07:45:01.000Z | imagingresponse/explore_layouts.py | marivasq/gamma-ai | 735953e80901afea3e5cdeb2a7b27c9ab5725434 | [
"MIT"
] | 5 | 2019-02-27T22:56:49.000Z | 2019-08-24T19:01:41.000Z | ###################################################################################################
#
#
# Copyright (C) by Shivani Kishnani & Andreas Zoglauer.
# All rights reserved.
#
# Please see the file License.txt in the main repository for the copyright-notice.
#
###################################################################################################
###################################################################################################
import os
import sys
import argparse
import itertools
from ToyModel3DCone import ToyModel3DCone
import signal
###################################################################################################
"""
This program loops over different layout and determines their performance
For all the command line options, try:
python3 explorelayouts.py --help
"""
parser = argparse.ArgumentParser(description='Passing in values to run ToyModel3DCone to test different layouts')
parser.add_argument('-f', '--file', default='changethis.txt', help='File name used for training/testing')
parser.add_argument('-o', '--output', default='output.txt', help='The output file name where the final results will be stored')
parser.add_argument('-l', '--hiddenlayers', default='3', help='Number of hidden layers. Default: 3')
parser.add_argument('-n', '--startingnode', default='10', help='Number of nodes to start with. Default: 10')
parser.add_argument('-m', '--multfactor', default='10', help='Multiplier applied to the node count of each successive hidden layer. Default: 10')
parser.add_argument('-a', '--activation', default='relu', help='Name of default activation layer to be applied')
parser.add_argument('-mn', '--maxNode', default='50', help='Maximum number of nodes in a layer')
parser.add_argument('-t', '--time', default='600', help='Time in seconds to run the model for')
args = parser.parse_args()
hiddenLayers = int(args.hiddenlayers)
multFactor = int(args.multfactor)
startingNode = int(args.startingnode)
maxNode = int(args.maxNode)
LayoutList = []
output = args.output
filew = open(output,"w+")
#Step 0: Take care of Ctrl+C
Interrupted = False
NInterrupts = 0
def signal_handler(signal, frame):
print("You pressed Ctrl+C! inside explore_layouts!")
global Interrupted
Interrupted = True
global NInterrupts
NInterrupts += 1
if NInterrupts >= 3:
print("Aborting!")
filew.close()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Step 1: Create function to get layout
def create_layout(node, numLayers):
layer_list = [node]
while numLayers > 0 and node != 0:
add = node*multFactor
layer_list.append(node*multFactor)
node = add
numLayers -= 1
return layer_list
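# Example (illustrative): with multFactor == 10, create_layout(10, 3) returns
# [10, 100, 1000, 10000] -- the starting node count plus three widened layers.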
# Step 2: Create list of layouts for NN
for Layout in list(create_layout(x, hiddenLayers) for x in range(startingNode, maxNode+1, 10)):
LayoutList.append(Layout)
print(Layout)
# Step 3: Loop over all layouts and record performance
for Layout in LayoutList:
ToyModel3DCone(filew, Layout, args.activation)
filew.close()
print("Finished!")
# END
###################################################################################################
| 33.041237 | 139 | 0.60936 | 372 | 3,205 | 5.204301 | 0.419355 | 0.03719 | 0.070248 | 0.019628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011565 | 0.136661 | 3,205 | 96 | 140 | 33.385417 | 0.68811 | 0.099532 | 0 | 0.039216 | 0 | 0 | 0.281601 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0.019608 | 0.117647 | 0 | 0.176471 | 0.078431 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cd096bb54ddc46b06b8bf177a453f80287a8129 | 24,252 | py | Python | inspect4py/utils.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 2 | 2022-02-15T20:30:57.000Z | 2022-03-17T00:50:37.000Z | inspect4py/utils.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 14 | 2022-01-25T14:03:50.000Z | 2022-03-28T13:21:08.000Z | inspect4py/utils.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | null | null | null | import ast
import os
import subprocess
from pathlib import Path
from json2html import *
from inspect4py.parse_setup_files import inspect_setup
from inspect4py.structure_tree import DisplayablePath, get_directory_structure
def print_summary(json_dict):
"""
This method prints a small summary of the classes and properties recognized during the analysis.
At the moment this method is only invoked when a directory with multiple files is passed.
"""
folders = 0
files = 0
dependencies = 0
functions = 0
classes = 0
for key, value in json_dict.items():
if "/" in key:
folders += 1
if isinstance(value, list):
for element in value:
files += 1
if "dependencies" in element:
dependencies += len(element["dependencies"])
if "functions" in element:
functions += len(element["functions"])
if "classes" in element:
classes += len(element["classes"])
print("Analysis completed")
print("Total number of folders processed (root folder is considered a folder):", folders)
print("Total number of files found: ", files)
print("Total number of classes found: ", classes)
print("Total number of dependencies found in those files", dependencies)
print("Total number of functions parsed: ", functions)
def extract_directory_tree(input_path, ignore_dirs, ignore_files, visual=0):
"""
Method to obtain the directory tree of a repository.
    Directories and files passed in the ignore lists are excluded from the tree.
    :input_path path of the repo to analyze
"""
ignore_set = ['.git', '__pycache__', '.idea', '.pytest_cache']
ignore_set = tuple(list(ignore_dirs) + list(ignore_files) + ignore_set)
if visual:
paths = DisplayablePath.make_tree(
Path(input_path),
criteria=lambda path: path.name not in ignore_set and not path.name.endswith(".pyc"))
for path in paths:
print(path.displayable())
return get_directory_structure(input_path, ignore_set)
def prune_json(json_dict):
"""
Method that, given a JSON object, removes all its empty fields.
This method simplifies the resultant JSON.
:param json_dict input JSON object to prune
:return JSON object with empty values removed
"""
final_dict = {}
if not (isinstance(json_dict, dict)):
# Ensure the element provided is a dict
return json_dict
else:
for a, b in json_dict.items():
if b or isinstance(b, bool):
if isinstance(b, dict):
aux_dict = prune_json(b)
if aux_dict: # Remove empty dicts
final_dict[a] = aux_dict
elif isinstance(b, list):
aux_list = list(filter(None, [prune_json(i) for i in b]))
if len(aux_list) > 0: # Remove empty lists
final_dict[a] = aux_list
else:
final_dict[a] = b
return final_dict
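# Worked example:
# prune_json({"a": [], "b": {"c": None, "d": 1}, "e": False})
# -> {"b": {"d": 1}, "e": False} (empty values removed, booleans kept)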
def extract_requirements(input_path):
print("Finding the requirements with the pigar package for %s" % input_path)
try:
file_name = 'requirements_' + os.path.basename(input_path) + '.txt'
# Note: pigar's behavior can be changed by piping "echo n" instead.
# Answering yes ("echo y") lets pigar search PyPI
# for the missing modules and filter out some unnecessary ones.
cmd = 'echo y | pigar -P ' + input_path + ' --without-referenced-comments -p ' + file_name
# cmd = 'echo n | pigar -P ' + input_path + ' --without-referenced-comments -p ' + file_name
# print("cmd: %s" %cmd)
proc = subprocess.Popen(cmd.encode('utf-8'), shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
req_dict = {}
with open(file_name, "r") as file:
lines = file.readlines()[1:]
for line in lines:
try:
if line != "\n":
splitLine = line.split(" == ")
req_dict[splitLine[0]] = splitLine[1].split("\n")[0]
except:
pass
# Note: the pigar requirements file is deleted here;
# in the future we might want to keep it (just comment out the line below)
os.remove(file_name)
return req_dict
except:
print("Error finding the requirements in %s" % input_path)
def extract_software_invocation(dir_info, dir_tree_info, input_path, call_list, readme):
"""
Method to detect the directory type of a software project. This method also detects tests.
We distinguish four main types: script, package, library and service. A project can belong to more than one type.
:dir_info json containing all the extracted information about the software repository
:dir_tree_info json containing the directory information of the target repo
:input_path path of the repository to analyze
:call_list json file containing the list of calls per file and functions or methods.
:readme content of the readme file of the project (if any)
"""
software_invocation_info = []
setup_files = ("setup.py", "setup.cfg")
server_dependencies = ("flask", "flask_restful", "falcon", "falcon_app", "aiohttp", "bottle", "django", "fastapi",
"locust", "pyramid", "hug", "eve", "connexion")
# Note: other server dependencies are missing here. More testing is needed.
flag_package_library = 0
for directory in dir_tree_info:
for elem in setup_files: # first check setup.py, then cfg
if elem in dir_tree_info[directory]:
# 1. Exploration for package or library
software_invocation_info.append(inspect_setup(input_path, elem))
flag_package_library = 1
break
# We keep exploring mains even after detecting that this is a library.
# Loop across all mains to decide whether each is a service (main + server
# dependency) or just a script (main without server dependency).
main_files = []
# new list to store the "mains that have been previously classified as "test".
test_files_main = []
test_files_no_main = []
# new list to store files without mains
body_only_files = []
flag_service_main = 0
for key in dir_info:
if key != "requirements":
for elem in dir_info[key]:
if elem["main_info"]["main_flag"]:
flag_service_main = 0
flag_service = 0
main_stored = 0
if elem["is_test"]:
test_files_main.append(elem["file"]["path"])
main_stored = 1
else:
try:
# 2. Exploration for services in files with "mains"
flag_service, software_invocation_info = service_check(elem, software_invocation_info,
server_dependencies, "main", readme)
except:
main_files.append(elem["file"]["path"])
if flag_service:
flag_service_main = 1
if not flag_service and not main_stored:
main_files.append(elem["file"]["path"])
elif elem["is_test"]:
test_files_no_main.append(elem["file"]["path"])
# Filtering scripts with just body in software invocation
elif elem['body']['calls']:
body_only_files.append(elem)
m_secondary = [0] * len(main_files)
flag_script_main = 0
# this list (of lists) stores the mains that each main import
import_mains = []
# this list (of lists) stores the mains that each main is imported by
imported_by = [None]*len(main_files)
# 3. Exploration for main scripts
for m in range(0, len(main_files)):
m_calls = find_file_calls(main_files[m], call_list)
# store which other main files main_files[m] imports
m_imports = extract_relations(main_files[m], m_calls, main_files, call_list)
# store those m_imports in import_mains[m]
import_mains.append(m_imports)
for m_i in m_imports:
m_secondary[main_files.index(m_i)] = 1
if not imported_by[main_files.index(m_i)]:
imported_by[main_files.index(m_i)] = []
imported_by[main_files.index(m_i)].append(main_files[m])
for m in range(0, len(main_files)):
soft_info = {"type": "script", "run": "python " + main_files[m], "has_structure": "main",
"mentioned_in_readme": os.path.basename(os.path.normpath(main_files[m])) in readme,
"imports": import_mains[m], "imported_by": imported_by[m]}
software_invocation_info.append(soft_info)
flag_script_main = 1
# tests with main.
for t in range(0, len(test_files_main)):
# Test files do not have help, they are usually run by themselves
soft_info = {"type": "test", "run": "python " + test_files_main[t], "has_structure": "main",
"mentioned_in_readme": os.path.basename(os.path.normpath(test_files_main[t])) in readme}
software_invocation_info.append(soft_info)
# tests with no main.
for t in range(0, len(test_files_no_main)):
# Test files do not have help, they are usually run by themselves
soft_info = {"type": "test", "run": "python " + test_files_no_main[t], "has_structure": "body",
"mentioned_in_readme": os.path.basename(os.path.normpath(test_files_no_main[t])) in readme}
software_invocation_info.append(soft_info)
flag_service_body = 0
flag_script_body = 0
for elem in body_only_files:
# 4. Exploration for services in files with body
flag_service, software_invocation_info = service_check(elem, software_invocation_info,
server_dependencies, "body", readme)
if flag_service:
flag_service_body = 1
# Only adding this information if we haven't found libraries, packages, services or scripts with mains.
# 5. Exploration for script without main in files with body
if not flag_service_main and not flag_service_body and not flag_package_library and not flag_script_main:
soft_info = {"type": "script", "run": "python " + elem["file"]["path"], "has_structure": "body",
"mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][
"extension"] in readme}
software_invocation_info.append(soft_info)
flag_script_body = 1
# Only adding this information if we haven't found libraries, packages, services or scripts with mains
# or bodies.
# 6. Exploration for script without main or body in files with body
if not flag_script_body and not flag_service_main and not flag_service_body and not flag_package_library \
and not flag_script_main:
python_files = []
for directory in dir_tree_info:
for elem in dir_tree_info[directory]:
if ".py" in elem:
python_files.append(os.path.abspath(input_path + "/" + directory + "/" + elem))
for f in range(0, len(python_files)):
soft_info = {"type": "script without main", "import": python_files[f], "has_structure": "without_body",
"mentioned_in_readme": os.path.basename(os.path.normpath(python_files[f])) in readme}
software_invocation_info.append(soft_info)
return software_invocation_info
def generate_output_html(pruned_json, output_file_html):
"""
Method to generate a simple HTML view of the obtained JSON.
:pruned_json JSON to print out
:output_file_html path where to write the HTML
"""
html = json2html.convert(json=pruned_json)
with open(output_file_html, "w") as ht:
ht.write(html)
def top_level_functions(body):
return (f for f in body if isinstance(f, ast.FunctionDef))
def top_level_classes(body):
return (c for c in body if isinstance(c, ast.ClassDef))
def parse_module(filename):
with open(filename, "rt") as file:
return ast.parse(file.read(), filename=filename)
def list_functions_classes_from_module(m, path):
functions_classes = []
try:
# to open a module inside a directory
m = m.replace(".", "/")
repo_path = Path(path).parent.absolute()
abs_repo_path = os.path.abspath(repo_path)
file_module = abs_repo_path + "/" + m + ".py"
tree = parse_module(file_module)
for func in top_level_functions(tree.body):
functions_classes.append(func.name)
for cl in top_level_classes(tree.body):
functions_classes.append(cl.name)
type = "internal"
except:
#module = __import__(m)
#functions = dir(module)
type = "external"
return functions_classes, type
def type_module(m, i, path):
repo_path = Path(path).parent.absolute()
abs_repo_path = os.path.abspath(repo_path)
if m:
m = m.replace(".", "/")
file_module = abs_repo_path + "/" + m + "/" + i + ".py"
else:
file_module = abs_repo_path + "/" + i + ".py"
file_module_path = Path(file_module)
if file_module_path.is_file():
return "internal"
else:
return "external"
def extract_call_functions(funcs_info, body=0):
call_list = {}
if body:
if funcs_info["body"]["calls"]:
call_list["local"] = funcs_info["body"]["calls"]
else:
for funct in funcs_info:
if funcs_info[funct]["calls"]:
call_list[funct] = {}
call_list[funct]["local"] = funcs_info[funct]["calls"]
if funcs_info[funct]["functions"]:
call_list[funct]["nested"] = extract_call_functions(funcs_info[funct]["functions"])
return call_list
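# Sketch of the returned structure (names are hypothetical):
# {"my_func": {"local": ["helper", "print"],
#              "nested": {"inner": {"local": [...]}}}}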
def extract_call_methods(classes_info):
call_list = {}
for method in classes_info:
if classes_info[method]["calls"]:
call_list[method] = {}
call_list[method]["local"] = classes_info[method]["calls"]
if classes_info[method]["functions"]:
call_list[method]["nested"] = extract_call_methods(classes_info[method]["functions"])
return call_list
def call_list_file(code_info):
call_list = {}
call_list["functions"] = extract_call_functions(code_info.funcsInfo)
call_list["body"] = extract_call_functions(code_info.bodyInfo, body=1)
for class_n in code_info.classesInfo:
call_list[class_n] = extract_call_methods(code_info.classesInfo[class_n]["methods"])
return call_list
def call_list_dir(dir_info):
call_list = {}
for dir in dir_info:
call_list[dir] = {}
for file_info in dir_info[dir]:
file_path = file_info["file"]["path"]
call_list[dir][file_path] = extract_call_functions(file_info["functions"])
for class_n in file_info["classes"]:
call_list[dir][file_path][class_n] = extract_call_methods(file_info["classes"][class_n]["methods"])
return call_list
def find_file_calls(file_name, call_list):
for dir in call_list:
for elem in call_list[dir]:
if elem in file_name:
return call_list[dir][elem]
def find_module_calls(module, call_list):
for dir in call_list:
for elem in call_list[dir]:
if "/"+module+"." in elem:
#print("---MODULE %s, elem %s, giving call_list[%s][%s]" %(module, elem, dir, elem))
return call_list[dir][elem]
# DFS algorithm - Allowing up to 2 levels of depth.
def file_in_call(base, call, file, m_imports, call_list, orig_base, level):
### NOTE: the `level` parameter is very important here!
### It tracks how deep we are in the recursive search.
### To change the recursion depth, adjust level_depth.
level_depth = 2
## For each call we extract all its sub_calls (level 1),
## and for each sub_call all its sub_sub_calls (level 2).
if base in call and m_imports.count(file) == 0 and orig_base not in call:
m_imports.append(file)
return 1
elif orig_base in call:
return 0
elif level < level_depth and call!="":
m_calls_extern = {}
module_base = call.split(".")[0]
module_base = module_base + "."
m_calls_extern = find_module_calls(module_base, call_list)
# Note: this is where we increase the recursion level
level += 1
if m_calls_extern:
for m_c in m_calls_extern:
flag_found = extract_data(base, m_calls_extern[m_c], file, m_imports, 0, call_list, orig_base, level)
if flag_found:
return 1
return 0
else:
return 0
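# Illustration: with level_depth = 2, a chain main_a.py -> helper.py -> main_b.py
# (hypothetical file names) is still detected as a relation between the two
# mains, but chains deeper than two levels are ignored.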
def extract_local_function(base, m_calls_local, file, m_imports, flag_found, call_list, orig_base, level):
for call in m_calls_local:
flag_found = file_in_call(base, call, file, m_imports, call_list, orig_base, level)
if flag_found:
return flag_found
return flag_found
def extract_nested_function(base, m_calls_nested, file, m_imports, flag_found, call_list, orig_base, level):
for call in m_calls_nested:
flag_found = extract_data(base, m_calls_nested[call], file, m_imports, flag_found, call_list, orig_base, level)
if flag_found:
return flag_found
return flag_found
def extract_data(base, m_calls, file, m_imports, flag_found, call_list, orig_base, level):
for elem in m_calls:
if elem == "local":
flag_found = extract_local_function(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base,
level)
elif elem == "nested":
flag_found = extract_nested_function(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base,
level)
else:
flag_found = extract_data(base, m_calls[elem], file, m_imports, flag_found, call_list, orig_base, level)
if flag_found:
return flag_found
return flag_found
# Applies the DFS strategy above to find import relationships between main files.
def extract_relations(file_name, m_calls, main_files, call_list):
m_imports = []
orig_base = os.path.basename(file_name)
orig_base = os.path.splitext(orig_base)[0]
orig_base = orig_base + "."
for file in main_files:
if file not in file_name:
flag_found = 0
base = os.path.basename(file)
base = os.path.splitext(base)[0]
base = base + "."
for m_c in m_calls:
level = 0
flag_found = extract_data(base, m_calls[m_c], file, m_imports, flag_found, call_list, orig_base, level)
if flag_found:
return m_imports
return m_imports
def service_check(elem, software_invocation_info, server_dependencies, has_structure, readme):
flag_service = 0
for dep in elem["dependencies"]:
imports = dep["import"]
flag_service, software_invocation_info = service_in_set(imports, server_dependencies, elem,
software_invocation_info, has_structure, readme)
if flag_service:
return flag_service, software_invocation_info
else:
modules = dep["from_module"]
flag_service, software_invocation_info = service_in_set(modules, server_dependencies, elem,
software_invocation_info, has_structure, readme)
if flag_service:
return flag_service, software_invocation_info
return flag_service, software_invocation_info
def service_in_set(data, server_dependencies, elem, software_invocation_info, has_structure, readme):
flag_service = 0
if isinstance(data, list):
for data_dep in data:
if data_dep.lower() in server_dependencies:
soft_info = {"type": "service", "run": "python " + elem["file"]["path"],
"has_structure": has_structure,
"mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][
"extension"] in readme}
flag_service = 1
if soft_info not in software_invocation_info:
software_invocation_info.append(soft_info)
else:
if data:
if data.lower() in server_dependencies:
soft_info = {"type": "service", "run": "python " + elem["file"]["path"],
"has_structure": has_structure,
"mentioned_in_readme": elem["file"]["fileNameBase"] + "." + elem["file"][
"extension"] in readme}
flag_service = 1
if soft_info not in software_invocation_info:
software_invocation_info.append(soft_info)
return flag_service, software_invocation_info
def rank_software_invocation(soft_invocation_info_list):
"""
Function to create a ranking over the different ways of executing a program.
If two elements have the same position in the ranking, it means that there is no priority among them.
Heuristic to order the invocation list is as follows, in decreasing order of prioritization:
- If package or library is detected, this will be always first.
- If something (script or service) is mentioned in the readme file, it is considered a priority.
- Services are prioritized over scripts
- Scripts with main are prioritized over script with body.
- Scripts with body are prioritized over scripts with no body.
TO DOs:
- If a script imports other scripts (or service), it gets prioritized (TO DO when examples are available)
- If several scripts are available, those at root level are prioritized (TO DO when examples are available)
:param soft_invocation_info_list JSON list with the different ways to execute a program.
"""
if len(soft_invocation_info_list) == 0:
return soft_invocation_info_list
# Calculate score for every entry in the list
for entry in soft_invocation_info_list:
score = 0
if "library" in entry["type"] or "package" in entry["type"]:
score += 100
if entry.get("mentioned_in_readme"):
score += 10
if "service" in entry["type"]:
score += 5
structure = entry.get("has_structure", "")
if "main" in structure:
score += 2
if "body" in structure:
score += 1
entry["ranking"] = score
# Reorder vector and assign ranking
soft_invocation_info_list.sort(key=lambda x: x["ranking"], reverse=True)
# Replace score by number (but keep those with same score with the same ranking)
position = 1
previous_score = soft_invocation_info_list[0]["ranking"]
for entry in soft_invocation_info_list:
current_score = entry["ranking"]
if previous_score > current_score: # Ordered in descending order
position += 1
previous_score = current_score
entry["ranking"] = position
return soft_invocation_info_list
| 41.813793 | 121 | 0.616485 | 3,128 | 24,252 | 4.557545 | 0.144182 | 0.027497 | 0.040123 | 0.012346 | 0.388608 | 0.319304 | 0.282057 | 0.255261 | 0.229026 | 0.210227 | 0 | 0.004732 | 0.294161 | 24,252 | 579 | 122 | 41.88601 | 0.828076 | 0.204643 | 0 | 0.285354 | 0 | 0 | 0.084796 | 0.001524 | 0 | 0 | 0 | 0 | 0 | 1 | 0.063131 | false | 0.007576 | 0.088384 | 0.005051 | 0.242424 | 0.025253 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cd6e644acdbb15b82dfab8d7e7022c02998853f | 2,151 | py | Python | days/day10.py | Kurocon/AdventOfCode2020 | 40ae8e604eb0e3bc0967c220cf868a8194769a6b | [
"BSD-3-Clause"
] | null | null | null | days/day10.py | Kurocon/AdventOfCode2020 | 40ae8e604eb0e3bc0967c220cf868a8194769a6b | [
"BSD-3-Clause"
] | null | null | null | days/day10.py | Kurocon/AdventOfCode2020 | 40ae8e604eb0e3bc0967c220cf868a8194769a6b | [
"BSD-3-Clause"
] | null | null | null | from functools import lru_cache
from typing import List
from days import AOCDay, day
@day(10)
class Day10(AOCDay):
print_debug = "c12"
test_input = """16
10
15
5
1
11
7
19
6
12
4""".split("\n")
test_input2 = """28
33
18
42
31
14
46
20
48
47
24
23
49
45
19
38
39
11
1
32
25
35
8
17
7
9
4
2
34
10
3""".split("\n")
def common(self, input_data):
# input_data = self.test_input2
self.input_data = list(map(int, input_data))
def check_smallest_adapter_recurse(self, current_rating, target_rating, adapters_left) -> List[int]:
options = [current_rating + i for i in range(1, 4)]
for option in options:
if option in adapters_left:
difference = option - current_rating
current_rating = option
if current_rating + 3 == target_rating:
return [difference, 3]
new_adapters = adapters_left[:]
new_adapters.remove(option)
return self.check_smallest_adapter_recurse(current_rating, target_rating, new_adapters) + [difference]
def part1(self, input_data):
current_rating = 0
target_rating = max(self.input_data) + 3
adapters_left = self.input_data[:]
differences = self.check_smallest_adapter_recurse(current_rating, target_rating, adapters_left)
yield len([x for x in differences if x == 1]) * len([x for x in differences if x == 3])
@lru_cache
def check_adapter_recurse(self, current_rating, target_rating, adapters) -> int:
if current_rating == target_rating:
return 1
options = [i for i in adapters if 1 <= i - current_rating <= 3]
count = 0
for option in options:
count += self.check_adapter_recurse(option, target_rating, adapters)
return count
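# Sanity check: for the small test_input above (adapters 1..19, device at 22),
# this counts 8 distinct valid adapter arrangements, the example answer
# for this puzzle.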
def part2(self, input_data):
current_rating = 0
target_rating = max(self.input_data) + 3
adapters_plus_builtin = tuple(self.input_data[:] + [target_rating])
differences = self.check_adapter_recurse(current_rating, target_rating, adapters_plus_builtin)
yield differences
| 23.637363 | 118 | 0.645281 | 298 | 2,151 | 4.432886 | 0.315436 | 0.127933 | 0.078728 | 0.11355 | 0.342922 | 0.342922 | 0.336866 | 0.295231 | 0.181681 | 0.096896 | 0 | 0.061108 | 0.269642 | 2,151 | 90 | 119 | 23.9 | 0.779758 | 0.013482 | 0 | 0.146341 | 0 | 0 | 0.056132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060976 | false | 0 | 0.036585 | 0 | 0.195122 | 0.012195 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5cd8b1b9a2e75158a87d41bc9d2a842af4dc3ce7 | 642 | py | Python | problems/287_find_dup_number.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | 1 | 2018-02-03T14:17:18.000Z | 2018-02-03T14:17:18.000Z | problems/287_find_dup_number.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | problems/287_find_dup_number.py | apoorvkk/LeetCodeSolutions | 1c3461cfc05deb930d0866428eb00362b4338aab | [
"MIT"
] | null | null | null | '''
URL: https://leetcode.com/problems/find-the-duplicate-number/
Time complexity: O(nlogn)
Space complexity: O(1)
'''
class Solution(object):
def findDuplicate(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if len(nums) < 2:
return -1
lo, hi = 1, len(nums) - 1
while lo < hi:
mid = (lo + hi) // 2
count = 0
for num in nums:
if num <= mid:
count += 1
if count <= mid:
lo = mid + 1
else: # count > mid
hi = mid
return lo
| 20.0625 | 61 | 0.423676 | 73 | 642 | 3.726027 | 0.547945 | 0.044118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025788 | 0.456386 | 642 | 31 | 62 | 20.709677 | 0.753582 | 0.244548 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ce2e2885759b71f922db45b1b250447965a354c | 14,927 | py | Python | certifire/cli.py | CertiFire/certifire | 722da20bade41b8cc8553177e70e1f56015fe335 | [
"MIT"
] | null | null | null | certifire/cli.py | CertiFire/certifire | 722da20bade41b8cc8553177e70e1f56015fe335 | [
"MIT"
] | null | null | null | certifire/cli.py | CertiFire/certifire | 722da20bade41b8cc8553177e70e1f56015fe335 | [
"MIT"
] | 1 | 2021-02-06T03:29:56.000Z | 2021-02-06T03:29:56.000Z | import argparse
import logging
import os
import sys
from certifire import app, auth, config, database, db, get_version
from certifire.errors import CertifireError
from certifire.plugins.acme import crypto
from certifire.plugins.acme.models import Account, Certificate, Order
from certifire.plugins.acme.plugin import (create_order, register, reorder,
revoke_certificate)
from certifire.plugins.destinations.models import Destination
logger = logging.getLogger(__name__)
# Text
DESCRIPTION = \
"""
Certifire {}.
Interact with ACME certification authorities such as Let's Encrypt.
No idea what you're doing? Register an account, authorize your domains and
issue a certificate or two. Call a command with -h for more instructions.
""".format(get_version())
DESCRIPTION_REGISTER = \
"""
Creates a new account key and registers on the server. The resulting --account
is saved in the database, and required for most other operations.
Takes email as required argument
You can pass arguments like organization, organizational_unit, country, state,
and location for CSR generation from this account. If not provided, default
values from the config file will be used.
You can also pass your own RSA private key if needed
(Provide key size 2048 and above, otherwise the server won't accept it.)
You only have to do this once.
"""
DESCRIPTION_ISSUE = \
"""
Issues a certificate for one or more domains. First, the domains passed will be
authorized using the specified authentication type. If dns authentication
is used, also provide the dns provider. If the type and dns provider are not
passed, default values from the config file will be used.
Takes account_id as required argument
You can pass arguments like organization, organizational_unit, country, state,
and location for CSR generation. If not provided, default
values from the account will be used.
This will generate a new RSA key and CSR for you. But if you want, you can
bring your own with the --key-file and --csr-file attributes.
(Provide key size 2048 and above, otherwise the server won't accept it.)
The resulting key and certificate are written into the database.
A chained certificate with the intermediate included is also written to the database.
(If you're passing your own CSR, the given domains can be whatever you want.)
Note that unlike many other certification authorities, ACME does not add a
non-www or www alias to certificates. If you want this to happen, add it
yourself. You need to authorize both as well.
Certificate issuance has a server-side rate limit. Don't overdo it.
"""
DESCRIPTION_REVOKE = \
"""
Revokes a certificate. The certificate must have been issued using the
current account.
Takes account_id and certificate_id as required arguments
"""
# Command handlers
def _register(args):
key = None
if args.key_file:
with open(args.key_file, 'rb') as f:
key = crypto.load_private_key(f.read())
with app.app_context():
ret, act_id = register(
user_id=1,
email=args.email,
server=args.server,
rsa_key=key,
organization=args.organization,
organizational_unit=args.organizational_unit,
country=args.country,
state=args.state,
location=args.location)
if ret:
print("Account created with account id: {}".format(act_id))
print("Pass this account id for issue, revoke, etc...")
else:
print("Account with same email exists: account id: {}".format(act_id))
def _issue(args):
key = None
if args.key_file:
with open(args.key_file, 'rb') as f:
key = crypto.load_private_key(f.read())
csr = None
if args.csr_file:
with open(args.csr_file, 'rb') as f:
key = crypto.load_csr(f.read())
with app.app_context():
ret, order_id = create_order(
account_id=args.account,
destination_id=args.destination,
domains=args.domains,
type=args.type,
provider=args.provider,
email=args.email,
organization=args.organization,
organizational_unit=args.organizational_unit,
country=args.country,
state=args.state,
location=args.location,
reissue=args.reissue,
csr=csr,
key=key)
if ret:
print("Order created with order id: {}".format(order_id))
else:
print("Order creation failed.")
def _revoke(args):
with app.app_context():
certdb = Certificate.query.get(args.certificate)
if not certdb:
print("There is no such certificate {}".format(args.certificate))
return
order = Order.query.get(certdb.order_id)
if not order:
print("Order for this certificate not found")
return
revoke_certificate(order.account_id, certdb.id)
def _create_dest(args):
pkey = None
if args.pkey:
with open(args.pkey, 'rb') as f:
pkey = crypto.load_private_key(f.read())
with app.app_context():
dest = Destination(user_id=1,
host=args.host,
port=args.port,
user=args.user,
password=args.pwd,
ssh_priv_key=pkey,
ssh_priv_key_pass=args.pkeypass,
challengeDestinationPath=args.challengePath,
certDestinationPath=args.certPath,
exportFormat=args.exportFormat,
no_check=args.nocheck)
if dest.create():
print("Destination: {} created".format(dest.id))
print(dest.json)
else:
print("Error creating destination with given data. Check hostname, password, private key")
print(dest.json)
def _update_dest(args):
with app.app_context():
dest = Destination.query.get(args.id)
if not dest:
print("There is no such destination {}".format(args.id))
return
if dest.user_id != 1:
print("This destination does not belong to the admin")
return
pkey = None
if args.pkey:
with open(args.pkey, 'rb') as f:
pkey = crypto.load_private_key(f.read())
if dest.update(user_id=1,
host=args.host,
port=args.port,
user=args.user,
password=args.pwd,
ssh_priv_key=pkey,
ssh_priv_key_pass=args.pkeypass,
challengeDestinationPath=args.challengePath,
certDestinationPath=args.certPath,
exportFormat=args.exportFormat,
no_check=args.nocheck):
print("Destination: {} updated".format(dest.id))
print(dest.json)
else:
print("Error updating destination with given data. Check hostname, password, private key")
print(dest.json)
def _delete_dest(args):
with app.app_context():
dest = Destination.query.get(args.id)
if not dest:
print("There is no such destination {}".format(args.id))
return
if dest.user_id != 1:
print("This destination does not belong to the admin")
return
dest = dest.delete()
print("Destination {} deleted from database".format(dest.id))
class Formatter(argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def certifire_main():
parser = argparse.ArgumentParser(
description=DESCRIPTION,
formatter_class=Formatter,
)
subparsers = parser.add_subparsers()
# Account creation
register = subparsers.add_parser(
'register',
help="Create a new account and register",
description=DESCRIPTION_REGISTER,
formatter_class=Formatter,
)
register.add_argument('email', type=str, help="Account email address")
register.add_argument('--server', '-i', help="ACME Server url")
register.add_argument('--key-file', '-k',
help="Existing key file to use for the account")
register.add_argument('--organization', '-o', help="Name of organization")
register.add_argument('--organizational_unit', '-u',
help="Name of organizational unit")
register.add_argument('--country', '-c', help="Name of country")
register.add_argument('--state', '-s', help="Name of state")
register.add_argument('--location', '-l', help="Name of location")
register.set_defaults(func=_register)
# Certificate issuance
issue = subparsers.add_parser(
'issue',
help="Authorize and Request a new certificate",
description=DESCRIPTION_ISSUE,
formatter_class=Formatter,
)
issue.add_argument('--account', '-a',
help="The acme account id to use", required=True)
issue.add_argument('--destination',
help="Destination to authorize/push certificates")
issue.add_argument('--domains',
help="One or more domain names to authorize", nargs='+')
issue.add_argument('--type',
'-t',
help="Authorization type",
choices=('dns', 'sftp'),
default='dns')
issue.add_argument('--provider',
'-p',
help="DNS Provider",
choices=config.VALID_DNS_PROVIDERS,
default=config.VALID_DNS_PROVIDERS[0])
issue.add_argument('--key-file', '-k',
help="Existing key file to use for the certificate")
issue.add_argument('--csr-file', help="Existing signing request to use")
issue.add_argument('--email', '-e', help="email address for CSR")
issue.add_argument('--organization', '-o', help="Name of organization")
issue.add_argument('--organizational_unit', '-u',
help="Name of organizational unit")
issue.add_argument('--country', '-c', help="Name of country")
issue.add_argument('--state', '-s', help="Name of state")
issue.add_argument('--location', '-l', help="Name of location")
issue.add_argument('--reissue',
dest='reissue',
help="Reissue certificate",
action='store_true')
issue.set_defaults(func=_issue, reissue=False)
# Certificate revocation
revoke = subparsers.add_parser(
'revoke',
help="Revoke an issued certificate",
description=DESCRIPTION_REVOKE,
formatter_class=Formatter,
)
revoke.add_argument("certificate", help="The certificate id to revoke")
revoke.add_argument('--account', '-a',
help="The acme account id to use", required=True)
revoke.set_defaults(func=_revoke)
destination = subparsers.add_parser(
'destination',
help="Manage Destinations",
# description=DESCRIPTION_REVOKE, #TODO: Destinations description
formatter_class=Formatter,
)
destination_subparsers = destination.add_subparsers()
create_dest = destination_subparsers.add_parser(
'create',
help='Create a Destination',
formatter_class=Formatter
)
create_dest.add_argument("host", help="Host FQDN. eg: api.certifire.xyz")
create_dest.add_argument('--port', '-p', help="SSH port", default=22)
create_dest.add_argument('--user', '-u', help="SSH user", default='root')
create_dest.add_argument('--pwd', '-s', help="SSH password")
create_dest.add_argument('--pkey', '-k', help="SSH private key file")
create_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
create_dest.add_argument('--challengePath', help="HTTP-01 Challenge destination path", default='/var/www/html')
create_dest.add_argument('--certPath', help="Certificate push destination path", default='/etc/nginx/certs')
create_dest.add_argument('--exportFormat', help="Certificate export format", choices=('NGINX', 'Apache'), default='NGINX')
create_dest.add_argument('--nocheck', help="Pass this flag to skip SSH initial checks", dest='nocheck', action='store_true')
create_dest.set_defaults(func=_create_dest, nocheck=False)
update_dest = destination_subparsers.add_parser(
'update',
help='Update a Destination',
formatter_class=Formatter
)
update_dest.add_argument("id", help="Destination id")
update_dest.add_argument("--host", '-f', help="Host FQDN. eg: api.certifire.xyz")
update_dest.add_argument('--port', '-p', help="SSH port")
update_dest.add_argument('--user', '-u', help="SSH user")
update_dest.add_argument('--pwd', '-s', help="SSH password")
update_dest.add_argument('--pkey', '-k', help="SSH private key file")
update_dest.add_argument('--pkeypass', '-c', help="SSH private key password")
update_dest.add_argument('--challengePath', help="HTTP-01 Challenge destination path")
update_dest.add_argument('--certPath', help="Certificate push destination path")
update_dest.add_argument('--exportFormat', help="Certificate export format", choices=('NGINX', 'Apache'))
update_dest.add_argument('--nocheck', help="Pass this flag to skip SSH initial checks", dest='nocheck', action='store_true')
update_dest.set_defaults(func=_update_dest, nocheck=False)
delete_dest = destination_subparsers.add_parser(
'delete',
help='Delete a Destination',
formatter_class=Formatter
)
delete_dest.add_argument("id", help="Destination id")
delete_dest.set_defaults(func=_delete_dest)
# Version
version = subparsers.add_parser("version", help="Show the version number")
version.set_defaults(func=lambda *args: print(
"certifire {}\n".format(get_version())))
# Parse
args = parser.parse_args()
if not hasattr(args, 'func'):
parser.print_help()
sys.exit()
# Set up logging
root = logging.getLogger('certifire')
root.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(message)s"))
root.addHandler(handler)
# Let's encrypt
try:
args.func(args)
except CertifireError as e:
if str(e):
logger.error(e)
sys.exit()
except KeyboardInterrupt:
logger.error("")
logger.error("Interrupted.")
sys.exit()
except Exception as e:
logger.error("Oops! An unhandled error occurred. Please file a bug.")
logger.exception(e)
sys.exit()
if __name__ == "__main__":
certifire_main()
| 36.85679 | 128 | 0.638239 | 1,793 | 14,927 | 5.192415 | 0.182376 | 0.05435 | 0.035446 | 0.024812 | 0.435768 | 0.40247 | 0.39957 | 0.380666 | 0.32739 | 0.306767 | 0 | 0.001795 | 0.253567 | 14,927 | 404 | 129 | 36.94802 | 0.833782 | 0.012595 | 0 | 0.318339 | 0 | 0 | 0.213956 | 0.003342 | 0 | 0 | 0 | 0.002475 | 0 | 1 | 0.024221 | false | 0.048443 | 0.038062 | 0 | 0.086505 | 0.076125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ce43cf725e3bf8836e708394822671100ec1604 | 2,783 | py | Python | data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py | EMBEDDIA/multilingual_entity_linking | 9042259dd72ae85d94a460a981e9716df4eac203 | [
"Apache-2.0"
] | null | null | null | data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py | EMBEDDIA/multilingual_entity_linking | 9042259dd72ae85d94a460a981e9716df4eac203 | [
"Apache-2.0"
] | 2 | 2021-04-20T13:30:09.000Z | 2021-05-03T14:24:06.000Z | data_gen/gen_p_e_m/gen_p_e_m_from_wiki.py | EMBEDDIA/multilingual_entity_linking | 9042259dd72ae85d94a460a981e9716df4eac203 | [
"Apache-2.0"
] | null | null | null | import argparse, os
from urllib.parse import unquote
import os.path
from os import path
import pickle
ap = argparse.ArgumentParser()
ap.add_argument("-l", "--language", default='en',type = str,
help="path")
args = ap.parse_args()
exec(open("utils/utils.py").read())
exec(open("data_gen/parse_wiki_dump/parse_wiki_dump_tools.py").read())
print('Computing Wikipedia p_e_m')
wiki_e_m_counts = {}
num_lines = 0
parsing_errors = 0
list_ent_errors = 0
diez_ent_errors = 0
disambiguation_ent_errors = 0
num_valid_hyperlinks = 0
with open('wiki_data/' + args.language + '/' + args.language + '-wikidataid-TextWithAnchorsFromAllWikipedia.txt', encoding="utf-8") as f:
for line in f:
line = unquote(line.strip())
num_lines += 1
if num_lines % 5000000 == 0:
print('Processed ' + str(num_lines) + ' lines. Parsing errs = ' +\
str(parsing_errors) + ' List ent errs = ' + \
str(list_ent_errors) + ' diez errs = ' + str(diez_ent_errors) +\
' disambig errs = ' + str(disambiguation_ent_errors) + \
' . Num valid hyperlinks = ' + str(num_valid_hyperlinks))
if not '<doc id="' in line:
list_hyp, text, le_errs, p_errs, dis_errs, diez_errs = extract_text_and_hyp(line, False)
parsing_errors += p_errs
list_ent_errors += le_errs
disambiguation_ent_errors += dis_errs
diez_ent_errors += diez_errs
for el in list_hyp:
mention = el
ent_wikiid = list_hyp[el]['ent_wikiid']
num_valid_hyperlinks += 1
if mention not in wiki_e_m_counts:
wiki_e_m_counts[mention] = {}
if ent_wikiid not in wiki_e_m_counts[mention]:
wiki_e_m_counts[mention][ent_wikiid] = 0
wiki_e_m_counts[mention][ent_wikiid] += 1
print(' Done computing Wikipedia p(e|m). Num valid hyperlinks = ', num_valid_hyperlinks)
print('Now sorting and writing ..')
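# Each written line has the form (example values are hypothetical):
# mention \t total_freq \t wikiid,count,entity_name \t wikiid,count,entity_name ...
# e.g. "Paris\t12\t90,10,Paris\t175,2,Paris_(mythology)\t"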
with open('generated/' + args.language + '/wikipedia_p_e_m.txt', "w", encoding="utf-8") as f:
for mention in wiki_e_m_counts:
tbl = {}
for ent_wikiid in wiki_e_m_counts[mention]:
tbl[ent_wikiid] = wiki_e_m_counts[mention][ent_wikiid]
tbl = {k: v for k, v in sorted(tbl.items(), key=lambda item: item[1], reverse=True)}
text = ''
total_freq = 0
for ent_wikiid in tbl:
text += str(ent_wikiid) + ',' + str(tbl[ent_wikiid])
text += ',' + get_ent_name_from_wikiid(ent_wikiid).replace(' ', '_') + '\t'
total_freq = total_freq + tbl[ent_wikiid]
f.write(mention + '\t' + str(total_freq) + '\t' + text + '\n')
print(' Done sorting and writing.')
| 37.106667 | 137 | 0.610492 | 381 | 2,783 | 4.170604 | 0.275591 | 0.073631 | 0.033984 | 0.067967 | 0.161737 | 0.114537 | 0.052863 | 0 | 0 | 0 | 0 | 0.010779 | 0.266619 | 2,783 | 74 | 138 | 37.608108 | 0.767761 | 0 | 0 | 0 | 0 | 0 | 0.164211 | 0.034495 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
5ce7079747845637d28230c2367ed9d83c91e81e | 1,865 | py | Python | setup.py | Pranavj94/All-things-NLP | 009e63e35611679afb54ca981675019679179fd3 | [
"Apache-2.0"
] | null | null | null | setup.py | Pranavj94/All-things-NLP | 009e63e35611679afb54ca981675019679179fd3 | [
"Apache-2.0"
] | null | null | null | setup.py | Pranavj94/All-things-NLP | 009e63e35611679afb54ca981675019679179fd3 | [
"Apache-2.0"
] | 1 | 2021-07-27T05:53:36.000Z | 2021-07-27T05:53:36.000Z | ############################################################################################
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
############################################################################################
from setuptools import setup, find_packages
def readme():
with open("README.md") as f:
README = f.read()
return README
#with open("requirements.txt") as f:
# required = f.read().splitlines()
#with open("requirements-optional.txt") as f:
# optional_required = f.read().splitlines()
setup(
name="allthingsnlp",
version="0.0.4",
description="All things NLP - An open source, low-code NLP library in Python.",
long_description=readme(),
long_description_content_type="text/markdown",
url="https://github.com/Pranavj94/all-things-nlp",
author="Pranav J",
author_email="pranavj13594@gmail.com",
license="MIT",
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
install_requires=['pandas','numpy','tqdm','nltk','wordcloud','matplotlib','IPython']
#extras_require={"full": optional_required,},
) | 38.061224 | 92 | 0.619839 | 221 | 1,865 | 5.171946 | 0.588235 | 0.052493 | 0.065617 | 0.068241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012739 | 0.158177 | 1,865 | 49 | 93 | 38.061224 | 0.715287 | 0.384987 | 0 | 0 | 0 | 0 | 0.422996 | 0.023207 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a228588da1a1865d35b49af11b46dd3d71bba03 | 3,532 | py | Python | pysph/examples/sphysics/dambreak_sphysics.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | [
"BSD-3-Clause"
] | 293 | 2017-05-26T14:41:15.000Z | 2022-03-28T09:56:16.000Z | pysph/examples/sphysics/dambreak_sphysics.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | [
"BSD-3-Clause"
] | 217 | 2017-05-29T15:48:14.000Z | 2022-03-24T16:16:55.000Z | pysph/examples/sphysics/dambreak_sphysics.py | nauaneed/pysph | 9cb9a859934939307c65a25cbf73e4ecc83fea4a | [
"BSD-3-Clause"
] | 126 | 2017-05-25T19:17:32.000Z | 2022-03-25T11:23:24.000Z | """Dam break past an obstacle with data from SPHysics. (40 minutes)
For benchmarking, we use the input geometry and discretization as the
SPHYSICS Case 5
(https://wiki.manchester.ac.uk/sphysics/index.php/SPHYSICS_Home_Page)
We only require the INDAT and IPART files generated by SPHysics. These
define respectively, the numerical parameters and the initial particle
data used for the run. The rest of the problem is set-up in the usual
way.
"""
import os
import numpy
from pysph.sph.equation import Group
from pysph.base.kernels import CubicSpline
from pysph.sph.wc.basic import TaitEOS, TaitEOSHGCorrection, MomentumEquation
from pysph.sph.basic_equations import ContinuityEquation, XSPHCorrection
from pysph.solver.solver import Solver
from pysph.solver.application import Application
from pysph.sph.integrator import EPECIntegrator, PECIntegrator
from pysph.sph.integrator_step import WCSPHStep
from pysph.tools.sphysics import sphysics2pysph
MY_DIR = os.path.dirname(__file__)
INDAT = os.path.join(MY_DIR, 'INDAT.gz')
IPART = os.path.join(MY_DIR, 'IPART.gz')
# problem dimensionality
dim = 3
# suggested initial time step and final time
dt = 1e-5
tf = 2.0
# physical constants for the run loaded from SPHysics INDAT
indat = numpy.loadtxt(INDAT)
H = float( indat[10] )
B = float( indat[11] )
gamma = float( indat[12] )
eps = float( indat[14] )
rho0 = float( indat[15] )
alpha = float( indat[16] )
beta = 0.0
c0 = numpy.sqrt( B*gamma/rho0 )
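# c0 is the reference sound speed implied by the Tait EOS p = B*((rho/rho0)**gamma - 1):
# c**2 = dp/drho evaluated at rho = rho0 gives c0 = sqrt(B*gamma/rho0).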
class DamBreak3DSPhysics(Application):
def add_user_options(self, group):
group.add_argument(
"--test", action="store_true", dest="test", default=False,
help="For use while testing of results, uses PEC integrator."
)
def create_particles(self):
return sphysics2pysph(IPART, INDAT, vtk=False)
def create_solver(self):
kernel = CubicSpline(dim=3)
if self.options.test:
integrator = PECIntegrator(fluid=WCSPHStep(), boundary=WCSPHStep())
adaptive, n_damp = False, 0
else:
integrator = EPECIntegrator(fluid=WCSPHStep(), boundary=WCSPHStep())
adaptive, n_damp = True, 0
solver = Solver(dim=dim, kernel=kernel, integrator=integrator,
adaptive_timestep=adaptive, tf=tf, dt=dt,
n_damp=n_damp)
return solver
def create_equations(self):
equations = [
# Equation of state
Group(equations=[
TaitEOS(dest='fluid', sources=None,
rho0=rho0, c0=c0, gamma=gamma),
TaitEOSHGCorrection(dest='boundary', sources=None,
rho0=rho0, c0=c0, gamma=gamma),
], real=False),
# Continuity Momentum and XSPH equations
Group(equations=[
ContinuityEquation(dest='fluid',
sources=['fluid', 'boundary']),
ContinuityEquation(dest='boundary', sources=['fluid']),
MomentumEquation(
dest='fluid', sources=['fluid', 'boundary'], c0=c0,
alpha=alpha, beta=beta, gz=-9.81,
tensile_correction=True),
# Position step with XSPH
XSPHCorrection(dest='fluid', sources=['fluid'], eps=eps)
])
]
return equations
if __name__ == '__main__':
app = DamBreak3DSPhysics()
app.run()
| 33.009346 | 79 | 0.625708 | 410 | 3,532 | 5.312195 | 0.417073 | 0.03719 | 0.027548 | 0.028926 | 0.111111 | 0.070707 | 0.070707 | 0.030303 | 0 | 0 | 0 | 0.017633 | 0.277463 | 3,532 | 106 | 80 | 33.320755 | 0.835815 | 0.182616 | 0 | 0.057971 | 0 | 0 | 0.059151 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057971 | false | 0 | 0.15942 | 0.014493 | 0.275362 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a25c60958291760ba28905779810fb418738cea | 396 | py | Python | RaspberryPi/work1.py | DTK-Creaters/Course | eb6518306482d21cc6e5848a783ffc0820b017fd | [
"Apache-2.0"
] | 3 | 2020-05-15T15:14:17.000Z | 2021-04-05T11:39:53.000Z | RaspberryPi/work1.py | DTK-Creaters/Course | eb6518306482d21cc6e5848a783ffc0820b017fd | [
"Apache-2.0"
] | null | null | null | RaspberryPi/work1.py | DTK-Creaters/Course | eb6518306482d21cc6e5848a783ffc0820b017fd | [
"Apache-2.0"
] | 1 | 2020-05-17T02:48:13.000Z | 2020-05-17T02:48:13.000Z | # -*- coding: utf-8 -*-
'''
Lesson 1
Write a program that blinks the LED on and off three times.
Also make a version that uses three LEDs.
'''
import RPi.GPIO as GPIO
import time
PINS=[10, 11, 12]
# Standard setup boilerplate (done every time)
GPIO.setmode(GPIO.BCM)
GPIO.setup(PINS,GPIO.OUT)
for x in range(3):
GPIO.output(PINS,GPIO.HIGH) # drive current through pins 10, 11, 12 (HIGH)
time.sleep(2)
GPIO.output(PINS,GPIO.LOW) # set the current through pins 10, 11, 12 to zero (LOW)
time.sleep(2)
GPIO.cleanup()
| 17.217391 | 62 | 0.684343 | 58 | 396 | 4.672414 | 0.586207 | 0.088561 | 0.103321 | 0.132841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077381 | 0.151515 | 396 | 22 | 63 | 18 | 0.729167 | 0.35101 | 0 | 0.181818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.181818 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a2796507aa8a3ec5cbd031ab878c3b23d7bbd5a | 1,654 | py | Python | tests/test_reset.py | embray/amqp-mock | dfcf50a4a455063331fa334b19db98cf59d88ea9 | [
"Apache-2.0"
] | 6 | 2021-01-13T08:32:16.000Z | 2022-03-23T08:19:47.000Z | tests/test_reset.py | embray/amqp-mock | dfcf50a4a455063331fa334b19db98cf59d88ea9 | [
"Apache-2.0"
] | 20 | 2020-12-02T09:44:15.000Z | 2022-01-04T16:33:09.000Z | tests/test_reset.py | embray/amqp-mock | dfcf50a4a455063331fa334b19db98cf59d88ea9 | [
"Apache-2.0"
] | 3 | 2020-08-20T13:21:13.000Z | 2021-11-05T19:14:58.000Z | import pytest
from amqp_mock import Message
from ._test_utils.fixtures import amqp_client, mock_client, mock_server
from ._test_utils.helpers import random_uuid, to_binary
from ._test_utils.steps import given, then, when
__all__ = ("mock_client", "mock_server", "amqp_client",)
@pytest.mark.asyncio
async def test_reset_exchanges(*, mock_server, mock_client, amqp_client):
with given:
exchange = "test_exchange"
message = {"id": random_uuid()}
await amqp_client.publish(to_binary(message), exchange)
with when:
result = await mock_client.reset()
with then:
assert result is None
messages = await mock_client.get_exchange_messages(exchange)
assert len(messages) == 0
@pytest.mark.asyncio
async def test_reset_queues(*, mock_server, mock_client, amqp_client):
with given:
queue = "test_queue"
await mock_client.publish_message(queue, Message("text"))
with when:
result = await mock_client.reset()
with then:
assert result is None
await amqp_client.consume(queue)
await amqp_client.wait(seconds=0.1)
assert len(amqp_client.get_consumed_messages()) == 0
@pytest.mark.asyncio
async def test_reset_history(*, mock_server, mock_client, amqp_client):
with given:
queue = "test_queue"
await mock_client.publish_message(queue, Message("text"))
await amqp_client.consume(queue)
with when:
result = await mock_client.reset()
with then:
assert result is None
history = await mock_client.get_queue_message_history(queue)
assert len(history) == 0
| 27.114754 | 73 | 0.692866 | 216 | 1,654 | 5.027778 | 0.226852 | 0.110497 | 0.096685 | 0.060773 | 0.542357 | 0.492634 | 0.492634 | 0.461326 | 0.425414 | 0.346225 | 0 | 0.003867 | 0.218259 | 1,654 | 60 | 74 | 27.566667 | 0.83604 | 0 | 0 | 0.571429 | 0 | 0 | 0.045949 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 1 | 0 | false | 0 | 0.119048 | 0 | 0.119048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a27a7a55e2650fe9334f43d5b8dce70103b5737 | 12,749 | py | Python | karta/raster/band.py | fortyninemaps/karta | b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6 | [
"MIT"
] | 84 | 2016-03-18T15:42:02.000Z | 2022-02-20T15:12:28.000Z | karta/raster/band.py | fortyninemaps/karta | b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6 | [
"MIT"
] | 21 | 2016-03-06T01:47:38.000Z | 2019-01-13T20:33:52.000Z | karta/raster/band.py | fortyninemaps/karta | b35d8cbcfb62e9f1d826a5c73605d34a0c0990b6 | [
"MIT"
] | 12 | 2016-03-18T15:33:53.000Z | 2022-03-02T08:18:22.000Z | """
Band implementations for storing data in Karta Grid instances
Overview
--------
`BandIndexer` interface for accessing data from one or more bands
`SimpleBand` use numpy arrays for data storage
`CompressedBand` uses blosc compression to reduce in-memory footprint
Implementation
--------------
Bands are expected to implement the following methods:
- `__init__(self, size, dtype, initval=None)`
- `getblock(self, yoff, xoff, ny, nx)`
- `setblock(self, yoff, xoff, array)`
Attributes:
- `dtype`
- `size`
The following methods are deprecated:
- `__getitem__(self, key)`, accepting as *key* any of
- an int
- a slice
- a 2-tuple of ints
- a 2-tuple of slices
- `__setitem__(self, key, value)`, accepting as *key* the same
possibilities as __getitem__
"""
import blosc
import numpy as np
from numbers import Real, Integral
from math import ceil
class BandIndexer(object):
def __init__(self, bands):
self.bands = bands
def __getitem__(self, key):
if isinstance(key, np.ndarray):
return self._get_from_array_mask(key)
if isinstance(key, slice):
key = (key, slice(None, None, None), slice(None, None, None))
if not isinstance(key, tuple):
raise TypeError("key should be an array or a tuple")
collapse_rows = collapse_cols = collapse_bands = False
ny, nx = self.bands[0].size
if isinstance(key[0], Integral):
collapse_rows = True
r = key[0] % ny
ystart, yend, ystep = (r, r+1, 1)
elif isinstance(key[0], slice):
ystart, yend, ystep = key[0].indices(ny)
else:
raise TypeError("first key item should be an integer or a slice")
if isinstance(key[1], Integral):
collapse_cols = True
r = key[1] % nx
xstart, xend, xstep = (r, r+1, 1)
elif isinstance(key[1], slice):
xstart, xend, xstep = key[1].indices(nx)
else:
raise TypeError("second key item should be an integer or a slice")
if len(key) == 2:
bands = list(range(len(self.bands)))
elif len(key) == 3 and isinstance(key[2], Integral):
collapse_bands = True
bands = [key[2] % len(self.bands)]
elif len(key) == 3 and isinstance(key[2], slice):
bands = list(range(*key[2].indices(len(self.bands))))
else:
raise TypeError("third key item should be an integer or a slice")
if ystep < 0:
ystart, yend = yend+1, ystart+1
if xstep < 0:
xstart, xend = xend+1, xstart+1
shape = [1 + (yend-ystart-1) // abs(ystep),
1 + (xend-xstart-1) // abs(xstep),
len(bands)]
out = np.empty(shape, dtype = self.bands[0].dtype)
for i, iband in enumerate(bands):
band = self.bands[iband]
band_values = band.getblock(ystart, xstart, yend-ystart, xend-xstart)
out[:,:,i] = band_values[::ystep,::xstep]
if collapse_bands:
out = out[:,:,0]
if collapse_cols:
out = out[:,0]
if collapse_rows:
out = out[0]
return out
def __setitem__(self, key, value):
if isinstance(key, np.ndarray):
return self._set_from_array_mask(key, value)
if isinstance(key, slice):
key = (key, slice(None, None, None), slice(None, None, None))
if not isinstance(key, tuple):
raise TypeError("key should be an array or a tuple")
ny, nx = self.bands[0].size
if isinstance(key[0], Integral):
r = key[0] % ny
ystart, yend, ystep = (r, r+1, 1)
elif isinstance(key[0], slice):
ystart, yend, ystep = key[0].indices(ny)
else:
raise TypeError("first key item should be an integer or a slice")
if isinstance(key[1], Integral):
r = key[1] % nx
xstart, xend, xstep = (r, r+1, 1)
elif isinstance(key[1], slice):
xstart, xend, xstep = key[1].indices(nx)
else:
raise TypeError("second key item should be an integer or a slice")
if len(key) == 2:
bands = list(range(len(self.bands)))
elif len(key) == 3 and isinstance(key[2], Integral):
collapse_bands = True
bands = [key[2] % len(self.bands)]
elif len(key) == 3 and isinstance(key[2], slice):
bands = list(range(*key[2].indices(len(self.bands))))
else:
raise TypeError("third key item should be an integer or a slice")
if not (xstep == ystep == 1):
raise NotImplementedError("setting band values with stepped slices")
#if ystep < 0:
# ystart, yend = yend+1, ystart+1
#if xstep < 0:
# xstart, xend = xend+1, xstart+1
shape = [1 + (yend-ystart-1) // abs(ystep),
1 + (xend-xstart-1) // abs(xstep),
len(bands)]
if isinstance(value, np.ndarray) and (value.ndim == 1) and (shape[0] == shape[1] == 1):
val_array = np.reshape(np.atleast_3d(value), shape)
else:
val_array = np.broadcast_to(np.atleast_3d(value), shape)
for i, iband in enumerate(bands):
band = self.bands[iband]
band.setblock(ystart, xstart, val_array[:,:,i])
return
def _get_from_array_mask(self, mask):
# The mask is assumed to be in (row, column[, band]) order
# TODO: make this memory efficient
if mask.ndim == 2:
return self[:,:,:][mask]
elif mask.ndim == 3:
return self[:,:,:][mask]
else:
raise IndexError("masking array must have two or three dimensions")
def _set_from_array_mask(self, mask, value):
# The mask is assumed to be in (row, column[, band]) order
# TODO: make this memory efficient
for i, band in enumerate(self.bands):
if mask.ndim == 3:
mask_ = mask[:,:,i]
else:
mask_ = mask
tmp = band.getblock(0, 0, *band.size)
if isinstance(value, Real) or (value.ndim == 1):
tmp[mask_] = value
else:
tmp[mask_] = value[:,i]
band.setblock(0, 0, tmp)
def __iter__(self):
nx = self.bands[0].size[1]
for i in range(self.bands[0].size[0]):
if len(self.bands) == 1:
yield self.bands[0].getblock(i, 0, 1, nx)
else:
yield np.vstack([b.getblock(i, 0, 1, nx) for b in self.bands])
@property
def shape(self):
""" Returns the dimensions of raster bands. If there is a single
(m x n) band, output is a tuple (m, n). If there are N>1 bands, output
is a tuple (N, m, n).
"""
if len(self.bands) == 0:
raise ValueError("no bands")
else:
return self.bands[0].size
@property
def dtype(self):
""" Returns bands' dtype """
return self.bands[0].dtype
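# Indexing sketch (the `bands` list below is hypothetical):
#   idx = BandIndexer(bands)
#   idx[10, 20]    # values across all bands at one pixel -> shape (nbands,)
#   idx[:, :, 0]   # the full first band -> shape (ny, nx)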
class SimpleBand(object):
""" SimpleBand wraps a numpy.ndarray for storage. """
def __init__(self, size, dtype, initval=None):
self.size = size
if initval is None:
self._array = np.empty(size, dtype=dtype)
else:
self._array = np.full(size, initval, dtype=dtype)
self.dtype = dtype
def getblock(self, yoff, xoff, ny, nx):
return self._array[yoff:yoff+ny, xoff:xoff+nx]
def setblock(self, yoff, xoff, array):
(ny, nx) = array.shape
self._array[yoff:yoff+ny, xoff:xoff+nx] = array
return
class CompressedBand(object):
""" CompressedBand is a chunked, blosc-compressed array. """
CHUNKSET = 1
CHUNKUNSET = 0
def __init__(self, size, dtype, chunksize=(256, 256), initval=0):
""" Initialize a CompressedBand instance.
Parameters
----------
size : tuple of two ints
size of band in pixels
dtype : type
data type of pixel values
chunksize : tuple of two ints, optional
size of compressed chunks, default (256, 256)
initval : value, optional
if set, the entire grid is initialized with this value, which should
be of *dtype*
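
        Notes
        -----
        A minimal usage sketch (hypothetical sizes; defaults otherwise)::

            band = CompressedBand((512, 512), np.float64)
            band.setblock(0, 0, np.ones((128, 128)))
            corner = band.getblock(0, 0, 128, 128)   # block of ones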
"""
assert len(size) == 2
self.size = size
self.dtype = dtype
self._chunksize = chunksize
self._initval = initval
self.nchunkrows = int(ceil(float(size[0])/float(chunksize[0])))
self.nchunkcols = int(ceil(float(size[1])/float(chunksize[1])))
nchunks = self.nchunkrows * self.nchunkcols
# Data store
self._data = [None for i in range(nchunks)]
# 0 => unset
# 1 => set
self.chunkstatus = np.zeros(nchunks, dtype=np.int8)
return
def _store(self, array, index):
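        # blosc.compress takes the element size as its second argument so the
        # shuffle filter can operate on whole values before compression.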
        self._data[index] = blosc.compress(array.tobytes(),
                                           np.dtype(self.dtype).itemsize)
self.chunkstatus[index] = self.CHUNKSET
return
def _retrieve(self, index):
bytestr = blosc.decompress(self._data[index], as_bytearray=True)
return np.frombuffer(bytestr, dtype=self.dtype).reshape(self._chunksize)
def _getchunks(self, yoff, xoff, ny, nx):
""" Return a generator returning tuples identifying chunks covered by a
range. The tuples contain (chunk_number, ystart, yend, xstart, xend)
for each chunk touched by a region defined by corner indices and region
size. """
chunksize = self._chunksize
ystart = yoff // chunksize[0]
yend = ceil(float(yoff+ny) / chunksize[0])
xstart = xoff // chunksize[1]
xend = ceil(float(xoff+nx) / chunksize[1])
nxchunks = int(ceil(float(self.size[1])/float(chunksize[1])))
i = ystart
while i < yend:
j = xstart
while j < xend:
chunk_number = i*nxchunks + j
chunk_ystart = i*chunksize[0]
chunk_xstart = j*chunksize[1]
chunk_yend = min((i+1)*chunksize[0], self.size[0])
chunk_xend = min((j+1)*chunksize[1], self.size[1])
yield (chunk_number, chunk_ystart, chunk_yend,
chunk_xstart, chunk_xend)
j += 1
            i += 1
def setblock(self, yoff, xoff, array):
""" Store block of values in *array* starting at offset *yoff*, *xoff*.
"""
size = array.shape[:2]
chunksize = self._chunksize
for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, *size):
# Get from data store
if self.chunkstatus[i] == self.CHUNKSET:
chunkdata = self._retrieve(i)
else:
chunkdata = np.full(self._chunksize, self._initval, dtype=self.dtype)
# Compute region within chunk to place data in
cy0 = max(0, yoff-yst)
cy1 = min(chunksize[0], yoff+size[0]-yst)
cx0 = max(0, xoff-xst)
cx1 = min(chunksize[1], xoff+size[1]-xst)
# Compute region to slice from data
dy0 = max(0, yst-yoff)
dy1 = min(size[0], yen-yoff)
dx0 = max(0, xst-xoff)
dx1 = min(size[1], xen-xoff)
chunkdata[cy0:cy1, cx0:cx1] = array[dy0:dy1, dx0:dx1]
# Return to data store
self._store(chunkdata, i)
return
def getblock(self, yoff, xoff, ny, nx):
""" Retrieve values with dimensions *size*, starting at offset *yoff*,
*xoff*.
"""
result = np.empty([ny, nx], self.dtype)
for i, yst, yen, xst, xen in self._getchunks(yoff, xoff, ny, nx):
# Compute the bounds in the output
oy0 = max(0, yst-yoff)
oy1 = min(ny, yen-yoff)
ox0 = max(0, xst-xoff)
ox1 = min(nx, xen-xoff)
if self.chunkstatus[i] == self.CHUNKUNSET:
result[oy0:oy1, ox0:ox1] = np.full((oy1-oy0, ox1-ox0),
self._initval,
dtype=self.dtype)
else:
# Compute the extents from the chunk to retain
cy0 = max(yoff, yst) - yst
cy1 = min(yoff+ny, yen) - yst
cx0 = max(xoff, xst) - xst
cx1 = min(xoff+nx, xen) - xst
result[oy0:oy1, ox0:ox1] = self._retrieve(i)[cy0:cy1, cx0:cx1]
return result
| 33.287206 | 95 | 0.543337 | 1,618 | 12,749 | 4.211372 | 0.158838 | 0.029058 | 0.013208 | 0.013208 | 0.395656 | 0.33079 | 0.306721 | 0.288817 | 0.280305 | 0.280305 | 0 | 0.022151 | 0.33783 | 12,749 | 382 | 96 | 33.374346 | 0.785004 | 0.183073 | 0 | 0.412017 | 0 | 0 | 0.04303 | 0 | 0 | 0 | 0 | 0.002618 | 0.004292 | 1 | 0.072961 | false | 0 | 0.017167 | 0.004292 | 0.175966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a29de7a43e2c76cd682a22718fc79113ae69a57 | 2,789 | py | Python | Kepler.py | wongongv/scholarship_wonjun | b46a621a756782bf0929ef96738bf484afd1708e | [
"MIT"
] | null | null | null | Kepler.py | wongongv/scholarship_wonjun | b46a621a756782bf0929ef96738bf484afd1708e | [
"MIT"
] | null | null | null | Kepler.py | wongongv/scholarship_wonjun | b46a621a756782bf0929ef96738bf484afd1708e | [
"MIT"
] | null | null | null | import tensorflow as tf
import matplotlib.pyplot as plt
import pdb
import numpy as np
import pandas as pd
from tensorflow.keras import layers
sample_num = 500000
# coeff = tf.cast(4*np.pi*np.pi/(6.673*10**-11), dtype = tf.float32)
coeff = tf.cast(1, dtype = tf.float32)
#try the range of 10**5 ~10**7 for both mass and radius
# radius = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32)
# massinv = tf.random.normal(shape = [sample_num,1], mean = 0, dtype = tf.float32)
radius = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32)
massinv = tf.random.truncated_normal(shape = [sample_num,1], mean = 2, stddev = 0.5, dtype = tf.float32)
period = radius ** 3 * massinv * coeff
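# With the physical coefficient (commented out above), Kepler's third law
# reads T^2 = (4*pi^2 / (G*M)) * r^3, so `period` stands in for T^2; using
# coeff = 1 keeps the same functional form in arbitrary units.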
def normalize(data):
if isinstance(data, tf.Tensor):
data = data.numpy()
data = (data - np.mean(data)) / np.std(data)
return tf.cast(data, dtype = tf.float64)
def denorm(data, denorm_factor):
# denorm_factor is a tuple of (mean, std)
return data * denorm_factor[1] + denorm_factor[0]
data = tf.stack([radius, massinv], axis = 1)
data = tf.squeeze(data)
normed_label = normalize(period)
denorm_factor = (np.mean(period.numpy()), np.std(period.numpy()))
def build_model():
model = tf.keras.Sequential([layers.Dense(17),
layers.BatchNormalization(),
layers.Activation('sigmoid'),
layers.Dense(17),
layers.BatchNormalization(),
layers.Activation('sigmoid'),
layers.Dense(1)])
model.compile(optimizer = tf.keras.optimizers.Adam(0.0001),
loss = 'mse',
metrics = ['mape', 'mae', 'mse'])
return model
model = build_model()
history = model.fit(data, normed_label, epochs = 50, validation_split = 0.2, batch_size = 64, verbose = 1)
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epochs'] = history.epoch
plt.figure()
plt.xlabel('epochs')
plt.ylabel('mae')
plt.plot(hist['epochs'], hist['mae'], label = 'train_mae')
plt.plot(hist['epochs'], hist['val_mae'], label = 'val_mae')
plt.legend()
plt.figure()
plt.xlabel('epochs')
plt.ylabel('mse')
plt.plot(hist['epochs'], hist['mse'], label = 'train_mse')
plt.plot(hist['epochs'], hist['val_mse'], label = 'val_mse')
plt.legend()
plt.show()
plot_history(history)
sun_earth = {'radius': [2440*10**6, 3390*10**6, 6052*10**6],'mass':[(3.3*10**23)**-1, (6.4*10**23)**-1, (4.87*10**24)**-1]}
sun_earth_data = np.stack([sun_earth['radius'], sun_earth['mass']], axis = 1)
result1 = model.predict(sun_earth_data)
result = denorm(result1,denorm_factor)
print(result)
# Mercury, Mars, Venus
# Mercury 0.2409
# Mars    1.8809
# Venus   0.6102
# Earth   1.0000
| 34.432099 | 123 | 0.630333 | 407 | 2,789 | 4.235872 | 0.324324 | 0.028422 | 0.048724 | 0.046404 | 0.312645 | 0.312645 | 0.24536 | 0.207077 | 0.207077 | 0.207077 | 0 | 0.06062 | 0.201506 | 2,789 | 80 | 124 | 34.8625 | 0.713516 | 0.131947 | 0 | 0.172414 | 0 | 0 | 0.061021 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.103448 | 0.017241 | 0.224138 | 0.017241 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a2b60eeeb9c5a441e5b481a07de842558d9a0f8 | 1,589 | py | Python | IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | IOPool/Output/test/PoolOutputTestUnscheduled_cfg.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
process = cms.Process("TESTOUTPUT")
process.load("FWCore.Framework.test.cmsExceptionsFatal_cff")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(20)
)
process.Thing = cms.EDProducer("ThingProducer")
process.OtherThing = cms.EDProducer("OtherThingProducer")
process.thingWithMergeProducer = cms.EDProducer("ThingWithMergeProducer")
process.intProducer1 = cms.EDProducer("IntProducer",
ivalue = cms.int32(7)
)
process.intProducer2 = cms.EDProducer("IntProducer",
ivalue = cms.int32(11)
)
process.aliasForInt1 = cms.EDAlias(
intProducer1 = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct'))
)
)
process.aliasForInt2 = cms.EDAlias(
intProducer2 = cms.VPSet(
cms.PSet(type = cms.string('edmtestIntProduct'))
)
)
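# The two EDAliases re-publish the IntProducer products (type
# edmtestIntProduct) under alternate module labels; getInt below reads them
# through aliasForInt1.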
process.output = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('file:PoolOutputTestUnscheduled.root'),
outputCommands = cms.untracked.vstring(
'keep *',
'drop *_intProducer1_*_*',
'drop *_aliasForInt1_*_*',
'drop *_intProducer2_*_*'
)
)
process.getInt = cms.EDAnalyzer("TestFindProduct",
inputTags = cms.untracked.VInputTag(
cms.InputTag("aliasForInt1"),
),
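    # 20 events (maxEvents) x ivalue 7 from intProducer1 = 140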
expectedSum = cms.untracked.int32(140)
)
process.source = cms.Source("EmptySource")
process.t = cms.Task(process.Thing, process.OtherThing, process.thingWithMergeProducer, process.intProducer1, process.intProducer2)
process.path1 = cms.Path(process.getInt, process.t)
process.ep = cms.EndPath(process.output)
| 26.04918 | 131 | 0.721838 | 159 | 1,589 | 7.150943 | 0.408805 | 0.063325 | 0.029903 | 0.05277 | 0.158311 | 0.158311 | 0.091469 | 0.091469 | 0.091469 | 0 | 0 | 0.021339 | 0.144745 | 1,589 | 60 | 132 | 26.483333 | 0.815305 | 0 | 0 | 0.045455 | 0 | 0 | 0.206049 | 0.063642 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.022727 | 0 | 0.022727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a2fe90ca9c1760303393523fbb9dfacdfef8238 | 11,435 | py | Python | tests/test_main.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | null | null | null | tests/test_main.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | 1 | 2019-04-15T02:09:37.000Z | 2019-04-15T02:09:37.000Z | tests/test_main.py | Asday/ytdl | 96a51ba3589e855b27f75095b0cd4a6f00f8eefa | [
"MIT"
] | null | null | null | import datetime
import os
import subprocess
from django.apps import apps
from django.core.management import call_command
import attr
from freezegun import freeze_time
import pytest
import pytz
from downloader.exceptions import (
NoFilesCreatedError,
TooManyFilesCreatedError,
YoutubeDLError,
)
from playlists.models import Playlist, Video
def test_server_starts(client):
client.get('/')
def test_checks_pass():
call_command('check')
def test_get_playlist_info_raises_for_garbage_playlist():
downloader = apps.get_app_config('downloader')
with pytest.raises(YoutubeDLError):
downloader.get_playlist_info('asdf')
_TEST_PLAYLIST_ID = 'PL59FEE129ADFF2B12'
_TEST_VIDEO_ID = '007VM8NZxkI'
def test_get_playlist_info_returns_iterable():
downloader = apps.get_app_config('downloader')
results = downloader.get_playlist_info(_TEST_PLAYLIST_ID)
iter(results)
def test_get_playlist_info_returns_id_and_title_for_all_results():
downloader = apps.get_app_config('downloader')
results = downloader.get_playlist_info(_TEST_PLAYLIST_ID)
for result in results:
assert 'id' in result
assert 'title' in result
def test_download_video_raises_for_garbage_video(tmp_path):
downloader = apps.get_app_config('downloader')
with pytest.raises(YoutubeDLError):
downloader.download_video('asdf', tmp_path)
def test_download_video_creates_a_file(tmp_path):
downloader = apps.get_app_config('downloader')
filename = downloader.download_video(_TEST_VIDEO_ID, tmp_path)
expected_path = os.path.join(tmp_path, filename)
assert os.path.exists(expected_path)
os.remove(expected_path)
def test_download_video_raises_when_youtube_dl_misbehaves(tmp_path, mocker):
downloader = apps.get_app_config('downloader')
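
    # Stand-in for subprocess.run: instead of invoking youtube-dl, it just
    # creates `files_to_create` files in the working directory.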
def run_factory(files_to_create):
def run(*args, cwd, **kwargs):
for i in range(files_to_create):
open(os.path.join(cwd, str(i)), 'w').close()
return run
mocker.patch.object(subprocess, 'run', run_factory(0))
with pytest.raises(NoFilesCreatedError):
downloader.download_video(_TEST_VIDEO_ID, tmp_path)
mocker.patch.object(subprocess, 'run', run_factory(2))
with pytest.raises(TooManyFilesCreatedError):
downloader.download_video(_TEST_VIDEO_ID, tmp_path)
@attr.s
class Params(object):
preexisting = attr.ib()
playlist_info = attr.ib()
expected = attr.ib()
now = datetime.datetime(2018, 12, 2, 0, 0, 0, tzinfo=pytz.UTC)
yesterday = datetime.datetime(2018, 12, 1, 0, 0, 0, tzinfo=pytz.UTC)
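
# freeze_time below pins the clock to the same instant as `now`, making the
# added/removed timestamps recorded during each case deterministic.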
@freeze_time('2018-12-02 00:00:00.0')
@pytest.mark.django_db
@pytest.mark.parametrize(
'params',
[
Params( # None preexisting, none new.
preexisting=[],
playlist_info=[],
expected=[],
),
Params( # None preexisting, one new.
preexisting=[],
playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
expected=[
{
'youtube_id': 'testID',
'title': 'Test Title',
'added': now,
'removed': None,
},
]
),
Params( # None preexisting, some new.
preexisting=[],
playlist_info=[
{'id': 'testID1', 'title': 'Test Title 1'},
{'id': 'testID2', 'title': 'Test Title 2'},
],
expected=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': now,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': now,
'removed': None,
},
],
),
Params( # Some preexisting, none new.
preexisting=[{
'youtube_id': 'testID',
'title': 'Test Title',
'added': now,
'removed': None,
}],
playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
expected=[{
'youtube_id': 'testID',
'title': 'Test Title',
'added': now,
'removed': None,
}],
),
Params( # Some preexisting, one new.
preexisting=[{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
}],
playlist_info=[
{'id': 'testID1', 'title': 'Test Title 1'},
{'id': 'testID2', 'title': 'Test Title 2'},
],
expected=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': now,
'removed': None,
},
],
),
Params( # Some preexisting, one removed.
preexisting=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': yesterday,
'removed': None,
},
],
playlist_info=[{'id': 'testID1', 'title': 'Test Title 1'}],
expected=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': yesterday,
'removed': now,
},
],
),
Params( # Some preexisting, one new, one removed.
preexisting=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': yesterday,
'removed': None,
},
],
playlist_info=[
{'id': 'testID1', 'title': 'Test Title 1'},
{'id': 'testID3', 'title': 'Test Title 3'},
],
expected=[
{
'youtube_id': 'testID1',
'title': 'Test Title 1',
'added': yesterday,
'removed': None,
},
{
'youtube_id': 'testID2',
'title': 'Test Title 2',
'added': yesterday,
'removed': now,
},
{
'youtube_id': 'testID3',
'title': 'Test Title 3',
'added': now,
'removed': None,
},
],
),
Params( # Some preexisting, one renamed.
preexisting=[{
'youtube_id': 'testID',
'title': 'Test Title',
'added': yesterday,
'removed': None,
}],
playlist_info=[{'id': 'testID', 'title': 'Renamed'}],
expected=[{
'youtube_id': 'testID',
'title': 'Renamed',
'added': yesterday,
'removed': None,
}],
),
Params( # Some preexisting, one deleted.
preexisting=[{
'youtube_id': 'testID',
'title': 'Test Title',
'added': yesterday,
'removed': None,
'deleted': False,
}],
playlist_info=[{'id': 'testID', 'title': '[Deleted video]'}],
expected=[{
'youtube_id': 'testID',
'title': 'Test Title',
'deleted': True,
'privated': False,
}],
),
Params( # Some preexisting, one made private.
preexisting=[{
'youtube_id': 'testID',
'title': 'Test Title',
'added': yesterday,
'removed': None,
'privated': False,
}],
playlist_info=[{'id': 'testID', 'title': '[Private video]'}],
expected=[{
'youtube_id': 'testID',
'title': 'Test Title',
'deleted': False,
'privated': True,
}],
),
Params( # Some preexisting private, one made public.
preexisting=[{
'youtube_id': 'testID',
'title': '[Private video]',
'added': yesterday,
'removed': None,
'privated': True,
}],
playlist_info=[{'id': 'testID', 'title': 'Test Title'}],
expected=[{
'youtube_id': 'testID',
'title': 'Test Title',
'deleted': False,
'privated': False,
}],
),
Params( # None preexisting, one new private, one new deleted.
preexisting=[],
playlist_info=[
{'id': 'testID1', 'title': '[Private video]'},
{'id': 'testID2', 'title': '[Deleted video]'},
],
expected=[
{
'youtube_id': 'testID1',
'title': '[Private video]',
'added': now,
'removed': None,
'deleted': False,
'privated': True,
},
{
'youtube_id': 'testID2',
'title': '[Deleted video]',
'added': now,
'removed': None,
'deleted': True,
'privated': False,
},
],
),
],
)
def test_create_and_update_videos(params, mocker):
playlist = Playlist.objects.create(youtube_id='playlistID')
for details in params.preexisting:
Video.objects.create(playlist=playlist, **details)
downloader = apps.get_app_config('downloader')
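    # Replace the network-bound playlist fetch with the parametrized data;
    # a generator mimics the lazy iterable the real call returns.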
mocker.patch.object(downloader, 'get_playlist_info')
downloader.get_playlist_info.return_value = (
item for item in params.playlist_info
)
playlist.create_and_update_videos()
videos = playlist.videos.all()
for details in params.expected:
video = videos.get(youtube_id=details['youtube_id'])
for attr_name, value in details.items():
assert getattr(video, attr_name) == value
assert playlist.videos.count() == len(params.expected)
| 30.412234 | 76 | 0.457718 | 949 | 11,435 | 5.336143 | 0.14647 | 0.058649 | 0.091232 | 0.064179 | 0.624605 | 0.549566 | 0.459913 | 0.439179 | 0.390798 | 0.380134 | 0 | 0.014275 | 0.418015 | 11,435 | 375 | 77 | 30.493333 | 0.746657 | 0.035068 | 0 | 0.631902 | 0 | 0 | 0.168315 | 0 | 0 | 0 | 0 | 0 | 0.015337 | 1 | 0.033742 | false | 0.003067 | 0.033742 | 0 | 0.082822 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a324221f1f84a57129bf65acf3d694eadd4186b | 900 | py | Python | where/parsers/vascc_crf.py | ingridfausk/where | b65398911075b7ddef3a3a1146efa428eae498fe | [
"MIT"
] | 16 | 2018-08-31T10:31:11.000Z | 2022-03-15T16:07:24.000Z | where/parsers/vascc_crf.py | ingridfausk/where | b65398911075b7ddef3a3a1146efa428eae498fe | [
"MIT"
] | 5 | 2018-07-13T14:04:24.000Z | 2021-06-17T02:14:44.000Z | where/parsers/vascc_crf.py | ingridfausk/where | b65398911075b7ddef3a3a1146efa428eae498fe | [
"MIT"
] | 15 | 2018-06-07T05:45:24.000Z | 2022-03-15T16:07:27.000Z | """A parser for reading radio source coordinates from VASCC apriori crf
Description:
------------
Reads radio source coordinates from VASCC (VLBI Software Analysis Comparison Campaign) apriori file.
"""
# Midgard imports
from midgard.dev import plugins
from midgard.parsers._parser_line import LineParser
@plugins.register
class VasccCrfParser(LineParser):
"""A parser for reading source coordinates from ICRF files
"""
def setup_parser(self):
return dict(usecols=(0, 3, 4), dtype="U8, f8, f8", skip_header=1)
def structure_data(self):
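        # The VASCC file provides only coordinates, so the source-category
        # flags expected downstream (defining, special, vcs, ...) are filled
        # with fixed defaults.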
self.data = {
name: {
"ra": ra,
"dec": dec,
"special": False,
"undefined": True,
"non_vcs": False,
"vcs": False,
"defining": False,
}
for name, ra, dec in self._array
}
| 25.714286 | 100 | 0.576667 | 99 | 900 | 5.171717 | 0.616162 | 0.099609 | 0.123047 | 0.066406 | 0.121094 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011382 | 0.316667 | 900 | 34 | 101 | 26.470588 | 0.821138 | 0.305556 | 0 | 0 | 0 | 0 | 0.080065 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0.052632 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7a32575dae7683b788c3a10ea2963e61f6b9dee6 | 1,520 | py | Python | data_engineering/opcua/client.py | croidzen/playground | 37dfe861cdc1803b0f51a0ee623f42c450e75f04 | [
"MIT"
] | null | null | null | data_engineering/opcua/client.py | croidzen/playground | 37dfe861cdc1803b0f51a0ee623f42c450e75f04 | [
"MIT"
] | null | null | null | data_engineering/opcua/client.py | croidzen/playground | 37dfe861cdc1803b0f51a0ee623f42c450e75f04 | [
"MIT"
] | null | null | null | import asyncio
import sys
# sys.path.insert(0, "..")
import logging
from asyncua import Client, Node, ua
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger('asyncua')
async def main():
url = 'opc.tcp://localhost:4840/freeopcua/server/'
# url = 'opc.tcp://commsvr.com:51234/UA/CAS_UA_Server'
async with Client(url=url) as client:
# Client has a few methods to get proxy to UA nodes that should always be in address space such as Root or Objects
# Node objects have methods to read and write node attributes as well as browse or populate address space
_logger.info('Children of root are: %r', await client.nodes.root.get_children())
uri = 'http://examples.freeopcua.github.io'
idx = await client.get_namespace_index(uri)
# get a specific node knowing its node id
# var = client.get_node(ua.NodeId(1002, 2))
# var = client.get_node("ns=3;i=2002")
var = await client.nodes.root.get_child(["0:Objects", f"{idx}:MyObject", f"{idx}:MyVariable"])
print("My variable", var, await var.read_value())
# print(var)
# await var.read_data_value() # get value of node as a DataValue object
# await var.read_value() # get value of node as a python builtin
# await var.write_value(ua.Variant([23], ua.VariantType.Int64)) #set node value using explicit data type
# await var.write_value(3.9) # set node value using implicit data type
if __name__ == '__main__':
asyncio.run(main())
| 44.705882 | 122 | 0.678289 | 229 | 1,520 | 4.39738 | 0.484716 | 0.039722 | 0.03575 | 0.039722 | 0.089374 | 0.043694 | 0.043694 | 0 | 0 | 0 | 0 | 0.022388 | 0.206579 | 1,520 | 33 | 123 | 46.060606 | 0.812604 | 0.476974 | 0 | 0 | 0 | 0 | 0.213368 | 0.053985 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |