hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
33a49dcb8909ea1581dc4d90f918e7844abada00 | 223 | py | Python | WD/Cwiczenia/Kwadraty_w_petli.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | 1 | 2020-02-29T14:38:33.000Z | 2020-02-29T14:38:33.000Z | WD/Cwiczenia/Kwadraty_w_petli.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | null | null | null | WD/Cwiczenia/Kwadraty_w_petli.py | galursa/UWM | b7ab4a275662764a91af6c5bc79da0d98177d0ac | [
"MIT"
] | null | null | null | #exercise class 2
#task 7
ile = input("Enter how many numbers you want to read: ")
ile = int(ile)
for i in range(ile):
    liczba = input("Enter number " + str(i) + ": ")
    liczba = int(liczba)
    print(str(liczba**2))
| 18.583333 | 52 | 0.609865 | 33 | 223 | 4.121212 | 0.606061 | 0.147059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017544 | 0.233184 | 223 | 11 | 53 | 20.272727 | 0.777778 | 0.089686 | 0 | 0 | 0 | 0 | 0.278947 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33a85b389cf87637147eaf9b17700c63e40bbe3a | 271 | py | Python | IEV.py | cptchloroplast/rosalind | 7ed0f7005c124a498c877ce5e1141e9fbb04bf7b | [
"MIT"
] | null | null | null | IEV.py | cptchloroplast/rosalind | 7ed0f7005c124a498c877ce5e1141e9fbb04bf7b | [
"MIT"
] | null | null | null | IEV.py | cptchloroplast/rosalind | 7ed0f7005c124a498c877ce5e1141e9fbb04bf7b | [
"MIT"
] | null | null | null | # http://rosalind.info/problems/iev/
from sys import argv
f = open(argv[1], 'r')
a = f.readline().strip().split(' ')
f.close()
a = list(map(int, a))  # wrap in list(): under Python 3, map() returns an iterator that can't be indexed
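# Expected dominant-phenotype offspring: every couple has 2 children, and the
# six genotype pairings (AA-AA, AA-Aa, AA-aa, Aa-Aa, Aa-aa, aa-aa) produce a
# dominant phenotype with probabilities 1, 1, 1, 0.75, 0.5 and 0 respectively.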
r = 2*(a[0]*1 + a[1]*1 + a[2]*1 + a[3]*.75 + a[4]*.5 + a[5]*0)
w = open('results_' + argv[1], 'w')
w.write(str(r))
w.close()
| 19.357143 | 62 | 0.535055 | 57 | 271 | 2.526316 | 0.526316 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.070175 | 0.158672 | 271 | 13 | 63 | 20.846154 | 0.561404 | 0.125461 | 0 | 0 | 0 | 0 | 0.047009 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33a915e7cad061d0d037885bed31be7570f8f117 | 8,371 | py | Python | utils/pitch_tools.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | utils/pitch_tools.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | utils/pitch_tools.py | ishine/DiffSinger | d5dbe05ee1c7da0878393c73129089a67d0fe935 | [
"MIT"
] | null | null | null | #########
# world
#########
import librosa
import parselmouth
import numpy as np
import torch
import torch.nn.functional as F
from pycwt import wavelet
from scipy.interpolate import interp1d
gamma = 0
mcepInput = 3 # 0 for dB, 3 for magnitude
alpha = 0.45
en_floor = 10 ** (-80 / 20)
FFT_SIZE = 2048
f0_bin = 256
f0_max = 1100.0
f0_min = 50.0
f0_mel_min = 1127 * np.log(1 + f0_min / 700)
f0_mel_max = 1127 * np.log(1 + f0_max / 700)
def f0_to_coarse(f0):
    is_torch = isinstance(f0, torch.Tensor)
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (f0_bin - 2) / (f0_mel_max - f0_mel_min) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > f0_bin - 1] = f0_bin - 1
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(int)  # np.int was removed in NumPy 1.24; plain int is equivalent here
    assert f0_coarse.max() <= 255 and f0_coarse.min() >= 1, (f0_coarse.max(), f0_coarse.min())
    return f0_coarse
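# A minimal illustration: bin 1 is effectively reserved for unvoiced frames
# (f0 == 0 maps to mel 0, which is clamped to 1), so for example
#   f0_to_coarse(np.array([0.0, 110.0, 440.0]))
# yields 1 for the first frame and mel-spaced bins in [2, 255] for the rest.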
def norm_f0(f0, uv, config):
    is_torch = isinstance(f0, torch.Tensor)
    if config["pitch_norm"] == "standard":
        f0 = (f0 - config["f0_mean"]) / config["f0_std"]
    if config["pitch_norm"] == "log":
        eps = config["pitch_norm_eps"]
        f0 = torch.log2(f0 + eps) if is_torch else np.log2(f0 + eps)
    if uv is not None and config["use_uv"]:
        f0[uv > 0] = 0
    return f0


def norm_interp_f0(f0, config):
    # is_torch = isinstance(f0, torch.Tensor)
    # if is_torch:
    #     device = f0.device
    #     f0 = f0.data.cpu().numpy()
    uv = f0 == 0
    f0 = norm_f0(f0, uv, config)
    if sum(uv) == len(f0):
        f0[uv] = 0
    elif sum(uv) > 0:
        f0[uv] = np.interp(np.where(uv)[0], np.where(~uv)[0], f0[~uv])
    # uv = torch.FloatTensor(uv)
    # f0 = torch.FloatTensor(f0)
    # if is_torch:
    #     f0 = f0.to(device)
    return f0, uv


def denorm_f0(f0, uv, config, pitch_padding=None, min=None, max=None):
    if config["pitch_norm"] == "standard":
        f0 = f0 * config["f0_std"] + config["f0_mean"]
    if config["pitch_norm"] == "log":
        f0 = 2 ** f0
    if min is not None:
        f0 = f0.clamp(min=min)
    if max is not None:
        f0 = f0.clamp(max=max)
    if uv is not None and config["use_uv"]:
        f0[uv > 0] = 0
    if pitch_padding is not None:
        f0[pitch_padding] = 0
    return f0


def get_pitch(wav_data, mel, config):
    """
    :param wav_data: [T]
    :param mel: [T, 80]
    :param config:
    :return:
    """
    sampling_rate = config["preprocessing"]["audio"]["sampling_rate"]
    hop_length = config["preprocessing"]["stft"]["hop_length"]
    time_step = hop_length / sampling_rate * 1000
    f0_min = 80
    f0_max = 750

    if hop_length == 128:
        pad_size = 4
    elif hop_length == 256:
        pad_size = 2
    else:
        assert False

    f0 = parselmouth.Sound(wav_data, sampling_rate).to_pitch_ac(
        time_step=time_step / 1000, voicing_threshold=0.6,
        pitch_floor=f0_min, pitch_ceiling=f0_max).selected_array["frequency"]
    lpad = pad_size * 2
    rpad = len(mel) - len(f0) - lpad
    f0 = np.pad(f0, [[lpad, rpad]], mode="constant")

    # mel and f0 are extracted by 2 different libraries, so we force them to have the same length.
    # Attention: newer versions of some libraries can cause ``rpad'' to be a negative value.
    # Just to be sure, we recommend setting up the same environment as in requirements_auto.txt (via Anaconda).
    delta_l = len(mel) - len(f0)
    assert np.abs(delta_l) <= 8
    if delta_l > 0:
        f0 = np.concatenate([f0, [f0[-1]] * delta_l], 0)
    f0 = f0[:len(mel)]
    pitch_coarse = f0_to_coarse(f0)
    return f0, pitch_coarse


def expand_f0_ph(f0, mel2ph, config):
    f0 = denorm_f0(f0, None, config)
    f0 = F.pad(f0, [1, 0])
    f0 = torch.gather(f0, 1, mel2ph)  # [B, T_mel]
    return f0
#########
# cwt
#########
def load_wav(wav_file, sr):
    wav, _ = librosa.load(wav_file, sr=sr, mono=True)
    return wav


def convert_continuos_f0(f0):
    """CONVERT F0 TO CONTINUOUS F0
    Args:
        f0 (ndarray): original f0 sequence with the shape (T)
    Return:
        (ndarray): continuous f0 with the shape (T)
    """
    # get uv information as binary
    f0 = np.copy(f0)
    uv = np.float32(f0 != 0)

    # get start and end of f0
    if (f0 == 0).all():
        print("| all of the f0 values are 0.")
        return uv, f0
    start_f0 = f0[f0 != 0][0]
    end_f0 = f0[f0 != 0][-1]

    # padding start and end of f0 sequence
    start_idx = np.where(f0 == start_f0)[0][0]
    end_idx = np.where(f0 == end_f0)[0][-1]
    f0[:start_idx] = start_f0
    f0[end_idx:] = end_f0

    # get non-zero frame index
    nz_frames = np.where(f0 != 0)[0]

    # perform linear interpolation
    f = interp1d(nz_frames, f0[nz_frames])
    cont_f0 = f(np.arange(0, f0.shape[0]))
    return uv, cont_f0


def get_cont_lf0(f0, frame_period=5.0):
    uv, cont_f0_lpf = convert_continuos_f0(f0)
    # cont_f0_lpf = low_pass_filter(cont_f0_lpf, int(1.0 / (frame_period * 0.001)), cutoff=20)
    cont_lf0_lpf = np.log(cont_f0_lpf)
    return uv, cont_lf0_lpf


def get_lf0_cwt(lf0):
    """
    input:
        signal of shape (N)
    output:
        Wavelet_lf0 of shape (10, N), scales of shape (10)
    """
    mother = wavelet.MexicanHat()
    dt = 0.005
    dj = 1
    s0 = dt * 2
    J = 9

    Wavelet_lf0, scales, _, _, _, _ = wavelet.cwt(np.squeeze(lf0), dt, dj, s0, J, mother)
    # Wavelet.shape => (J + 1, len(lf0))
    Wavelet_lf0 = np.real(Wavelet_lf0).T
    return Wavelet_lf0, scales
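# Note on the constants above (an informal reading, not from the original docs):
# dt = 0.005 matches the 5 ms frame period assumed in get_cont_lf0, and J = 9
# with dj = 1 yields J + 1 = 10 octave-spaced scales, i.e. the 10 CWT channels.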
def norm_scale(Wavelet_lf0):
    Wavelet_lf0_norm = np.zeros((Wavelet_lf0.shape[0], Wavelet_lf0.shape[1]))
    mean = Wavelet_lf0.mean(0)[None, :]
    std = Wavelet_lf0.std(0)[None, :]
    Wavelet_lf0_norm = (Wavelet_lf0 - mean) / std
    return Wavelet_lf0_norm, mean, std


def normalize_cwt_lf0(f0, mean, std):
    uv, cont_lf0_lpf = get_cont_lf0(f0)
    cont_lf0_norm = (cont_lf0_lpf - mean) / std
    Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_norm)
    Wavelet_lf0_norm, _, _ = norm_scale(Wavelet_lf0)
    return Wavelet_lf0_norm


def get_lf0_cwt_norm(f0s, mean, std):
    uvs = []
    cont_lf0_lpfs = []
    cont_lf0_lpf_norms = []
    Wavelet_lf0s = []
    Wavelet_lf0s_norm = []
    scaless = []
    means = []
    stds = []
    for f0 in f0s:
        uv, cont_lf0_lpf = get_cont_lf0(f0)
        cont_lf0_lpf_norm = (cont_lf0_lpf - mean) / std
        Wavelet_lf0, scales = get_lf0_cwt(cont_lf0_lpf_norm)  # [560, 10]
        Wavelet_lf0_norm, mean_scale, std_scale = norm_scale(Wavelet_lf0)  # [560, 10], [1, 10], [1, 10]

        Wavelet_lf0s_norm.append(Wavelet_lf0_norm)
        uvs.append(uv)
        cont_lf0_lpfs.append(cont_lf0_lpf)
        cont_lf0_lpf_norms.append(cont_lf0_lpf_norm)
        Wavelet_lf0s.append(Wavelet_lf0)
        scaless.append(scales)
        means.append(mean_scale)
        stds.append(std_scale)

    return Wavelet_lf0s_norm, scaless, means, stds


def inverse_cwt_torch(Wavelet_lf0, scales):
    import torch
    b = ((torch.arange(0, len(scales)).float().to(Wavelet_lf0.device)[None, None, :] + 1 + 2.5) ** (-2.5))
    lf0_rec = Wavelet_lf0 * b
    lf0_rec_sum = lf0_rec.sum(-1)
    lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdim=True)) / lf0_rec_sum.std(-1, keepdim=True)
    return lf0_rec_sum


def inverse_cwt(Wavelet_lf0, scales):
    b = ((np.arange(0, len(scales))[None, None, :] + 1 + 2.5) ** (-2.5))
    lf0_rec = Wavelet_lf0 * b
    lf0_rec_sum = lf0_rec.sum(-1)
    lf0_rec_sum = (lf0_rec_sum - lf0_rec_sum.mean(-1, keepdims=True)) / lf0_rec_sum.std(-1, keepdims=True)
    return lf0_rec_sum


def cwt2f0(cwt_spec, mean, std, cwt_scales):
    assert len(mean.shape) == 1 and len(std.shape) == 1 and len(cwt_spec.shape) == 3
    import torch
    if isinstance(cwt_spec, torch.Tensor):
        f0 = inverse_cwt_torch(cwt_spec, cwt_scales)
        f0 = f0 * std[:, None] + mean[:, None]
        f0 = f0.exp()  # [B, T]
    else:
        f0 = inverse_cwt(cwt_spec, cwt_scales)
        f0 = f0 * std[:, None] + mean[:, None]
        f0 = np.exp(f0)  # [B, T]
    return f0


def cwt2f0_norm(cwt_spec, mean, std, mel2ph, config):
    f0 = cwt2f0(cwt_spec, mean, std, config["cwt_scales"])
    f0 = torch.cat(
        [f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
    f0_norm = norm_f0(f0, None, config)
    return f0_norm
| 29.684397 | 120 | 0.616533 | 1,351 | 8,371 | 3.595855 | 0.183568 | 0.057637 | 0.025937 | 0.014821 | 0.221284 | 0.175175 | 0.132771 | 0.127213 | 0.11198 | 0.096748 | 0 | 0.074232 | 0.242026 | 8,371 | 281 | 121 | 29.790036 | 0.691411 | 0.140485 | 0 | 0.155914 | 0 | 0 | 0.032331 | 0 | 0 | 0 | 0 | 0 | 0.021505 | 1 | 0.091398 | false | 0 | 0.048387 | 0 | 0.236559 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33aae5bb3474518a6cd449ce9569ad5e5b7042ed | 5,101 | py | Python | PicoMorseKeyboard.py | batica81/PicoMorseKeyboard | ef6d3ebcd88cff99f5621a99936af4eaa52344ce | [
"Unlicense"
] | null | null | null | PicoMorseKeyboard.py | batica81/PicoMorseKeyboard | ef6d3ebcd88cff99f5621a99936af4eaa52344ce | [
"Unlicense"
] | null | null | null | PicoMorseKeyboard.py | batica81/PicoMorseKeyboard | ef6d3ebcd88cff99f5621a99936af4eaa52344ce | [
"Unlicense"
] | null | null | null | # This code is taken from many places and many authors and compiled by YU4HAK :)
# A Raspberry Pi Pico is needed to use this app! Maybe I will do an Arduino version in the future.
# The basic purpose of the app is to take input from the serial port and turn it into Morse code.
# It then plays it via sound, light, or by acting as a straight key and triggering an external device (radio).
# Interfacing with the external device can be done with an optocoupler, transistor, relay or similar simple circuit.
# There is one memory slot that can be played with a button push, and changed through the console.
# WPM and sidetone frequency can be changed as well.
# The code has many bugs, please fix them :)
# 73
import time, sys, utime # import necessary libraries,
from machine import Pin, PWM, Timer
led = Pin(25, Pin.OUT) # the LED on the Pico is pin 25
digitalOut = Pin(9, Pin.OUT) # output to trigger external device
button = Pin(10, Pin.IN, Pin.PULL_UP)
buzzer = PWM(Pin(15))
stdin_string = sys.stdin
lastInterrupt = 0
timer = Timer(-1)
memory1 = "CQ DE YU4HAK"
BlinkRate = 0.062
BuzzFrequency = 600
# functions for morse code signal durations
def dah():
    led.value(1)
    digitalOut.value(1)
    buzzer.duty_u16(1000)
    time.sleep(3 * BlinkRate)
    led.value(0)
    digitalOut.value(0)
    buzzer.duty_u16(0)
    time.sleep(BlinkRate)

def dit():
    led.value(1)
    digitalOut.value(1)
    buzzer.duty_u16(1000)
    time.sleep(BlinkRate)
    led.value(0)
    digitalOut.value(0)
    buzzer.duty_u16(0)
    time.sleep(BlinkRate)

def pause(elementcount):
    time.sleep(elementcount * BlinkRate)
# morse code conversion
code = {"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..",
"0": "-----", "1": ".----", "2": "..---", "3": "...--", "4": "....-",
"5": ".....", "6": "-....", "7": "--...", "8": "---..", "9": "----.",
".": ".-.-.-",
",": "--..--",
"?": "..--..",
"/": "--..-.",
"@": ".--.-.",
" ": "|",
"-": "-....-",
"(": "-.--.",
")": "-.--.-",
"'": ".----.",
"!": "-.-.--",
"&": ".-...",
":": "---...",
";": "-.-.-.",
"=": "-...-",
"_": "..--.-",
"\"": ".-..-.",
"$": "...-..-",
"{": ".--.-.",
"}": ".--.-.",
"+": ".-.-."
}
# Function that returns morse code sentence from uppercase English sentence
def convertToMorseCode(sentence):
    tempSentence = sentence.upper()  # make it all caps so that the dictionary understands the character
    workSentence = ""  # empty sentence to add to
    for j in tempSentence:  # for each character in the sentence, reference the code dictionary and change to a morse code character
        if j in code:
            workSentence += code[j] + " "
    return workSentence
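# Quick example: convertToMorseCode("SOS") returns "... --- ... " -- each
# character's code followed by a space, with "|" marking word breaks.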
# main function that blinks LED based on morse code sentence
def sendMessage(sendSentence):
    secretSentence = convertToMorseCode(sendSentence)
    print("Sending: ")
    print(secretSentence)
    for i in secretSentence:
        if i == ".":
            dit()
        elif i == "-":
            dah()
        elif i == "|":
            pause(1)  # with the two surrounding 3-element pauses, adds up to 7
        else:
            pause(3)
# A rough rule of thumb: seconds per dit = 60 / (50 * WPM)
def setWPM(wpm):
    global BlinkRate
    BlinkRate = 60 / (50 * wpm)
    print(BlinkRate)
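# Worked example: the standard "PARIS" word is 50 dit units long, so at 25 WPM
# a dit lasts 60 / (50 * 25) = 0.048 s -- which is what setWPM(25) below sets.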
def setBuzzFrequency(freq):
    buzzer.freq(freq)

def debounce(pin):
    # start or replace a timer, and trigger on_pressed
    timer.init(mode=Timer.ONE_SHOT, period=1000, callback=on_pressed)

def on_pressed(timer):
    sendMessage(memory1)
# Initializing app
button.irq(trigger = Pin.IRQ_FALLING, handler = debounce)
setBuzzFrequency(600)
setWPM(25)
# Keeps reading from stdin and quits only if the word 'exit' is there
# This loop, by default does not terminate, since stdin is open
print("Enter a parameter to change it, or text to send:")
for line in stdin_string:
    # remove trailing newline characters using strip()
    cleanLine = line.strip()
    if '_SPEED=' == cleanLine[0:7]:
        newSpeed = cleanLine.split('=')[1]
        setWPM(int(newSpeed))
        print('Setting speed to ' + newSpeed + ' WPM')
    elif '_BUZZ=' == cleanLine[0:6]:
        newBuzz = cleanLine.split('=')[1]
        setBuzzFrequency(int(newBuzz))
        print('Setting buzzer frequency to ' + newBuzz + ' Hz')
    elif '_MEM1=' == cleanLine[0:6]:
        newMem = cleanLine.split('=')[1]
        memory1 = newMem
        print('Setting memory 1 to "' + newMem + '"')
    else:
        sendMessage(cleanLine)
| 31.103659 | 127 | 0.525191 | 581 | 5,101 | 4.583477 | 0.452668 | 0.020278 | 0.019527 | 0.01427 | 0.090124 | 0.090124 | 0.090124 | 0.090124 | 0.090124 | 0.090124 | 0 | 0.025669 | 0.282102 | 5,101 | 163 | 128 | 31.294479 | 0.701529 | 0.291511 | 0 | 0.154545 | 0 | 0 | 0.137891 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081818 | false | 0 | 0.018182 | 0 | 0.109091 | 0.063636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33b21765db87e055663ba068290a988c042d3e61 | 1,992 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_12/rule_11.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_12/rule_11.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/4_features/numtrees_12/rule_11.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation, obj[3]: Distance
    # {"feature": "Education", "instances": 85, "metric_value": 0.9975, "depth": 1}
    if obj[1]<=2:
        # {"feature": "Distance", "instances": 70, "metric_value": 0.9787, "depth": 2}
        if obj[3]<=2:
            # {"feature": "Coupon", "instances": 63, "metric_value": 0.9468, "depth": 3}
            if obj[0]<=3:
                # {"feature": "Occupation", "instances": 39, "metric_value": 0.8582, "depth": 4}
                if obj[2]>2:
                    return 'True'
                elif obj[2]<=2:
                    return 'True'
                else: return 'True'
            elif obj[0]>3:
                # {"feature": "Occupation", "instances": 24, "metric_value": 1.0, "depth": 4}
                if obj[2]<=7:
                    return 'True'
                elif obj[2]>7:
                    return 'False'
                else: return 'False'
            else: return 'True'
        elif obj[3]>2:
            # {"feature": "Occupation", "instances": 7, "metric_value": 0.5917, "depth": 3}
            if obj[2]<=4:
                return 'False'
            elif obj[2]>4:
                # {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 4}
                if obj[0]<=1:
                    return 'False'
                elif obj[0]>1:
                    return 'False'
                else: return 'False'
            else: return 'False'
        else: return 'False'
    elif obj[1]>2:
        # {"feature": "Occupation", "instances": 15, "metric_value": 0.8366, "depth": 2}
        if obj[2]>4:
            # {"feature": "Distance", "instances": 10, "metric_value": 0.469, "depth": 3}
            if obj[3]<=1:
                return 'False'
            elif obj[3]>1:
                # {"feature": "Coupon", "instances": 4, "metric_value": 0.8113, "depth": 4}
                if obj[0]>1:
                    return 'True'
                elif obj[0]<=1:
                    return 'False'
                else: return 'False'
            else: return 'False'
        elif obj[2]<=4:
            # {"feature": "Coupon", "instances": 5, "metric_value": 0.971, "depth": 3}
            if obj[0]>2:
                # {"feature": "Distance", "instances": 3, "metric_value": 0.9183, "depth": 4}
                if obj[3]<=2:
                    return 'False'
                else: return 'False'
            elif obj[0]<=2:
                return 'True'
            else: return 'True'
        else: return 'True'
    else: return 'False'
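# Example (sketch): with features ordered [Coupon, Education, Occupation, Distance],
# findDecision([2, 1, 5, 1]) walks Education<=2 -> Distance<=2 -> Coupon<=3
# -> Occupation>2 and returns 'True'.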
| 32.655738 | 95 | 0.568273 | 287 | 1,992 | 3.902439 | 0.146341 | 0.147321 | 0.117857 | 0.15 | 0.550893 | 0.439286 | 0.360714 | 0.232143 | 0.232143 | 0.18125 | 0 | 0.08679 | 0.213353 | 1,992 | 60 | 96 | 33.2 | 0.627952 | 0.49498 | 0 | 0.541667 | 0 | 0 | 0.111782 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020833 | false | 0 | 0 | 0 | 0.270833 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33b7faddaf9bc0cb8e5a9b63ad2f398485bee83a | 2,959 | py | Python | main.py | traian-d/compact_cellular_automata | f2d1d0aaaad357fc3516e00138c3efb36481c45f | [
"MIT"
] | 2 | 2020-02-12T14:17:57.000Z | 2020-04-29T13:35:03.000Z | main.py | traian-d/compact_cellular_automata | f2d1d0aaaad357fc3516e00138c3efb36481c45f | [
"MIT"
] | null | null | null | main.py | traian-d/compact_cellular_automata | f2d1d0aaaad357fc3516e00138c3efb36481c45f | [
"MIT"
] | null | null | null | import topologies as top
import rules as rules
import starting_patterns as sp
from Game import Game
import os
import time
import numpy as np
def run_game(topology, rule, starting_pattern, start_pos, iterations):
    starting_pattern_instance = starting_pattern(topology.n_rows, topology.n_cols, start_pos)
    game = Game(topology, starting_pattern_instance)
    for i in range(iterations):
        prev_state_seen_idx = game.apply_rule(rule)
        if prev_state_seen_idx > -1:
            print(len(game.states) - prev_state_seen_idx - 1)
            break
    return game


def print_states(game, sleep=0.02, clear=True):
    states = game.states_to_string()
    for state in states:
        print(state)
        time.sleep(sleep)
        if clear:
            os.system('clear')


def np_plot(game, sleep=0.02):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    states, n_rows = game.pad_states()
    for i in range(len(states)):
        ax.cla()
        ax.imshow(np.reshape(states[i], (n_rows, game.topology.n_cols)))
        ax.set_title("frame {}".format(i))
        plt.pause(sleep)


def save_gif(game, file_path, dpi=80, sleep=50):
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    states, n_rows = game.pad_states()

    def update(i):
        return ax.imshow(np.reshape(states[i], (n_rows, game.topology.n_cols)))

    from matplotlib.animation import FuncAnimation
    anim = FuncAnimation(fig, update, frames=np.arange(0, len(states)), interval=sleep)
    anim.save(file_path, dpi=dpi, writer='imagemagick')


def binary(l):
    return sum(c << i for i, c in enumerate(l))
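# Quick example: binary treats the list as little-endian bits, so
# binary([1, 0, 1]) == 1 + 4 == 5.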
# acorn_game = run_game(top.eight_neighbor_torus(50, 50), rules.conway_game_of_life, sp.acorn, 1261, iterations=250)
# np_plot(acorn_game, sleep=0.01)
# save_gif(acorn_game, 'acorn_game.gif', 100)
ibm_game = run_game(top.four_neighbor_torus(11, 11), rules.ibm_may2020, sp.tessalation_factory('01111001', 121), 60, iterations=250000)
# np_plot(ibm_game, sleep=0.01)

# def sim():
#     topology = top.four_neighbor_torus(11, 11)
#     for i in range(10000):
#         run_game(topology, rules.ibm_may2020, sp.one_dot, 60, iterations=1000000)
#
# import cProfile
# cProfile.run('sim()')
# glider_torus = run_game(top.eight_neighbor_torus(25, 25), rules.conway_game_of_life, sp.conway_glider, 2, iterations=100)
# np_plot(glider_torus)
# save_gif(glider_torus, 'glider_torus.gif', 100)
# glider_simple = run_game(top.eight_neighbor_grid(25, 25), rules.conway_game_of_life, sp.conway_glider, 2, iterations=100)
# save_gif(glider_simple, 'glider_simple.gif', 100)
# rule30 = run_game(top.single_row(200), rules.rule_30, sp.one_dot, 101, iterations=100)
# save_gif(rule30, 'rule30.gif', dpi=80, sleep=30)
# sierpinski_triangle = run_game(top.single_row(200), rules.sierpinski_triangle, sp.one_dot, 101, iterations=100)
# np_plot(sierpinski_triangle)
# save_gif(sierpinski_triangle, 'sierpinski_triangle.gif', dpi=80, sleep=30)
| 34.406977 | 135 | 0.711727 | 456 | 2,959 | 4.394737 | 0.278509 | 0.027944 | 0.02994 | 0.016467 | 0.323353 | 0.27994 | 0.192615 | 0.165669 | 0.165669 | 0.165669 | 0 | 0.05569 | 0.162555 | 2,959 | 85 | 136 | 34.811765 | 0.753027 | 0.385941 | 0 | 0.136364 | 0 | 0 | 0.017827 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.227273 | 0.045455 | 0.431818 | 0.068182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33ba3cc755c2a9206c00fdc1be1e77ea3da0a51b | 548 | py | Python | Day-1/Andelabs/data_types.py | kamaumike/bootcamp-10 | 59ef238518cdd9434afeb07eac0d586dc75821db | [
"MIT"
] | null | null | null | Day-1/Andelabs/data_types.py | kamaumike/bootcamp-10 | 59ef238518cdd9434afeb07eac0d586dc75821db | [
"MIT"
] | null | null | null | Day-1/Andelabs/data_types.py | kamaumike/bootcamp-10 | 59ef238518cdd9434afeb07eac0d586dc75821db | [
"MIT"
] | null | null | null | def data_type(data):
"""Returns a specific result based on the
data type of the argument supplied
"""
if type(data) == str:
return len(data)
elif data == None:
return 'no value'
elif data == True or data == False:
return data
elif type(data) == int:
if data < 100:
return 'less than 100'
elif data > 100:
return 'more than 100'
else:
return 'equal to 100'
elif type(data) == list:
if len(data) >= 3:
return data[2]
else:
return None
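# A few more sample calls, for illustration: data_type("hi") -> 2,
# data_type(None) -> 'no value', data_type([1, 2, 3]) -> 3 (the third element).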
print(data_type(45))
| 20.296296 | 44 | 0.578467 | 79 | 548 | 3.987342 | 0.468354 | 0.101587 | 0.07619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.050532 | 0.313869 | 548 | 26 | 45 | 21.076923 | 0.787234 | 0.135037 | 0 | 0.1 | 0 | 0 | 0.099352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0 | 0 | 0.45 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33be3d84bce31cd3e91a5c47d1f4596a4006fe31 | 3,850 | py | Python | main.py | R3dsc4rf/ErrorTicketTool | 79acae796b4c3411495c0a4caa218668bbb5c153 | [
"MIT"
] | null | null | null | main.py | R3dsc4rf/ErrorTicketTool | 79acae796b4c3411495c0a4caa218668bbb5c153 | [
"MIT"
] | null | null | null | main.py | R3dsc4rf/ErrorTicketTool | 79acae796b4c3411495c0a4caa218668bbb5c153 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# for Linux: lets the program be executed from a terminal without typing "python3" first
# (has no effect on Windows)
'''
Main.py for Error TicketTool_v001
This tool aims to help quality testers create tickets and show some simple diagrams of frequently used data.
This tool does not repair corrupted data.
This tool does not limit inputs: Status = "NEW", "new" or "new Error" are all counted as separate entities.
This tool always loads data from TicketData.csv first.
'''
import TicketCreator as TC
from tkinter import filedialog as fd  # for the open-file function
import DiagramCreator as DC
import tkinter as tk
from tkinter import ttk
import os
###########################################################################
#################### init Class/ Global Var/ Funktion ####################
###########################################################################
class main_window(tk.Frame):
    def __init__(self, master=None, *args, **kwargs):
        tk.Frame.__init__(self, master, *args, **kwargs)
        self.dataname = "TicketData.csv"
        self.cwd = os.getcwd()  # cwd = current working directory
        self.master.title("ErrorTicketTool v001")
        self.create_Widgets()
        self.pack(expand=True, fill="both")

    def create_Widgets(self):
        '''create widgets - only buttons that open the features'''
        self.mainFrame = tk.Frame(self, borderwidth=10)  # , bg="black")  # step 1 - everything is in mainframe
        self.lTitle = tk.Label(self.mainFrame, text=f" Error Ticket Tool v003 ", padx=25, font=("Arial", 12))
        self.lTitle.grid(row=0, column=0)

        # tab creator
        tabControl = ttk.Notebook(self.mainFrame)
        self.tab1 = ttk.Frame(tabControl)
        self.tab2 = ttk.Frame(tabControl)
        tabControl.add(self.tab1, text="Ticket Creator")
        tabControl.add(self.tab2, text="Diagram")
        tabControl.grid(column=0, row=1)
        self.ticketTab = TC.ticket_creator_window(master=self.tab1)
        self.diagramTab = DC.diagram_window(master=self.tab2)
        self.bot_window()
        self.mainFrame.pack(expand=True, fill="both", anchor="e")

    def bot_window(self):
        self.botFrame = tk.Frame(self.mainFrame)
        self.lDataName = tk.Label(self.botFrame, text=f"{str(self.dataname):>35}")
        self.lDataName.grid(row=0, column=1, padx=2, pady=2, ipadx=10)
        self.openFIleButton = tk.Button(self.botFrame, text="openFile", command=self.open_file)
        self.openFIleButton.grid(row=0, column=2, padx=2, pady=2, ipadx=10)
        self.quitButton = tk.Button(self.botFrame, text="quit", command=self.quit, padx=20, pady=2)
        self.quitButton.grid(column=3, row=0, sticky=tk.E)
        self.botFrame.grid(column=0, row=10, sticky=tk.SE)

    def open_file(self):
        '''open another csv file. activated by openFIleButton'''
        print("askopenfilename Started")
        # self.dataname = fd.askopenfilename(initialdir="/", title="Select file", filetypes=(("CSV Files", "*.csv")))
        self.dataname = fd.askopenfilename(title="Select file", filetypes=(("CSV Files", "*.csv"),))
        print("Warning: " + self.dataname + " is changed!")
        self.diagramTab.set_file(self.dataname)
        self.ticketTab.set_file(self.dataname)
        # self.open_Data_File()
        # self.update_data_file()


###########################################################################
################################### Main ##################################
###########################################################################
if __name__ == '__main__':
    mainWindow = main_window()
    mainWindow.mainloop()
else:
    print("Main of Error Ticket Tool entered")
| 41.397849 | 118 | 0.581039 | 453 | 3,850 | 4.863135 | 0.401766 | 0.03813 | 0.010894 | 0.019065 | 0.113482 | 0.05084 | 0.05084 | 0 | 0 | 0 | 0 | 0.016345 | 0.205455 | 3,850 | 92 | 119 | 41.847826 | 0.703825 | 0.234286 | 0 | 0 | 0 | 0 | 0.099833 | 0.010025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.125 | 0 | 0.229167 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33c0385b782f367408a11e61a102f7ba38bcbea5 | 1,688 | py | Python | tests/test_serialization.py | dwhall/farc | 05856119a43288214b9bd0a2605452d1eb48b808 | [
"MIT"
] | 21 | 2018-10-21T15:54:45.000Z | 2022-03-29T19:27:44.000Z | tests/test_serialization.py | Datacraft-GaaS/farc | 05856119a43288214b9bd0a2605452d1eb48b808 | [
"MIT"
] | 9 | 2019-06-22T04:51:10.000Z | 2020-12-21T17:14:48.000Z | tests/test_serialization.py | Datacraft-GaaS/farc | 05856119a43288214b9bd0a2605452d1eb48b808 | [
"MIT"
] | 8 | 2019-06-21T16:13:32.000Z | 2022-03-29T19:34:27.000Z | #!/usr/bin/env python3
"""This unit test proves that event values are immutable.
An event with a global-scope array is declared.
A simple state machine event handler modifies the value
of the event it is given.
The state machine is called with the global-scope array
and the framework is stopped.
The test passes if the global-scope array is unchanged.
"""
import asyncio
import unittest
import farc
# arbitrary mutable value
v = ["one",2,3]
class SimpleSM(farc.Ahsm):
    @farc.Hsm.state
    def _initial(self, event):
        return self.tran(SimpleSM.ready)

    @farc.Hsm.state
    def ready(self, event):
        sig = event.signal
        if sig == farc.Signal.ENTRY:
            return self.handled(event)
        elif sig == farc.Signal.APPEND:
            event.value.append("four")
            return self.handled(event)
        elif sig == farc.Signal.EXIT:
            return self.handled(event)
        return self.super(self.top)


def async_test(f):
    def wrapper(*args, **kwargs):
        coro = asyncio.coroutine(f)
        future = coro(*args, **kwargs)
        loop = asyncio.get_event_loop()
        loop.run_until_complete(future)
    return wrapper


class TestSerialization(unittest.TestCase):
    def setUp(self):
        global v
        # create an event with the mutable value
        farc.Signal.register("APPEND")
        self.event = farc.Event(farc.Signal.APPEND, v)
        self.sm = SimpleSM()
        self.sm.start(0)

    @async_test
    def test_event_value_modification(self,):
        self.sm.post_fifo(self.event)
        farc.Framework.stop()
        self.assertEqual(v, ["one",2,3])


if __name__ == '__main__':
    unittest.main()
| 23.444444 | 57 | 0.647512 | 228 | 1,688 | 4.710526 | 0.421053 | 0.046555 | 0.044693 | 0.061453 | 0.072626 | 0.072626 | 0.072626 | 0.072626 | 0 | 0 | 0 | 0.004732 | 0.248815 | 1,688 | 71 | 58 | 23.774648 | 0.842271 | 0.244076 | 0 | 0.125 | 0 | 0 | 0.018942 | 0 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.15 | false | 0 | 0.075 | 0.025 | 0.425 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33c1e4374c9158716c8a41a27ad3efbd56ce34b4 | 7,771 | py | Python | grafanahelper.py | Voronenko/errbot-grafana | eea280dac5f61f96b37f7b45e0413be90e23fc27 | [
"MIT"
] | 3 | 2019-11-15T20:16:52.000Z | 2020-05-04T08:14:48.000Z | grafanahelper.py | Voronenko/errbot-grafana | eea280dac5f61f96b37f7b45e0413be90e23fc27 | [
"MIT"
] | null | null | null | grafanahelper.py | Voronenko/errbot-grafana | eea280dac5f61f96b37f7b45e0413be90e23fc27 | [
"MIT"
] | 1 | 2019-03-28T17:42:07.000Z | 2019-03-28T17:42:07.000Z | import requests
import json
from jinja2 import Template
import urllib.request
import re
class GrafanaHelper(object):
    def __init__(self, grafana_server_address, grafana_token):
        self.grafana_token = grafana_token
        self.grafana_server_address = grafana_server_address

    def get_dashboards(self, tag=None):
        if tag is not None:
            result = self.call_grafana(
                "search?type=dash-db&tag={0}".format(tag))
        else:
            result = self.call_grafana("search?type=dash-db")
        return result

    def get_dashboard_details(self, slug):
        dashboard = self.call_grafana("dashboards/db/{0}".format(slug))
        data = dashboard["dashboard"]
        panels = []
        if ("rows" not in data) or len(data["rows"]) == 0:
            if len(data["panels"]) == 0:
                return "Dashboard empty"
            else:
                data["rows"] = [{"panels": data["panels"]}]
        if len(data["templating"]["list"]) > 0:
            template_map = {}
            for template in data["templating"]["list"]:
                if "current" not in template:
                    continue
                if "text" in template["current"]:
                    template_map["$" + template["name"]] = template["current"][
                        "text"]
                else:
                    template_map["$" + template["name"]] = ""
        panel_number = 0
        for row in data["rows"]:
            for panel in row["panels"]:
                panel["panel_number"] = panel_number
                panel_number += 1
                panels.append(panel)
        data["allpanels"] = panels
        return data

    def search_dashboards(self, query):
        result = self.call_grafana(
            "search?type=dash-db&query={0}".format(query))
        return result
    def render_raw(self, mess):
        regex = r"(?:!grafana) (?:render) ([A-Za-z0-9\-\:_]+)(.*)?"  # raw string, so the escapes aren't interpreted by Python
        matches = re.findall(regex, mess)[0]
        slug = matches[0].strip()
        tuning_params = matches[1].strip()
        return self.render(slug, tuning_params)
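    # Example (sketch): render_raw("!grafana render cpu-usage:3 host=web1")
    # yields slug "cpu-usage:3" and tuning_params "host=web1"; render() then
    # splits the slug on ":" to target panel 3 of the "cpu-usage" dashboard.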
    def render(self, slug, tuning_params="", period_from=None, period_to=None):
        timespan = {
            "from": period_from or "now-6h",
            "to": period_to or "now"
        }
        apiEndpoint = 'dashboard-solo'
        variables = ''
        template_params = []
        visual_panel_id = False
        apiPanelId = False
        visual_panel_name = False
        imagesize = {
            "width": 1000,
            "height": 500
        }
        variables = ""

        # check if we have any extra fields
        if tuning_params and tuning_params != '':
            # the order we apply non-variables in
            timeFields = ['from', 'to']
            for part in tuning_params.split():
                name, value = part.split('=')
                variables = "{0}&var-{1}".format(variables, part)
                template_params.append({"name": name, "value": value})

        parts = slug.split(":")
        if len(parts) > 1:
            slug = parts[0]
            if parts[1].isdigit():
                visual_panel_id = int(parts[1])
            else:
                visual_panel_name = parts[1].lower()

        data = self.get_dashboard_details(slug)
        if ("rows" not in data) or len(data["rows"]) == 0:
            if len(data["panels"]) == 0:
                return "Dashboard empty"
            else:
                data["rows"] = [{"panels": data["panels"]}]
        if len(data["templating"]["list"]) > 0:
            template_map = {}
            for template in data["templating"]["list"]:
                if "current" not in template:
                    continue
                for _param in template_params:
                    if template["name"] == _param["name"]:
                        template_map["$" + template["name"]] = _param["value"]
                    else:
                        if len(template["current"]) > 0:
                            template_map[
                                "$" + template["name"]] = template["current"][
                                "text"]
        panel_number = 0
        for row in data["rows"]:
            for panel in row["panels"]:
                panel_number += 1
                # skip if visual panel ID was specified and didn't match
                if visual_panel_id and visual_panel_id != panel["id"]:
                    continue
                # skip if API panel ID was specified and didn't match
                if apiPanelId and apiPanelId != panel["id"]:
                    continue
                if visual_panel_name and panel["title"].lower().find(
                        visual_panel_name) == -1:
                    continue
                title = panel["title"]
                # imageUrl = "#{grafana_host}/render/#{apiEndpoint}/db/#{slug}/
                # ?panelId=#{panel.id}&width=#{imagesize.width}&height=#{imagesize.height}
                # &from=#{timespan.from}&to=#{timespan.to}#{variables}"
                imageUrl = "{0}/render/{1}/db/{2}/?panelId={3}&width={4}&height={5}&from={6}&to={7}{8}".format(
                    self.grafana_server_address,
                    apiEndpoint,
                    slug,
                    panel["id"],
                    imagesize["width"],
                    imagesize["height"],
                    timespan["from"],
                    timespan["to"],
                    variables
                )
                link = "{0}/dashboard/db/{1}/?panelId={2}&width={3}&height={4}&from={5}&to={6}{7}".format(
                    self.grafana_server_address,
                    slug,
                    panel["id"],
                    imagesize["width"],
                    imagesize["height"],
                    timespan["from"],
                    timespan["to"],
                    variables
                )
                return {
                    "title": title,
                    "imageUrl": imageUrl,
                    "link": link,
                    "template_params": template_params,
                    "timespan": timespan
                }
        return {}
    def pretty_dashboards(self, response):
        with open('templates/grafana_dashboards_list.md') as file_:
            template = Template(file_.read())
        rendered = template.render(dashboards=response)
        return rendered

    def call_grafana(self, url):
        """
        :type url: basestring
        """
        target_url = "{0}/api/{1}".format(self.grafana_server_address, url)
        r = requests.get(target_url, headers=self.grafana_headers(False))
        result = json.loads(r.content)
        return result

    def post_grafana(self, url, data):
        target_url = "{0}/api/{1}".format(self.grafana_server_address, url)
        r = requests.post(target_url, data=json.dumps(data),
                          headers=self.grafana_headers(True))
        result = json.loads(r.content)
        return result

    def get_grafana_image(self, url):
        opener = urllib.request.build_opener()
        opener.addheaders = [
            ("Authorization", "Bearer {0}".format(self.grafana_token))
        ]
        urllib.request.install_opener(opener)
        # fd, path = tempfile.mkstemp()
        path, headers = urllib.request.urlretrieve(url)
        return {
            "path": path,
            "headers": headers
        }

    def grafana_headers(self, post=False):
        headers = {"Accept": "application/json",
                   "Authorization": "Bearer {0}".format(self.grafana_token)
                   }
        if post:
            headers["Content-Type"] = "application/json"
        return headers
| 36.829384 | 111 | 0.49659 | 773 | 7,771 | 4.862872 | 0.214748 | 0.032189 | 0.037244 | 0.038308 | 0.340782 | 0.308327 | 0.308327 | 0.263634 | 0.213887 | 0.195797 | 0 | 0.012167 | 0.376013 | 7,771 | 210 | 112 | 37.004762 | 0.763044 | 0.054948 | 0 | 0.369318 | 0 | 0.011364 | 0.122552 | 0.036013 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.028409 | 0 | 0.170455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33c45593e1c6d3a8ad98e019dc38be9163694fd5 | 1,392 | py | Python | inspection.py | tzechiop/Traffic-Counter-Image-Analysis | 54fbd50ed616e9e09686b8a02debe96d584486f7 | [
"MIT"
] | null | null | null | inspection.py | tzechiop/Traffic-Counter-Image-Analysis | 54fbd50ed616e9e09686b8a02debe96d584486f7 | [
"MIT"
] | null | null | null | inspection.py | tzechiop/Traffic-Counter-Image-Analysis | 54fbd50ed616e9e09686b8a02debe96d584486f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 21:28:32 2016
@author: thasegawa
"""
import cv2
import numpy as np
# ============================================================================
INPUT_WIDTH = 160
INPUT_HEIGHT = 120
OUTPUT_TILE_WIDTH = 10
OUTPUT_TILE_HEIGHT = 12
TILE_COUNT = OUTPUT_TILE_WIDTH * OUTPUT_TILE_HEIGHT
# ============================================================================
def stitch_images(input_format, output_filename):
    output_shape = (INPUT_HEIGHT * OUTPUT_TILE_HEIGHT
                    , INPUT_WIDTH * OUTPUT_TILE_WIDTH
                    , 3)
    output = np.zeros(output_shape, np.uint8)

    for i in range(TILE_COUNT):
        img = cv2.imread(input_format % i)
        cv2.rectangle(img, (0, 0), (INPUT_WIDTH - 1, INPUT_HEIGHT - 1), (0, 0, 255), 1)

        # Draw the frame number
        cv2.putText(img, str(i), (2, 10)
                    , cv2.FONT_HERSHEY_PLAIN, 0.7, (255, 255, 255), 1)

        x = i % OUTPUT_TILE_WIDTH * INPUT_WIDTH
        y = i // OUTPUT_TILE_WIDTH * INPUT_HEIGHT  # integer division, so the slicing indices stay ints in Python 3

        output[y:y+INPUT_HEIGHT, x:x+INPUT_WIDTH,:] = img

    cv2.imwrite(output_filename, output)
cv2.imwrite(output_filename, output)
# ============================================================================
stitch_images("images/frame_%04d.png", "stitched_frames.png")
stitch_images("images/mask_%04d.png", "stitched_masks.png")
stitch_images("images/processed_%04d.png", "stitched_processed.png") | 30.933333 | 87 | 0.559626 | 173 | 1,392 | 4.236994 | 0.393064 | 0.109141 | 0.102319 | 0.043656 | 0.057299 | 0 | 0 | 0 | 0 | 0 | 0 | 0.052905 | 0.171695 | 1,392 | 45 | 88 | 30.933333 | 0.582827 | 0.237787 | 0 | 0 | 0 | 0 | 0.119048 | 0.064762 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.083333 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33c896e298d79bbd19efa3f04edd11b852097f58 | 4,037 | py | Python | cloudcix_metrics/metrics.py | CloudCIX/metrics_cloudcix | 4ce4cac2c90e920e5f745bbcf33d54efd34b6b67 | [
"Apache-2.0"
] | null | null | null | cloudcix_metrics/metrics.py | CloudCIX/metrics_cloudcix | 4ce4cac2c90e920e5f745bbcf33d54efd34b6b67 | [
"Apache-2.0"
] | null | null | null | cloudcix_metrics/metrics.py | CloudCIX/metrics_cloudcix | 4ce4cac2c90e920e5f745bbcf33d54efd34b6b67 | [
"Apache-2.0"
] | null | null | null | # python
import atexit
import logging
import subprocess
import urllib3
from collections import namedtuple
from datetime import datetime
from multiprocessing.dummy import Pool as ThreadPool
from typing import List, Dict, Callable, Optional, Any
# libs
import influxdb
from cloudcix.conf import settings
# Suppress InsecureRequestWarnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
Pool = ThreadPool(1)
def stop_pool():
    Pool.close()
    Pool.join()
atexit.register(stop_pool)
INFLUX_CLIENT = None
# Define a helper data type for Influx data
InfluxData = Dict[str, Any]
Metric = namedtuple('Metric', ['table', 'value', 'tags'])
def _generate_data_packet(measurement: str, fields: dict, tags: dict = {}) -> List[InfluxData]:
    """
    Generates a data packet for the current region with the given measure name
    and whatever fields are passed to this method, and turns it into a format to be sent to influx

    :param measurement: The name of the measurement to send
    :param fields: Key-value pairs of data to be sent. Not indexed in Influx
    :param tags: Extra meta-data to be associated with a data point. Indexed in Influx
    :return: A prepared data packet in a form ready to be sent to InfluxDB
    """
    extra_tags = getattr(settings, 'CLOUDCIX_INFLUX_TAGS', {})
    tags = {**tags, **extra_tags}  # copy rather than update(), so the mutable default argument is never mutated
    data = [{
        'measurement': measurement,
        'tags': tags,
        'fields': fields,
        'time': datetime.utcnow(),
    }]
    return data
def _get_current_git_sha() -> str:
    """
    Finds the current git commit sha and returns it

    :return: The sha of the current commit
    """
    return subprocess.check_output([
        'git',
        'describe',
        '--always',
    ]).strip().decode()


def _get_influx_client() -> Optional[influxdb.InfluxDBClient]:
    """
    Lazy creates a client for connecting to our InfluxDB instance

    :return: An InfluxDBClient that can log metrics to our instance of Influx
    """
    global INFLUX_CLIENT
    if INFLUX_CLIENT is None and settings.CLOUDCIX_INFLUX_DATABASE is not None:
        try:
            INFLUX_CLIENT = influxdb.InfluxDBClient(
                host=settings.CLOUDCIX_INFLUX_URL,
                port=settings.CLOUDCIX_INFLUX_PORT,
                database=settings.CLOUDCIX_INFLUX_DATABASE,
                ssl=settings.CLOUDCIX_INFLUX_PORT == 443,
            )
            # Ensure the database exists
            INFLUX_CLIENT.create_database(settings.CLOUDCIX_INFLUX_DATABASE)
        except Exception:
            logging.getLogger('cloudcix_metrics._get_influx_client').error(
                'Error connecting to Influx.',
                exc_info=True,
            )
    return INFLUX_CLIENT
def current_commit(commit=None):
    commit = commit or _get_current_git_sha()
    _post_metrics('commit', commit)


def prepare_metrics(preprocess: Callable[[Optional[Dict]], Metric], **kwargs):
    """
    Places the function preprocess in the thread pool queue. Currently assumes that tags are determined in preprocess.

    :param preprocess: a function that takes in kwargs and returns a named tuple with the fields table, value and tags.
                       table is a str, value is any (probably primitives only) and tags is a dict
    """
    Pool.apply_async(_post, args=(preprocess,), kwds=kwargs)
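# A minimal usage sketch (hypothetical caller, not part of this module):
#
#   def count_logins(user=None):
#       return Metric(table='logins', value=1, tags={'user': user})
#
#   prepare_metrics(count_logins, user='alice')  # queued; _post runs it on the pool thread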
def _post(preprocess, **kwargs):
    metric = preprocess(**kwargs)
    if metric is None:
        return
    _post_metrics(metric.table, metric.value, metric.tags)
def _post_metrics(measurement: str, value, tags: dict = {}):
    """
    Sends the given k-v pair (measurement->value) to influx
    along with the given tags

    :param measurement: the key for the significant metric field
    :param value: the value for the significant metric field
    :param tags: the relevant tags
    """
    client = _get_influx_client()
    if client is None:
        return
    client.write_points(
        _generate_data_packet(
            measurement,
            {'value': value},
            tags,
        ),
    )
| 30.816794 | 119 | 0.680951 | 506 | 4,037 | 5.306324 | 0.341897 | 0.040223 | 0.057356 | 0.03352 | 0.052886 | 0.024581 | 0 | 0 | 0 | 0 | 0 | 0.002274 | 0.237553 | 4,037 | 130 | 120 | 31.053846 | 0.870045 | 0.328709 | 0 | 0.027397 | 0 | 0 | 0.0609 | 0.013576 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109589 | false | 0 | 0.136986 | 0 | 0.315068 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33cb1849ec5f3de5bd50256cf13966f6e710718b | 1,273 | py | Python | tests/unit/test_oauth_client_backend.py | przemekk1385/django-oac | 379b29921551ea1d53edb5e3fbb6fa8d3c972acb | [
"MIT"
] | null | null | null | tests/unit/test_oauth_client_backend.py | przemekk1385/django-oac | 379b29921551ea1d53edb5e3fbb6fa8d3c972acb | [
"MIT"
] | 3 | 2022-03-02T18:08:13.000Z | 2022-03-02T18:29:33.000Z | tests/unit/test_oauth_client_backend.py | przemekk1385/django_oac | 379b29921551ea1d53edb5e3fbb6fa8d3c972acb | [
"MIT"
] | null | null | null | from unittest.mock import Mock, PropertyMock
import pytest
from django.contrib.auth import get_user_model
from django_oac.backends import OAuthClientBackend
from django_oac.exceptions import NoUserError
from ..common import USER_PAYLOAD
UserModel = get_user_model()
@pytest.mark.django_db
def test_get_user_does_not_exist():
    assert not OAuthClientBackend.get_user(999)


@pytest.mark.django_db
def test_get_user_succeeded():
    user = UserModel.objects.create(**USER_PAYLOAD)

    assert OAuthClientBackend.get_user(user.id)


@pytest.mark.django_db
def test_authenticate_succeeded(oac_valid_get_request):
    user = UserModel.objects.create(**USER_PAYLOAD)

    token = Mock()
    type(token).user = PropertyMock(return_value=user)

    token_provider = Mock()
    token_provider.create.return_value = token

    authenticated_user = OAuthClientBackend.authenticate(
        oac_valid_get_request, token_provider=token_provider
    )

    assert authenticated_user.email == USER_PAYLOAD["email"]


def test_authenticate_no_user_error(oac_valid_get_request):
    token_provider = Mock()
    token_provider.create.side_effect = NoUserError("foo")

    assert not OAuthClientBackend.authenticate(
        oac_valid_get_request, token_provider=token_provider
    )
| 25.46 | 60 | 0.785546 | 162 | 1,273 | 5.839506 | 0.296296 | 0.109937 | 0.046512 | 0.07611 | 0.42389 | 0.42389 | 0.224101 | 0.224101 | 0.156448 | 0.156448 | 0 | 0.002735 | 0.138256 | 1,273 | 49 | 61 | 25.979592 | 0.859617 | 0 | 0 | 0.290323 | 0 | 0 | 0.006284 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 1 | 0.129032 | false | 0 | 0.193548 | 0 | 0.322581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33cf3174ad53c2b52d8f80ce94cd572841a6a3d1 | 1,520 | py | Python | actions_levels/discrete_actions.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | 1 | 2021-06-03T20:03:50.000Z | 2021-06-03T20:03:50.000Z | actions_levels/discrete_actions.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | null | null | null | actions_levels/discrete_actions.py | pedMatias/matias_hfo | 6d88e1043a1455f5c1f6cc11b9380869772f4176 | [
"MIT"
] | 1 | 2021-03-14T01:22:33.000Z | 2021-03-14T01:22:33.000Z | from hfo import MOVE_TO, DRIBBLE_TO, KICK_TO
from actions_levels import BaseActions
class DiscreteActions:
    actions = ["MOVE_UP", "MOVE_DOWN", "MOVE_LEFT", "MOVE_RIGHT",
               "KICK_TO_GOAL",
               "DRIBBLE_UP", "DRIBBLE_DOWN", "DRIBBLE_LEFT", "DRIBBLE_RIGHT"]

    def get_num_actions(self):
        return len(self.actions)

    def map_action_idx_to_hfo_action(self, agent_pos: tuple, action_idx: int)\
            -> tuple:
        action_name = self.actions[action_idx]
        return self.get_action_params(agent_pos, action_name)

    def map_action_to_str(self, action_idx: int) -> str:
        return self.actions[action_idx]

    def get_action_params(self, position: tuple, action_name: str) -> tuple:
        x_pos, y_pos = position
        if action_name == "KICK_TO_GOAL":
            return KICK_TO, 0.9, 0, 3
        else:
            if "MOVE" in action_name:
                action = MOVE_TO
            elif "DRIBBLE" in action_name:
                action = DRIBBLE_TO
            else:
                raise ValueError("ACTION NAME is WRONG")

            if "UP" in action_name:
                return action, x_pos, -0.9
            elif "DOWN" in action_name:
                return action, x_pos, 0.9
            elif "LEFT" in action_name:
                return action, -0.8, y_pos
            elif "RIGHT" in action_name:
                return action, 0.8, y_pos
            else:
                raise ValueError("ACTION NAME is WRONG")
| 34.545455 | 78 | 0.571711 | 194 | 1,520 | 4.195876 | 0.247423 | 0.14742 | 0.088452 | 0.088452 | 0.2457 | 0.2457 | 0.2457 | 0.157248 | 0.157248 | 0.083538 | 0 | 0.012 | 0.342105 | 1,520 | 43 | 79 | 35.348837 | 0.802 | 0 | 0 | 0.142857 | 0 | 0 | 0.113158 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.057143 | 0.057143 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33d00a7613ecc002e69a038b2036dfaa2537b297 | 2,946 | py | Python | websocket-microphone/asr_server_microphone.py | madkote/kaldi-websocket-python | 202ae7b34ca68e04d212ffbcc7a501dcde76d481 | [
"Apache-2.0"
] | 1 | 2021-11-06T22:08:13.000Z | 2021-11-06T22:08:13.000Z | websocket-microphone/asr_server_microphone.py | madkote/kaldi-websocket-python | 202ae7b34ca68e04d212ffbcc7a501dcde76d481 | [
"Apache-2.0"
] | null | null | null | websocket-microphone/asr_server_microphone.py | madkote/kaldi-websocket-python | 202ae7b34ca68e04d212ffbcc7a501dcde76d481 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import json
import os
import sys
import asyncio
import websockets
import logging
import sounddevice as sd
import argparse
import queue
from vosk import Model, KaldiRecognizer
def int_or_str(text):
    """Helper function for argument parsing."""
    try:
        return int(text)
    except ValueError:
        return text
def callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block."""
    loop.call_soon_threadsafe(audio_queue.put_nowait, bytes(indata))
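# Note: sounddevice invokes callback() on its own audio thread, so the handoff
# to the asyncio queue has to go through loop.call_soon_threadsafe rather than
# a plain audio_queue.put_nowait from that thread.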
async def client_loop(websocket, path):
    clients.add(websocket)
    print("Client connected from", websocket)
    await websocket.wait_closed()
    clients.remove(websocket)


async def recognize_loop():
    global audio_queue

    model = Model(args.model)
    audio_queue = asyncio.Queue()

    with sd.RawInputStream(samplerate=args.samplerate, blocksize=2000, device=args.device, dtype='int16',
                           channels=1, callback=callback) as device:

        logging.info("Running recognition")
        rec = KaldiRecognizer(model, device.samplerate)
        while True:
            data = await audio_queue.get()
            if rec.AcceptWaveform(data):
                result = rec.Result()
                logging.info(result)
                websockets.broadcast(clients, result)
async def main():
    global args
    global clients
    global loop

    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('-l', '--list-devices', action='store_true',
                        help='show list of audio devices and exit')
    args, remaining = parser.parse_known_args()
    if args.list_devices:
        print(sd.query_devices())
        parser.exit(0)

    parser = argparse.ArgumentParser(description="ASR Server",
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     parents=[parser])
    parser.add_argument('-m', '--model', type=str, metavar='MODEL_PATH',
                        help='Path to the model', default='model')
    parser.add_argument('-i', '--interface', type=str, metavar='INTERFACE',
                        help='Bind interface', default='0.0.0.0')
    parser.add_argument('-p', '--port', type=int, metavar='PORT',
                        help='Port', default=2700)
    parser.add_argument('-d', '--device', type=int_or_str,
                        help='input device (numeric ID or substring)')
    parser.add_argument('-r', '--samplerate', type=int, help='sampling rate', default=16000)
    args = parser.parse_args(remaining)

    logging.basicConfig(level=logging.INFO)
    loop = asyncio.get_event_loop()
    clients = set()

    logging.info("Listening on %s:%d", args.interface, args.port)
    await asyncio.gather(
        websockets.serve(client_loop, args.interface, args.port),
        recognize_loop())


if __name__ == '__main__':
    asyncio.run(main())
| 32.733333 | 107 | 0.637135 | 341 | 2,946 | 5.384164 | 0.439883 | 0.029412 | 0.055556 | 0.022876 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009914 | 0.246775 | 2,946 | 89 | 108 | 33.101124 | 0.817485 | 0.041073 | 0 | 0 | 0 | 0 | 0.112691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028986 | false | 0 | 0.144928 | 0 | 0.202899 | 0.028986 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33d2208ec8fb2c7353028d27618bbea1b163e588 | 10,955 | py | Python | docker/utils.py | lanfanb/OpenLane | ebad315d1def25d9d253eb2ec1c56d7b4e59d7ca | [
"Apache-2.0"
] | null | null | null | docker/utils.py | lanfanb/OpenLane | ebad315d1def25d9d253eb2ec1c56d7b4e59d7ca | [
"Apache-2.0"
] | null | null | null | docker/utils.py | lanfanb/OpenLane | ebad315d1def25d9d253eb2ec1c56d7b4e59d7ca | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import sys
import json
import click
import shutil
import pathlib
import tempfile
import subprocess
import urllib.error
import urllib.parse
import urllib.request
SUPPORTED_ARCHITECTURES = {"amd64", "arm64v8", "ppc64le"}
CI_ARCHITECTURES = {"amd64", "arm64v8"}
SUPPORTED_OPERATING_SYSTEMS = {"centos-7"}
def current_docker_platform() -> str:
import platform
arch = platform.machine()
if arch in ["x86_64", "amd64"]:
return "amd64"
elif arch in ["aarch64", "arm64"]:
return "arm64v8"
elif arch in ["ppc64le"]:
return "ppc64le"
else:
print(
f"Unsupported architecture '{platform.machine()}' Falling back to x86-64 for Docker.",
file=sys.stderr,
)
return "amd64"
def test_manifest_exists(repository, tag) -> bool:
url = f"https://index.docker.io/v1/repositories/{repository}/tags/{tag}"
req = urllib.request.Request(url, headers={"Accept": "application/json"})
status = None
try:
with urllib.request.urlopen(req) as res:
status = int(res.status)
except urllib.error.HTTPError as e:
status = int(e.code)
return status is not None and status >= 200 and status < 300
@click.group()
def cli():
pass
@click.command()
@click.option("-R", "--registry", default="docker.io")
@click.option("-r", "--repository", default="efabless/openlane-tools")
@click.option(
"-o",
"--os",
"operating_system",
required=True,
type=click.Choice(SUPPORTED_OPERATING_SYSTEMS),
)
@click.option(
"-m",
"--architecture",
required=True,
type=click.Choice(SUPPORTED_ARCHITECTURES),
)
@click.argument("tool")
def pull_if_doesnt_exist(registry, repository, operating_system, architecture, tool):
"""
Requires *actual* Docker. Podman won't cut it.
"""
def get_tag_for(os, arch=None):
return (
subprocess.check_output(
[
"python3",
"../dependencies/tool.py",
tool,
f"--docker-tag-for-os={os}",
]
+ ([f"--docker-arch={arch}"] if arch is not None else [])
)
.decode("utf8")
.rstrip()
)
image_tag = None
skip_manifest = None
if tool == "build-base":
image_tag = os.getenv("BUILD_BASE_TAG")
skip_manifest = True
elif tool == "run-base":
image_tag = os.getenv("RUN_BASE_TAG")
skip_manifest = True
else:
image_tag = get_tag_for(operating_system, architecture)
skip_manifest = False
image = f"{repository}:{image_tag}"
images = (
subprocess.check_output(["docker", "images", image])
.decode("utf8")
.rstrip()
.split("\n")[1:]
)
if len(images) >= 1:
print(f"[*] Found {image}.")
return
print(f"[*] {image} not found, pulling...")
if test_manifest_exists(repository, image_tag):
subprocess.call(["docker", "pull", image])
print(f"[*] Pulled {image}.")
else:
if os.getenv("BUILD_IF_CANT_PULL") != "1":
print(f"[*] {image} not found in the repository.")
exit(os.EX_UNAVAILABLE)
else:
print(f"[*] {image} not found in the repository, building...")
env = os.environ.copy()
env["BUILD_ARCH"] = architecture
subprocess.check_call(["make", f"build-{tool}"], env=env)
print(f"Built {image}.")
if os.getenv("BUILD_IF_CANT_PULL_THEN_PUSH") != "1":
return
# Not needed for buildx, but won't hurt
print(f"[*] Pushing {image} to the container repository...")
subprocess.check_call(["docker", "push", image])
print(f"[*] Pushed {image}.")
if skip_manifest:
return
manifest_tag = get_tag_for(operating_system)
manifest_name = f"{repository}:{manifest_tag}"
print(f"[*] Trying to create multi-arch manifest {manifest_name}...")
arch_images = []
for arch in CI_ARCHITECTURES:
print(f"[*] Verifying if the image for {arch} has been pushed...")
arch_image_tag = get_tag_for(operating_system, arch)
arch_image = f"{repository}:{arch_image_tag}"
if not test_manifest_exists(repository, arch_image_tag):
print(f"[*] {arch_image} not yet pushed. Aborting multi-arch manifest.")
exit(os.EX_OK)
arch_images.append(arch_image)
print("[*] All images verified, creating and pushing manifest...")
subprocess.call(["docker", "manifest", "rm", manifest_name])
subprocess.check_call(["docker", "manifest", "create", manifest_name, *arch_images])
subprocess.check_call(
[
"docker",
"manifest",
"push",
manifest_name,
]
)
print("[*] Done.")
cli.add_command(pull_if_doesnt_exist)
@click.command()
@click.option("-r", "--repository", required=True)
@click.option(
"-o",
"--os",
"operating_system",
required=True,
type=click.Choice(SUPPORTED_OPERATING_SYSTEMS),
)
@click.argument("tools", nargs=-1)
def process_dockerfile_tpl(repository, operating_system, tools):
image_tags = [
(
subprocess.check_output(
[
"python3",
"../dependencies/tool.py",
f"--docker-tag-for-os={operating_system}",
tool,
]
)
.decode("utf8")
.rstrip()
)
for tool in tools
]
image_names = [f"{repository}:{tag}" for tag in image_tags]
from_lines = [
f"FROM {name}-${{ARCH}} as container{i}" for i, name in enumerate(image_names)
]
copy_lines = [
f"COPY --from=container{i} /build /build" for i, _ in enumerate(image_names)
]
template = open("./openlane/Dockerfile.tpl").read()
parts = template.split("# <from>")
parts.insert(1, "\n".join(from_lines))
from_filled = "\n".join(parts)
parts = from_filled.split("# <copy>")
parts.insert(1, "\n".join(copy_lines))
final = "\n".join(parts)
print(final)
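# Illustrative: given a Dockerfile.tpl containing the marker lines "# <from>"
# and "# <copy>", the generated FROM/COPY stanzas above are spliced in at those
# markers and the assembled multi-stage Dockerfile is printed to stdout.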
cli.add_command(process_dockerfile_tpl)
@click.command()
@click.option(
"--filter", default=".", help="regular expression to match submodule paths"
)
@click.argument("repository")
@click.argument("commit")
def fetch_submodules_from_tarballs(filter, repository, commit):
"""
Must be run from inside an extracted repository tarball.
    Given the repository's URL and commit, a table of the git submodules with
    their repositories, commits, and paths is constructed; each submodule is
    then downloaded and extracted using only the GitHub API (and curl), with
    no git involved.
    This is much faster than cloning a repo's entire history and then each
    submodule's entire history.
"""
repository_path_info: urllib.parse.SplitResult = urllib.parse.urlsplit(repository)
# 1. Get Commits Of Submodules
api_result = None
try:
api_result = subprocess.check_output(
[
"curl",
"--fail",
"-s",
"-L",
"-H",
"Accept: application/vnd.github.v3+json",
f"https://api.github.com/repos{repository_path_info.path}/git/trees/{commit}?recursive=True",
]
)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(os.EX_DATAERR)
api_result_parsed = json.loads(api_result)
api_result_tree = api_result_parsed["tree"]
submodules = [element for element in api_result_tree if element["type"] == "commit"]
shas_by_path = {submodule["path"]: submodule["sha"] for submodule in submodules}
# 2. Get Submodule Manifest
api_result = None
try:
api_result = subprocess.check_output(
[
"curl",
"--fail",
"-s",
"-L",
f"https://raw.githubusercontent.com/{repository_path_info.path}/{commit}/.gitmodules",
]
)
except Exception as e:
print(e, file=sys.stderr)
sys.exit(os.EX_DATAERR)
gitmodules = api_result.decode("utf8")
section_line_rx = re.compile(r"\[\s*submodule\s+\"([\w\-\.\/]+)\"\]")
key_value_line_rx = re.compile(r"(\w+)\s*=\s*(.+)")
submodules_by_name = {}
current = {} # First one is discarded
for line in gitmodules.split("\n"):
section_match = section_line_rx.search(line)
if section_match is not None:
name = section_match[1]
submodules_by_name[name] = {}
current = submodules_by_name[name]
kvl_match = key_value_line_rx.search(line)
if kvl_match is not None:
key, value = kvl_match[1], kvl_match[2]
current[key] = value
for name, submodule in submodules_by_name.items():
submodule["commit"] = shas_by_path.get(submodule["path"])
if submodule["url"].endswith(".git"):
submodule["url"] = submodule["url"][:-4]
# 3. Extract Submodules
temp_dir = tempfile.gettempdir()
filter_rx = re.compile(filter, flags=re.I)
for (name, values) in submodules_by_name.items():
path = values["path"]
if filter_rx.match(path) is None:
print(f"Skipping {path}...", flush=True)
continue
else:
print(f"Expanding {path}...", flush=True)
name_fs = re.sub(r"\/", "_", name)
tarball = os.path.join(temp_dir, f"{name_fs}.tar.gz")
url = values["url"]
commit = values["commit"]
url = os.path.join(url, "tarball", commit)
print(f"Downloading {url} to {path}...", file=sys.stderr)
subprocess.check_call(["curl", "-sL", "-o", tarball, url])
shutil.rmtree(path, ignore_errors=True)
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
subprocess.check_call(
["tar", "-xzf", tarball, "--strip-components=1", "-C", path]
)
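# Example invocation (hypothetical commit hash; Click exposes the command as
# `fetch-submodules-from-tarballs`):
#   python3 utils.py fetch-submodules-from-tarballs \
#       --filter 'docker/.*' https://github.com/The-OpenROAD-Project/OpenLane 0123abc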
cli.add_command(fetch_submodules_from_tarballs)
@click.command("current-docker-platform")
def current_docker_platform_cmd():
print(current_docker_platform(), end="")
cli.add_command(current_docker_platform_cmd)
if __name__ == "__main__":
cli()
| 29.448925 | 109 | 0.601917 | 1,317 | 10,955 | 4.854214 | 0.271071 | 0.014078 | 0.017832 | 0.013139 | 0.176443 | 0.114344 | 0.104489 | 0.072267 | 0.06163 | 0.06163 | 0 | 0.009271 | 0.261524 | 10,955 | 371 | 110 | 29.528302 | 0.780964 | 0.106162 | 0 | 0.245421 | 0 | 0.003663 | 0.209774 | 0.034877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029304 | false | 0.003663 | 0.047619 | 0.003663 | 0.10989 | 0.076923 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33d2ba64d8c2f1ea5e7dad3f01f1cb0fa2938a91 | 32,467 | py | Python | torchrec/datasets/criteo.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
] | 814 | 2022-02-23T17:24:14.000Z | 2022-03-31T16:52:23.000Z | torchrec/datasets/criteo.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
] | 89 | 2022-02-23T17:29:56.000Z | 2022-03-31T23:44:13.000Z | torchrec/datasets/criteo.py | xing-liu/torchrec | 82ffde7a69fdb9c66b79a753d6f03afa5db3f73e | [
"BSD-3-Clause"
] | 68 | 2022-02-23T17:42:17.000Z | 2022-03-28T06:39:55.000Z | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import time
from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple, Union
import numpy as np
import torch
import torch.utils.data.datapipes as dp
from iopath.common.file_io import PathManager, PathManagerFactory
from pyre_extensions import none_throws
from torch.utils.data import IterableDataset, IterDataPipe
from torchrec.datasets.utils import (
Batch,
LoadFiles,
PATH_MANAGER_KEY,
ReadLinesFromCSV,
safe_cast,
)
from torchrec.sparse.jagged_tensor import KeyedJaggedTensor
FREQUENCY_THRESHOLD = 3
INT_FEATURE_COUNT = 13
CAT_FEATURE_COUNT = 26
DAYS = 24
DEFAULT_LABEL_NAME = "label"
DEFAULT_INT_NAMES: List[str] = [f"int_{idx}" for idx in range(INT_FEATURE_COUNT)]
DEFAULT_CAT_NAMES: List[str] = [f"cat_{idx}" for idx in range(CAT_FEATURE_COUNT)]
DEFAULT_COLUMN_NAMES: List[str] = [
DEFAULT_LABEL_NAME,
*DEFAULT_INT_NAMES,
*DEFAULT_CAT_NAMES,
]
TOTAL_TRAINING_SAMPLES = 4195197692 # Number of rows across days 0-22 (day 23 is used for validation and testing)
COLUMN_TYPE_CASTERS: List[Callable[[Union[int, str]], Union[int, str]]] = [
lambda val: safe_cast(val, int, 0),
*(lambda val: safe_cast(val, int, 0) for _ in range(INT_FEATURE_COUNT)),
*(lambda val: safe_cast(val, str, "") for _ in range(CAT_FEATURE_COUNT)),
]
def _default_row_mapper(example: List[str]) -> Dict[str, Union[int, str]]:
column_names = reversed(DEFAULT_COLUMN_NAMES)
column_type_casters = reversed(COLUMN_TYPE_CASTERS)
return {
next(column_names): next(column_type_casters)(val) for val in reversed(example)
}
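# Illustrative: for a split TSV row ["1", "5", ..., "68fd1e64"], this yields
# {"label": 1, "int_0": 5, ..., "cat_25": "68fd1e64"}; iterating in reverse
# lets a short (truncated) row still map onto the trailing columns.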
class CriteoIterDataPipe(IterDataPipe):
"""
IterDataPipe that can be used to stream either the Criteo 1TB Click Logs Dataset
(https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/) or the
Kaggle/Criteo Display Advertising Dataset
(https://www.kaggle.com/c/criteo-display-ad-challenge/) from the source TSV
files.
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example::
datapipe = CriteoIterDataPipe(
("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
"""
def __init__(
self,
paths: Iterable[str],
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> None:
self.paths = paths
self.row_mapper = row_mapper
self.open_kw: Any = open_kw # pyre-ignore[4]
# pyre-ignore[3]
def __iter__(self) -> Iterator[Any]:
worker_info = torch.utils.data.get_worker_info()
paths = self.paths
if worker_info is not None:
paths = (
path
for (idx, path) in enumerate(paths)
if idx % worker_info.num_workers == worker_info.id
)
datapipe = LoadFiles(paths, mode="r", **self.open_kw)
datapipe = ReadLinesFromCSV(datapipe, delimiter="\t")
if self.row_mapper:
datapipe = dp.iter.Mapper(datapipe, self.row_mapper)
yield from datapipe
def criteo_terabyte(
paths: Iterable[str],
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> IterDataPipe:
"""`Criteo 1TB Click Logs <https://ailab.criteo.com/download-criteo-1tb-click-logs-dataset/>`_ Dataset
Args:
paths (Iterable[str]): local paths to TSV files that constitute the Criteo 1TB
dataset.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each
split TSV line.
open_kw: options to pass to underlying invocation of
iopath.common.file_io.PathManager.open.
Example::
datapipe = criteo_terabyte(
("/home/datasets/criteo/day_0.tsv", "/home/datasets/criteo/day_1.tsv")
)
datapipe = dp.iter.Batcher(datapipe, 100)
datapipe = dp.iter.Collator(datapipe)
batch = next(iter(datapipe))
"""
return CriteoIterDataPipe(paths, row_mapper=row_mapper, **open_kw)
def criteo_kaggle(
path: str,
*,
# pyre-ignore[2]
row_mapper: Optional[Callable[[List[str]], Any]] = _default_row_mapper,
# pyre-ignore[2]
**open_kw,
) -> IterDataPipe:
"""`Kaggle/Criteo Display Advertising <https://www.kaggle.com/c/criteo-display-ad-challenge/>`_ Dataset
Args:
root (str): local path to train or test dataset file.
row_mapper (Optional[Callable[[List[str]], Any]]): function to apply to each split TSV line.
open_kw: options to pass to underlying invocation of iopath.common.file_io.PathManager.open.
Example::
train_datapipe = criteo_kaggle(
"/home/datasets/criteo_kaggle/train.txt",
)
example = next(iter(train_datapipe))
test_datapipe = criteo_kaggle(
"/home/datasets/criteo_kaggle/test.txt",
)
example = next(iter(test_datapipe))
"""
return CriteoIterDataPipe((path,), row_mapper=row_mapper, **open_kw)
class BinaryCriteoUtils:
"""
Utility functions used to preprocess, save, load, partition, etc. the Criteo
dataset in a binary (numpy) format.
"""
@staticmethod
def tsv_to_npys(
in_file: str,
out_dense_file: str,
out_sparse_file: str,
out_labels_file: str,
path_manager_key: str = PATH_MANAGER_KEY,
) -> None:
"""
Convert one Criteo tsv file to three npy files: one for dense (np.float32), one
for sparse (np.int32), and one for labels (np.int32).
Args:
in_file (str): Input tsv file path.
out_dense_file (str): Output dense npy file path.
out_sparse_file (str): Output sparse npy file path.
out_labels_file (str): Output labels npy file path.
path_manager_key (str): Path manager key used to load from different
filesystems.
Returns:
None.
"""
def row_mapper(row: List[str]) -> Tuple[List[int], List[int], int]:
label = safe_cast(row[0], int, 0)
dense = [safe_cast(row[i], int, 0) for i in range(1, 1 + INT_FEATURE_COUNT)]
sparse = [
int(safe_cast(row[i], str, "0") or "0", 16)
for i in range(
1 + INT_FEATURE_COUNT, 1 + INT_FEATURE_COUNT + CAT_FEATURE_COUNT
)
]
return dense, sparse, label # pyre-ignore[7]
dense, sparse, labels = [], [], []
for (row_dense, row_sparse, row_label) in CriteoIterDataPipe(
[in_file], row_mapper=row_mapper
):
dense.append(row_dense)
sparse.append(row_sparse)
labels.append(row_label)
# PyTorch tensors can't handle uint32, but we can save space by not
# using int64. Numpy will automatically handle dense values >= 2 ** 31.
dense_np = np.array(dense, dtype=np.int32)
del dense
sparse_np = np.array(sparse, dtype=np.int32)
del sparse
labels_np = np.array(labels, dtype=np.int32)
del labels
# Log is expensive to compute at runtime.
dense_np += 3
dense_np = np.log(dense_np, dtype=np.float32)
# To be consistent with dense and sparse.
labels_np = labels_np.reshape((-1, 1))
path_manager = PathManagerFactory().get(path_manager_key)
for (fname, arr) in [
(out_dense_file, dense_np),
(out_sparse_file, sparse_np),
(out_labels_file, labels_np),
]:
with path_manager.open(fname, "wb") as fout:
np.save(fout, arr)
@staticmethod
def get_shape_from_npy(
path: str, path_manager_key: str = PATH_MANAGER_KEY
) -> Tuple[int, ...]:
"""
Returns the shape of an npy file using only its header.
Args:
path (str): Input npy file path.
path_manager_key (str): Path manager key used to load from different
filesystems.
Returns:
shape (Tuple[int, ...]): Shape tuple.
"""
path_manager = PathManagerFactory().get(path_manager_key)
with path_manager.open(path, "rb") as fin:
np.lib.format.read_magic(fin)
shape, _order, _dtype = np.lib.format.read_array_header_1_0(fin)
return shape
@staticmethod
def get_file_idx_to_row_range(
lengths: List[int],
rank: int,
world_size: int,
) -> Dict[int, Tuple[int, int]]:
"""
Given a rank, world_size, and the lengths (number of rows) for a list of files,
return which files and which portions of those files (represented as row ranges
- all range indices are inclusive) should be handled by the rank. Each rank
will be assigned the same number of rows.
The ranges are determined in such a way that each rank deals with large
continuous ranges of files. This enables each rank to reduce the amount of data
it needs to read while avoiding seeks.
Args:
lengths (List[int]): A list of row counts for each file.
rank (int): rank.
world_size (int): world size.
Returns:
output (Dict[int, Tuple[int, int]]): Mapping of which files to the range in
those files to be handled by the rank. The keys of this dict are indices
of lengths.
"""
# All ..._g variables are globals indices (meaning they range from 0 to
# total_length - 1). All ..._l variables are local indices (meaning they range
# from 0 to lengths[i] - 1 for the ith file).
total_length = sum(lengths)
rows_per_rank = total_length // world_size
# Global indices that rank is responsible for. All ranges (left, right) are
# inclusive.
rank_left_g = rank * rows_per_rank
rank_right_g = (rank + 1) * rows_per_rank - 1
output = {}
# Find where range (rank_left_g, rank_right_g) intersects each file's range.
file_left_g, file_right_g = -1, -1
for idx, length in enumerate(lengths):
file_left_g = file_right_g + 1
file_right_g = file_left_g + length - 1
# If the ranges overlap.
if rank_left_g <= file_right_g and rank_right_g >= file_left_g:
overlap_left_g, overlap_right_g = max(rank_left_g, file_left_g), min(
rank_right_g, file_right_g
)
# Convert overlap in global numbers to (local) numbers specific to the
# file.
overlap_left_l = overlap_left_g - file_left_g
overlap_right_l = overlap_right_g - file_left_g
output[idx] = (overlap_left_l, overlap_right_l)
return output
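    # Worked example (illustrative): lengths=[150, 50] and world_size=2 give
    # rows_per_rank=100; rank 0 gets {0: (0, 99)} while rank 1 gets
    # {0: (100, 149), 1: (0, 49)}: the tail of file 0 plus all of file 1.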
@staticmethod
def load_npy_range(
fname: str,
start_row: int,
num_rows: int,
path_manager_key: str = PATH_MANAGER_KEY,
mmap_mode: bool = False,
) -> np.ndarray:
"""
Load part of an npy file.
NOTE: Assumes npy represents a numpy array of ndim 2.
Args:
fname (str): path string to npy file.
start_row (int): starting row from the npy file.
num_rows (int): number of rows to get from the npy file.
path_manager_key (str): Path manager key used to load from different
filesystems.
Returns:
output (np.ndarray): numpy array with the desired range of data from the
supplied npy file.
"""
path_manager = PathManagerFactory().get(path_manager_key)
with path_manager.open(fname, "rb") as fin:
np.lib.format.read_magic(fin)
shape, _order, dtype = np.lib.format.read_array_header_1_0(fin)
if len(shape) == 2:
total_rows, row_size = shape
else:
raise ValueError("Cannot load range for npy with ndim == 2.")
if not (0 <= start_row < total_rows):
raise ValueError(
f"start_row ({start_row}) is out of bounds. It must be between 0 "
f"and {total_rows - 1}, inclusive."
)
if not (start_row + num_rows <= total_rows):
raise ValueError(
f"num_rows ({num_rows}) exceeds number of available rows "
f"({total_rows}) for the given start_row ({start_row})."
)
if mmap_mode:
data = np.load(fname, mmap_mode="r")
data = data[start_row : start_row + num_rows]
else:
offset = start_row * row_size * dtype.itemsize
fin.seek(offset, os.SEEK_CUR)
num_entries = num_rows * row_size
data = np.fromfile(fin, dtype=dtype, count=num_entries)
return data.reshape((num_rows, row_size))
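    # Illustrative: with int32 data (itemsize 4) and row_size=26, start_row=1000
    # seeks 1000 * 26 * 4 = 104,000 bytes past the header before reading.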
@staticmethod
def sparse_to_contiguous(
in_files: List[str],
output_dir: str,
frequency_threshold: int = FREQUENCY_THRESHOLD,
columns: int = CAT_FEATURE_COUNT,
path_manager_key: str = PATH_MANAGER_KEY,
output_file_suffix: str = "_contig_freq.npy",
) -> None:
"""
Convert all sparse .npy files to have contiguous integers. Store in a separate
.npy file. All input files must be processed together because columns
can have matching IDs between files. Hence, they must be transformed
together. Also, the transformed IDs are not unique between columns. IDs
that appear less than frequency_threshold amount of times will be remapped
to have a value of 1.
        Example transformation, frequency_threshold of 2:
day_0_sparse.npy
| col_0 | col_1 |
-----------------
| abc | xyz |
| iop | xyz |
day_1_sparse.npy
| col_0 | col_1 |
-----------------
| iop | tuv |
| lkj | xyz |
day_0_sparse_contig.npy
| col_0 | col_1 |
-----------------
| 1 | 2 |
| 2 | 2 |
day_1_sparse_contig.npy
| col_0 | col_1 |
-----------------
| 2 | 1 |
| 1 | 2 |
Args:
            in_files (List[str]): Paths of the input sparse npy files.
            output_dir (str): Output directory for the processed npy files.
            frequency_threshold (int): IDs occurring fewer than this many times will be remapped to a value of 1.
            path_manager_key (str): Path manager key used to load from different filesystems.
Returns:
None.
"""
# Load each .npy file of sparse features. Transformations are made along the columns.
        # To make those column-wise operations easier, transpose the input.
# E.g. file_to_features = {"day_0_sparse": [array([[3,6,7],[7,9,3]]}
file_to_features: Dict[str, np.ndarray] = {}
for f in in_files:
name = os.path.basename(f).split(".")[0]
file_to_features[name] = np.load(f).transpose()
print(f"Successfully loaded file: {f}")
# Iterate through each column in each file and map the sparse ids to contiguous ids.
for col in range(columns):
print(f"Processing column: {col}")
# Iterate through each row in each file for the current column and determine the
# frequency of each sparse id.
sparse_to_frequency: Dict[int, int] = {}
if frequency_threshold > 1:
for f in file_to_features:
for _, sparse in enumerate(file_to_features[f][col]):
if sparse in sparse_to_frequency:
sparse_to_frequency[sparse] += 1
else:
sparse_to_frequency[sparse] = 1
# Iterate through each row in each file for the current column and remap each
# sparse id to a contiguous id. The contiguous ints start at a value of 2 so that
            # infrequent IDs (determined by the frequency_threshold) can be remapped to 1.
running_sum = 2
sparse_to_contiguous_int: Dict[int, int] = {}
for f in file_to_features:
print(f"Processing file: {f}")
for i, sparse in enumerate(file_to_features[f][col]):
if sparse not in sparse_to_contiguous_int:
# If the ID appears less than frequency_threshold amount of times
# remap the value to 1.
if (
frequency_threshold > 1
and sparse_to_frequency[sparse] < frequency_threshold
):
sparse_to_contiguous_int[sparse] = 1
else:
sparse_to_contiguous_int[sparse] = running_sum
running_sum += 1
# Re-map sparse value to contiguous in place.
file_to_features[f][col][i] = sparse_to_contiguous_int[sparse]
path_manager = PathManagerFactory().get(path_manager_key)
for f, features in file_to_features.items():
output_file = os.path.join(output_dir, f + output_file_suffix)
with path_manager.open(output_file, "wb") as fout:
print(f"Writing file: {output_file}")
# Transpose back the features when saving, as they were transposed when loading.
np.save(fout, features.transpose())
@staticmethod
def shuffle(
input_dir_labels_and_dense: str,
input_dir_sparse: str,
output_dir_shuffled: str,
rows_per_day: Dict[int, int],
output_dir_full_set: Optional[str] = None,
days: int = DAYS,
int_columns: int = INT_FEATURE_COUNT,
sparse_columns: int = CAT_FEATURE_COUNT,
path_manager_key: str = PATH_MANAGER_KEY,
) -> None:
"""
Shuffle the dataset. Expects the files to be in .npy format and the data
to be split by day and by dense, sparse and label data.
Dense data must be in: day_x_dense.npy
Sparse data must be in: day_x_sparse.npy
Labels data must be in: day_x_labels.npy
The dataset will be reconstructed, shuffled and then split back into
separate dense, sparse and labels files.
Args:
input_dir_labels_and_dense (str): Input directory of labels and dense npy files.
input_dir_sparse (str): Input directory of sparse npy files.
output_dir_shuffled (str): Output directory for shuffled labels, dense and sparse npy files.
            rows_per_day (Dict[int, int]): Number of rows in each file.
            output_dir_full_set (Optional[str]): Output directory of the full dataset, if desired.
days (int): Number of day files.
int_columns (int): Number of columns with dense features.
            sparse_columns (int): Number of columns with sparse features.
path_manager_key (str): Path manager key used to load from different filesystems.
"""
total_rows = sum(rows_per_day.values())
columns = int_columns + sparse_columns + 1 # add 1 for label column
full_dataset = np.zeros((total_rows, columns), dtype=np.float32)
curr_first_row = 0
curr_last_row = 0
for d in range(0, days):
curr_last_row += rows_per_day[d]
# dense
path_to_file = os.path.join(
input_dir_labels_and_dense, f"day_{d}_dense.npy"
)
data = np.load(path_to_file)
print(
f"Day {d} dense- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
)
full_dataset[curr_first_row:curr_last_row, 0:int_columns] = data
del data
# sparse
path_to_file = os.path.join(input_dir_sparse, f"day_{d}_sparse.npy")
data = np.load(path_to_file)
print(
f"Day {d} sparse- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
)
full_dataset[curr_first_row:curr_last_row, int_columns : columns - 1] = data
del data
# labels
path_to_file = os.path.join(
input_dir_labels_and_dense, f"day_{d}_labels.npy"
)
data = np.load(path_to_file)
print(
f"Day {d} labels- {curr_first_row}-{curr_last_row} loaded files - {time.time()} - {path_to_file}"
)
full_dataset[curr_first_row:curr_last_row, columns - 1 :] = data
del data
curr_first_row = curr_last_row
path_manager = PathManagerFactory().get(path_manager_key)
# Save the full dataset
if output_dir_full_set is not None:
full_output_file = os.path.join(output_dir_full_set, "full.npy")
with path_manager.open(full_output_file, "wb") as fout:
print(f"Writing full set file: {full_output_file}")
np.save(fout, full_dataset)
print("Shuffling dataset")
np.random.shuffle(full_dataset)
# Slice and save each portion into dense, sparse and labels
curr_first_row = 0
curr_last_row = 0
for d in range(0, days):
curr_last_row += rows_per_day[d]
# write dense columns
shuffled_dense_file = os.path.join(
output_dir_shuffled, f"day_{d}_dense.npy"
)
with path_manager.open(shuffled_dense_file, "wb") as fout:
print(
f"Writing rows {curr_first_row}-{curr_last_row-1} dense file: {shuffled_dense_file}"
)
np.save(fout, full_dataset[curr_first_row:curr_last_row, 0:int_columns])
# write sparse columns
shuffled_sparse_file = os.path.join(
output_dir_shuffled, f"day_{d}_sparse.npy"
)
with path_manager.open(shuffled_sparse_file, "wb") as fout:
print(
f"Writing rows {curr_first_row}-{curr_last_row-1} sparse file: {shuffled_sparse_file}"
)
np.save(
fout,
full_dataset[
curr_first_row:curr_last_row, int_columns : columns - 1
].astype(np.int32),
)
# write labels columns
shuffled_labels_file = os.path.join(
output_dir_shuffled, f"day_{d}_labels.npy"
)
with path_manager.open(shuffled_labels_file, "wb") as fout:
print(
f"Writing rows {curr_first_row}-{curr_last_row-1} labels file: {shuffled_labels_file}"
)
np.save(
fout,
full_dataset[curr_first_row:curr_last_row, columns - 1 :].astype(
np.int32
),
)
curr_first_row = curr_last_row
class InMemoryBinaryCriteoIterDataPipe(IterableDataset):
"""
Datapipe designed to operate over binary (npy) versions of Criteo datasets. Loads
    the entire dataset into memory to prevent disk speed from affecting throughput. Each
rank reads only the data for the portion of the dataset it is responsible for.
The torchrec/datasets/scripts/preprocess_criteo.py script can be used to convert
the Criteo tsv files to the npy files expected by this dataset.
Args:
dense_paths (List[str]): List of path strings to dense npy files.
sparse_paths (List[str]): List of path strings to sparse npy files.
labels_paths (List[str]): List of path strings to labels npy files.
batch_size (int): batch size.
rank (int): rank.
world_size (int): world size.
        shuffle_batches (bool): Whether to shuffle batches.
        hashes (Optional[List[int]]): List of max categorical feature value for each feature.
            Length of this list should be CAT_FEATURE_COUNT.
path_manager_key (str): Path manager key used to load from different
filesystems.
Example::
template = "/home/datasets/criteo/1tb_binary/day_{}_{}.npy"
datapipe = InMemoryBinaryCriteoIterDataPipe(
dense_paths=[template.format(0, "dense"), template.format(1, "dense")],
sparse_paths=[template.format(0, "sparse"), template.format(1, "sparse")],
labels_paths=[template.format(0, "labels"), template.format(1, "labels")],
batch_size=1024,
rank=torch.distributed.get_rank(),
world_size=torch.distributed.get_world_size(),
)
batch = next(iter(datapipe))
"""
def __init__(
self,
dense_paths: List[str],
sparse_paths: List[str],
labels_paths: List[str],
batch_size: int,
rank: int,
world_size: int,
shuffle_batches: bool = False,
mmap_mode: bool = False,
hashes: Optional[List[int]] = None,
path_manager_key: str = PATH_MANAGER_KEY,
) -> None:
self.dense_paths = dense_paths
self.sparse_paths = sparse_paths
self.labels_paths = labels_paths
self.batch_size = batch_size
self.rank = rank
self.world_size = world_size
self.shuffle_batches = shuffle_batches
self.mmap_mode = mmap_mode
self.hashes = hashes
self.path_manager_key = path_manager_key
self.path_manager: PathManager = PathManagerFactory().get(path_manager_key)
self._load_data_for_rank()
self.num_rows_per_file: List[int] = [a.shape[0] for a in self.dense_arrs]
self.num_batches: int = sum(self.num_rows_per_file) // batch_size
# These values are the same for the KeyedJaggedTensors in all batches, so they
# are computed once here. This avoids extra work from the KeyedJaggedTensor sync
# functions.
self._num_ids_in_batch: int = CAT_FEATURE_COUNT * batch_size
self.keys: List[str] = DEFAULT_CAT_NAMES
self.lengths: torch.Tensor = torch.ones(
(self._num_ids_in_batch,), dtype=torch.int32
)
self.offsets: torch.Tensor = torch.arange(
0, self._num_ids_in_batch + 1, dtype=torch.int32
)
self.stride = batch_size
self.length_per_key: List[int] = CAT_FEATURE_COUNT * [batch_size]
self.offset_per_key: List[int] = [
batch_size * i for i in range(CAT_FEATURE_COUNT + 1)
]
self.index_per_key: Dict[str, int] = {
key: i for (i, key) in enumerate(self.keys)
}
def _load_data_for_rank(self) -> None:
file_idx_to_row_range = BinaryCriteoUtils.get_file_idx_to_row_range(
lengths=[
BinaryCriteoUtils.get_shape_from_npy(
path, path_manager_key=self.path_manager_key
)[0]
for path in self.dense_paths
],
rank=self.rank,
world_size=self.world_size,
)
self.dense_arrs, self.sparse_arrs, self.labels_arrs = [], [], []
for arrs, paths in zip(
[self.dense_arrs, self.sparse_arrs, self.labels_arrs],
[self.dense_paths, self.sparse_paths, self.labels_paths],
):
for idx, (range_left, range_right) in file_idx_to_row_range.items():
arrs.append(
BinaryCriteoUtils.load_npy_range(
paths[idx],
range_left,
range_right - range_left + 1,
path_manager_key=self.path_manager_key,
mmap_mode=self.mmap_mode,
)
)
# When mmap_mode is enabled, the hash is applied in def __iter__, which is
# where samples are batched during training.
# Otherwise, the ML dataset is preloaded, and the hash is applied here in
# the preload stage, as shown:
if not self.mmap_mode and self.hashes is not None:
hashes_np = np.array(self.hashes).reshape((1, CAT_FEATURE_COUNT))
for sparse_arr in self.sparse_arrs:
sparse_arr %= hashes_np
def _np_arrays_to_batch(
self, dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> Batch:
if self.shuffle_batches:
# Shuffle all 3 in unison
shuffler = np.random.permutation(len(dense))
dense = dense[shuffler]
sparse = sparse[shuffler]
labels = labels[shuffler]
return Batch(
dense_features=torch.from_numpy(dense),
sparse_features=KeyedJaggedTensor(
keys=self.keys,
# transpose + reshape(-1) incurs an additional copy.
values=torch.from_numpy(sparse.transpose(1, 0).reshape(-1)),
lengths=self.lengths,
offsets=self.offsets,
stride=self.stride,
length_per_key=self.length_per_key,
offset_per_key=self.offset_per_key,
index_per_key=self.index_per_key,
),
labels=torch.from_numpy(labels.reshape(-1)),
)
def __iter__(self) -> Iterator[Batch]:
# Invariant: buffer never contains more than batch_size rows.
buffer: Optional[List[np.ndarray]] = None
def append_to_buffer(
dense: np.ndarray, sparse: np.ndarray, labels: np.ndarray
) -> None:
nonlocal buffer
if buffer is None:
buffer = [dense, sparse, labels]
else:
for idx, arr in enumerate([dense, sparse, labels]):
buffer[idx] = np.concatenate((buffer[idx], arr))
# Maintain a buffer that can contain up to batch_size rows. Fill buffer as
# much as possible on each iteration. Only return a new batch when batch_size
# rows are filled.
file_idx = 0
row_idx = 0
batch_idx = 0
while batch_idx < self.num_batches:
buffer_row_count = 0 if buffer is None else none_throws(buffer)[0].shape[0]
if buffer_row_count == self.batch_size:
yield self._np_arrays_to_batch(*none_throws(buffer))
batch_idx += 1
buffer = None
else:
rows_to_get = min(
self.batch_size - buffer_row_count,
self.num_rows_per_file[file_idx] - row_idx,
)
slice_ = slice(row_idx, row_idx + rows_to_get)
dense_inputs = self.dense_arrs[file_idx][slice_, :]
sparse_inputs = self.sparse_arrs[file_idx][slice_, :]
target_labels = self.labels_arrs[file_idx][slice_, :]
if self.mmap_mode and self.hashes is not None:
sparse_inputs = sparse_inputs % np.array(self.hashes).reshape(
(1, CAT_FEATURE_COUNT)
)
append_to_buffer(
dense_inputs,
sparse_inputs,
target_labels,
)
row_idx += rows_to_get
if row_idx >= self.num_rows_per_file[file_idx]:
file_idx += 1
row_idx = 0
def __len__(self) -> int:
return self.num_batches
| 39.069795 | 114 | 0.59137 | 4,111 | 32,467 | 4.444904 | 0.127706 | 0.030701 | 0.028348 | 0.013025 | 0.363378 | 0.301866 | 0.277568 | 0.232255 | 0.2073 | 0.185301 | 0 | 0.009355 | 0.321773 | 32,467 | 830 | 115 | 39.116867 | 0.820481 | 0.328118 | 0 | 0.197002 | 0 | 0.006424 | 0.053838 | 0.012802 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038544 | false | 0 | 0.023555 | 0.002141 | 0.087794 | 0.025696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33d8f9547baee0b5df90b1229cfe43f902dcdeb7 | 1,693 | py | Python | xrpc_tests/serde/test_walk.py | andreycizov/python-xrpc | ed403ae74d5e89e0ebac68bcc58591d6b32742ff | [
"Apache-2.0"
] | null | null | null | xrpc_tests/serde/test_walk.py | andreycizov/python-xrpc | ed403ae74d5e89e0ebac68bcc58591d6b32742ff | [
"Apache-2.0"
] | null | null | null | xrpc_tests/serde/test_walk.py | andreycizov/python-xrpc | ed403ae74d5e89e0ebac68bcc58591d6b32742ff | [
"Apache-2.0"
] | null | null | null | import sys
import unittest
from typing import NamedTuple, Optional, Dict
from xrpc.const import SERVER_SERDE_INST
from xrpc.serde.abstract import SerdeSet, SerdeStepContext
from xrpc.serde.types import CallableArgsWrapper
class Simple2(NamedTuple):
y: Optional[str] = 'asd'
class Simple(NamedTuple):
x: Optional[int]
z: Simple2
class TestWalk(unittest.TestCase):
def test_empty(self):
i = SERVER_SERDE_INST
x = SerdeSet.walk(i, Simple, SerdeStepContext(mod=sys.modules[__name__]))
y = x.struct(i)
z = y.deserialize(Simple, {'x': 5, 'z': {'y': 'abc'}})
def test_caller(self):
i = SERVER_SERDE_INST
class Simpleton(NamedTuple):
x: int
def a(a: int, b: str = 'abc', *cs: Dict[str, str], g: str, **kwargs: int):
pass
class A:
def a(self, a: int, c: Simpleton, b: str = 'abc', *cs: Dict[str, str], g: str, **kwargs: int):
pass
obj = A()
wrapper = CallableArgsWrapper.from_func(a)
wrapper2 = CallableArgsWrapper.from_func_cls(obj, A.a)
x1 = SerdeSet.walk(i, wrapper, SerdeStepContext(mod=sys.modules[__name__]))
x2 = SerdeSet.walk(i, wrapper2, SerdeStepContext(mod=sys.modules[__name__]))
x = x1.merge(x2)
y = x.struct(i)
z = y.deserialize(wrapper, [[5, 'asd', {'a': 'a'}, {'b': 'c'}], {'g': 'abc', 'd': 5}])
args, kwargs = z
a(*args, **kwargs)
#
z = y.deserialize(wrapper2, [[5, {'x': 5}, 'asd', {'a': 'a'}, {'b': 'c'}], {'g': 'abc', 'd': 5}])
zb = y.serialize(wrapper2, z)
args, kwargs = z
obj.a(*args, **kwargs)
| 24.185714 | 106 | 0.56645 | 221 | 1,693 | 4.235294 | 0.285068 | 0.008547 | 0.048077 | 0.092949 | 0.30235 | 0.153846 | 0.153846 | 0.106838 | 0.106838 | 0.106838 | 0 | 0.012945 | 0.269935 | 1,693 | 69 | 107 | 24.536232 | 0.744337 | 0 | 0 | 0.2 | 0 | 0 | 0.023641 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0.05 | 0.15 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33db0d368565ea1d5ffe9547a593c86507c344b4 | 8,873 | py | Python | TrainingExtensions/torch/src/python/aimet_torch/svd/svd.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | 945 | 2020-04-30T02:23:55.000Z | 2022-03-31T08:44:32.000Z | TrainingExtensions/torch/src/python/aimet_torch/svd/svd.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | 563 | 2020-05-01T03:07:22.000Z | 2022-03-30T05:35:58.000Z | TrainingExtensions/torch/src/python/aimet_torch/svd/svd.py | lipovsek/aimet | 236fb02cc6c45e65c067030416c49a09ace82045 | [
"BSD-3-Clause"
] | 186 | 2020-04-30T00:55:26.000Z | 2022-03-30T09:54:51.000Z | # /usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2018, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Interface of the AIMET SVD model-compression feature """
# Import AIMET specific modules
from aimet_torch.svd import svd_impl
from aimet_torch.svd.svd_intf_defs_deprecated import LayerSelectionScheme, RankSelectionScheme
class Svd:
""" Top-level SVD interface class to be invoked by AIMET users """
@staticmethod
def _check_params_and_throw(kw_args, expected_params, not_expected_params):
for param in expected_params:
if param not in kw_args:
raise ValueError("Expected param: {} is missing".format(param))
for param in not_expected_params:
if param in kw_args:
raise ValueError("Unexpected param: {} found".format(param))
@staticmethod
def _validate_layer_rank_params(model, layer_selection_scheme, rank_selection_scheme, **kwargs):
""" Validates the Layer Selection and Rank Selection parameters passed to the compress_model() function.
Args:
layer_selection_scheme (required): Enum argument. Options available: manual, top_n_layers, top_x_percent.
rank_selection_scheme (required): Enum argument. Options available: manual, auto
**kwargs (required): The Layer Selection and Rank Selection parameters.
Raises:
ValueError: When an invalid parameter is passed.
"""
# Validate the Layer Selection parameters
if layer_selection_scheme == LayerSelectionScheme.manual and rank_selection_scheme == rank_selection_scheme.auto:
Svd._check_params_and_throw(kwargs,
['layers_to_compress'],
['num_layers', 'percent_thresh'])
if layer_selection_scheme == LayerSelectionScheme.top_n_layers:
Svd._check_params_and_throw(kwargs,
['num_layers'],
['percent_thresh', 'layers_to_compress'])
num_layers = kwargs['num_layers']
# modules() always returns the model itself as the first iterable entry
num_modules_in_model = sum(1 for _ in model.modules()) - 1
if num_layers < 1 or num_layers > num_modules_in_model:
raise ValueError("KW argument num_layers: {} out-of-range".format(num_layers))
if layer_selection_scheme == LayerSelectionScheme.top_x_percent:
Svd._check_params_and_throw(kwargs,
['percent_thresh'],
['num_layers', 'layers_to_compress'])
percent = kwargs['percent_thresh']
if percent < 0 or percent > 100:
raise ValueError("KW argument percent_thresh: {} out-of-range".format(percent))
# Validate the Rank Selection parameters
if rank_selection_scheme == RankSelectionScheme.manual:
Svd._check_params_and_throw(kwargs,
['layer_rank_list'],
['error_margin', 'num_rank_indices'])
if rank_selection_scheme == RankSelectionScheme.auto:
Svd._check_params_and_throw(kwargs,
['error_margin', 'num_rank_indices'],
['layer_rank_list'])
@staticmethod
def compress_model(model, run_model, run_model_iterations, input_shape,
compression_type, cost_metric, layer_selection_scheme,
rank_selection_scheme, **kw_layer_rank_params):
"""
Runs rank selection on the model, and compresses it using the method and parameters provided
:param model: The model which needs to be compressed
:param run_model: The evaluation function that needs to be passed for one forward pass
:param run_model_iterations: The number of iterations of forward pass for the run_model
:param input_shape: Shape of the input to the model
        :param compression_type: Enum argument. Options available: svd, ssvd.
:param cost_metric: Enum argument. Options available: mac, memory
:param layer_selection_scheme: Enum argument. Options available: manual, top_n_layers, top_x_percent
:param rank_selection_scheme: Enum argument. Options available: manual, auto
:param kw_layer_rank_params: Params for layer and rank selection. Params depend on modes selected
:return: compressed model and Model statistics
        **Note regarding kw_layer_rank_params**:
        - If layer_selection_scheme is manual, specify the list of layers with layers_to_compress=[list of layers]
        - If layer_selection_scheme is top_n_layers, specify the number of layers with num_layers=<number>
        - If layer_selection_scheme is top_x_percent, specify the percentage threshold with percent_thresh=<number>
        - If rank_selection_scheme is manual, specify the layers and their respective ranks with layer_rank_list=[[layer, rank], ...]
        - If rank_selection_scheme is auto, specify the error margin and the number of rank indices to search with error_margin=<number> and num_rank_indices=<number>
"""
Svd._validate_layer_rank_params(model, layer_selection_scheme, rank_selection_scheme, **kw_layer_rank_params)
# Sanity check for run_model_iterations
if run_model_iterations <= 0:
raise ValueError("run_model_iterations: {} unexpected value. "
"Expect at least 1 iteration".format(run_model_iterations))
# Instantiate the SVD impl class
if rank_selection_scheme == rank_selection_scheme.auto:
svd_obj = svd_impl.SvdImpl(model, run_model, run_model_iterations, input_shape,
compression_type, cost_metric,
layer_selection_scheme,
**kw_layer_rank_params)
compressed_model, stats = svd_obj.compress_net(rank_selection_scheme=rank_selection_scheme,
**kw_layer_rank_params)
elif rank_selection_scheme == rank_selection_scheme.manual:
layers_to_compress = [layer for layer, _ in kw_layer_rank_params['layer_rank_list']]
svd_obj = svd_impl.SvdImpl(model, run_model, run_model_iterations, input_shape,
compression_type, cost_metric,
LayerSelectionScheme.manual,
layers_to_compress=layers_to_compress)
compressed_model, stats = svd_obj.compress_net(rank_selection_scheme=rank_selection_scheme,
**kw_layer_rank_params)
return compressed_model, stats
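# A minimal usage sketch (illustrative: `model` is a torch.nn.Module, `evaluate`
# runs forward passes, and compression_type/cost_metric take enum values from
# the same svd_intf_defs_deprecated module):
#
#   compressed_model, stats = Svd.compress_model(
#       model, evaluate, run_model_iterations=10, input_shape=(1, 3, 224, 224),
#       compression_type=..., cost_metric=...,
#       layer_selection_scheme=LayerSelectionScheme.top_n_layers,
#       rank_selection_scheme=RankSelectionScheme.auto,
#       num_layers=5, error_margin=2, num_rank_indices=20)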
| 55.45625 | 167 | 0.648822 | 1,047 | 8,873 | 5.264565 | 0.26361 | 0.078919 | 0.058599 | 0.040639 | 0.369739 | 0.308237 | 0.252358 | 0.203919 | 0.162373 | 0.162373 | 0 | 0.002949 | 0.273865 | 8,873 | 159 | 168 | 55.805031 | 0.852553 | 0.471994 | 0 | 0.265625 | 0 | 0 | 0.104328 | 0.004784 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046875 | false | 0 | 0.03125 | 0 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33dd0ef7334bc05b76d6858442d6a802f26e47aa | 948 | py | Python | migrations/versions/650_change_service_id_datatype_change_orders_service_id_to_string.py | pebblecode/cirrus-marketplace-api | 64d9e3be8705a2fe64c964b16947e9877885de7b | [
"MIT"
] | null | null | null | migrations/versions/650_change_service_id_datatype_change_orders_service_id_to_string.py | pebblecode/cirrus-marketplace-api | 64d9e3be8705a2fe64c964b16947e9877885de7b | [
"MIT"
] | null | null | null | migrations/versions/650_change_service_id_datatype_change_orders_service_id_to_string.py | pebblecode/cirrus-marketplace-api | 64d9e3be8705a2fe64c964b16947e9877885de7b | [
"MIT"
] | null | null | null | """Change orders.service_id to String
Revision ID: 650_change_service_id_datatype
Revises: 640_add_orders_table
Create Date: 2016-07-01 13:15:29.629574
"""
# revision identifiers, used by Alembic.
revision = '650_change_service_id_datatype'
down_revision = '640_add_orders_table'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(u'orders_service_id_fkey', 'orders', type_='foreignkey')
op.alter_column('orders', 'service_id', type_=sa.String())
op.create_foreign_key(None, 'orders', 'services', ['service_id'], ['service_id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'orders', type_='foreignkey')
op.create_foreign_key(u'orders_service_id_fkey', 'orders', 'services', ['service_id'], ['id'])
### end Alembic commands ###
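# Apply this migration with `alembic upgrade head`; revert it with
# `alembic downgrade 640_add_orders_table`.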
| 31.6 | 98 | 0.721519 | 128 | 948 | 5.0625 | 0.421875 | 0.125 | 0.092593 | 0.055556 | 0.339506 | 0.259259 | 0.179012 | 0.179012 | 0.179012 | 0.179012 | 0 | 0.039168 | 0.138186 | 948 | 29 | 99 | 32.689655 | 0.753978 | 0.35654 | 0 | 0 | 0 | 0 | 0.350087 | 0.12825 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.181818 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33de28b32660b01bc6a1d8881d5ea5df8352484b | 1,254 | py | Python | v_server/urls.py | binalyadav/VigilantServer | 3b0d31d760789ffddf78a8988db9b77aecaf8910 | [
"MIT"
] | null | null | null | v_server/urls.py | binalyadav/VigilantServer | 3b0d31d760789ffddf78a8988db9b77aecaf8910 | [
"MIT"
] | null | null | null | v_server/urls.py | binalyadav/VigilantServer | 3b0d31d760789ffddf78a8988db9b77aecaf8910 | [
"MIT"
] | null | null | null | from django.urls import include, path, re_path
from rest_framework import routers
from . import views
from .utils import *
router = routers.SimpleRouter()
router.register(r'users', views.UserViewSet)
router.register(r'groups', views.GroupViewSet)
router.register(r'organizations', views.OrganizationViewSet)
router.register(r'endpoints', views.EndpointViewSet)
router.register(r'logs', views.LogsViewSet)
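# SimpleRouter generates the standard list/detail routes for each registration,
# e.g. /users/ and /users/{pk}/ for the UserViewSet.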
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('setOrg/', views.OrganizationViewSet.setOrganizationUser, name='setOrgUser'),
path('getLogs/', views.LogsViewSet.getLogsByType, name='getLogsByType'),
path('userCount/', views.UserViewSet.getCount, name='userCount'),
path('organizationCount/', views.OrganizationViewSet.getCount,
name='organizationCount'),
path('endpointCount/', views.EndpointViewSet.getCount, name='endpointCount'),
path('getLogsByDateTimeRange/', views.LogsViewSet.getLogsByDateTimeRange,
name='getLogsByDateTimeRange'),
path('vserver/<path:endpoint>', makeRequest, name='makeRequest'),
path('', include(router.urls)),
path('', include('rest_framework.urls', namespace='rest_framework'))
]
| 46.444444 | 87 | 0.754386 | 131 | 1,254 | 7.19084 | 0.427481 | 0.07431 | 0.079618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.111643 | 1,254 | 26 | 88 | 48.230769 | 0.845601 | 0.082137 | 0 | 0 | 0 | 0 | 0.233449 | 0.059233 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.173913 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33de738d5420e9582d24dcf1b949315e220cc17d | 9,241 | py | Python | Milestone1/_deepzoom_coastpilot.py | jaybo/CoastPilot | a4c2751a82769ad093d93144731f85b51843087f | [
"MIT"
] | null | null | null | Milestone1/_deepzoom_coastpilot.py | jaybo/CoastPilot | a4c2751a82769ad093d93144731f85b51843087f | [
"MIT"
] | null | null | null | Milestone1/_deepzoom_coastpilot.py | jaybo/CoastPilot | a4c2751a82769ad093d93144731f85b51843087f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
##########################################################################################
#### Import libraries
import requests
import os
from zipfile import ZipFile
import xml.etree.ElementTree as ET
import pandas as pd
import glob
import numpy as np
import re
import shutil
##### All of the functions that will be needed for the script
# Return just the text portion of the paragraph
def plain_text_paragraph(paragraph):
s1 = re.sub('<.*?>', '', paragraph)
s2 = re.sub("\t", '', s1)
s_out = re.sub("\(.*?\)", '', s2)
return(s_out)
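# Illustrative: plain_text_paragraph('<b>Juneau</b>\t(Alaska)') -> 'Juneau'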
# Return the Source IDs present in paragraphs
def return_geo_ids(paragraph):
paragraph = paragraph.lower() # some of them are capitalized
geo_ids = re.findall('<cp_geo_loc.*?</cp_geo_loc>', paragraph)
geo_ids2 = [re.findall('source_id=\".*?\"', ids) for ids in geo_ids]
geo_ids3 = [re.findall('\".*?\"', ids) for sublist in geo_ids2 for ids in sublist]
geo_ids4 = [output for sublist in geo_ids3 for output in sublist]
output_ids = [output[1:-1] for output in geo_ids4]
return(output_ids)
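# Illustrative: return_geo_ids('<CP_GEO_LOC source_id="123">Juneau</CP_GEO_LOC>')
# returns ['123']; the .lower() call makes the tag match case-insensitive.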
# Make a dataframe from each entry
def entry_to_df(paragraph):
sourceid_df = pd.DataFrame(return_geo_ids(paragraph), columns=['source_id'])
sourceid_df['paragraph'] = plain_text_paragraph(paragraph)
return(sourceid_df)
# See if the string is a digit or not
def test_num(x):
if str(x).isdigit() == True:
return(int(x))
else:
return(np.nan)
# Same, but testing to see if it's a name
def test_name(x):
if str(x).isdigit() == True:
return(np.nan)
else:
return(str(x))
# Make a list of files (There are 10 coast pilot publications)
url_list = list()
for n in range(1,11):
url_list.append('https://nauticalcharts.noaa.gov/publications/coast-pilot/files/cp' + str(n) +
'/CPB' + str(n) + '_WEB.zip')
# Make blank data frames to export the ones from the loop
txt_master_df = []
loc_master_df = []
##########################################################################################
# Big loop:
for url in url_list: # Loop through each URL
coastpilot_number = url.split("/")[6] # Just the number of the publication
print("Working on " + coastpilot_number)
# Download the URL
# Make a folder to put the unzipped data in
output_zip_path = coastpilot_number + '.zip'
output_folder = os.path.join("zip", coastpilot_number)
if(os.path.exists("zip") == False): # Make the "zip" folder if it doesn't exist
os.mkdir("zip")
    if(os.path.exists(output_folder) == False): # Only make it if it doesn't already exist
os.mkdir(output_folder)
# Download the file
#print("Requesting URL")
rq = requests.get(url, allow_redirects=True)
#print("Downloading URL")
open(output_zip_path, 'wb').write(rq.content)
# Unzip the file
#print("Unzipping")
with ZipFile(output_zip_path, 'r') as zipObj:
zipObj.extractall(output_folder)
# To save space, delete the zipped file
if os.path.exists(output_zip_path):
os.remove(output_zip_path)
# Get the names of all chapter files within this folder
f_list = sorted(glob.glob(os.path.join(output_folder, "*_C*.xml")))
#print("Parsing XML")
# MAIN LOOP:
# Loop through each chapter and pull the locations and text
for file in f_list:
chapter_short = file.split("_")[1] # Cut out the "CXX" part of the name (Chapter)
#print("working on", chapter_short)
# Open the chapter .xml
doc_xml = ET.parse(file)
root = doc_xml.getroot()
# Get some values to give some information about the other fields
chapter_title = ET.tostring(root, encoding='utf8').decode('utf8').split("chapterTitle>")[1].split("</")[0]
booktitlenum = root.attrib["Number"]
booktitle = root.attrib['Title']
bookyear = root.attrib['Year']
bookedition = root.attrib['Year']
bookchapternum = root.attrib['ChapterNo']
##### Step 1 -
# First, get the location information
# To do this, parse out the 'CP_GEO_LOC' and put them into a list
output_list = list()
for el in root.iter('CP_GEO_LOC'):
output_list.append(el.attrib)
# Since some of the attribute names are capitalized,
        # we need to make them all lowercase in order to ensure compatibility
output_list_c = list()
for entry in output_list:
output_list_c.append({k.lower(): v for k, v in entry.items()})
# Convert the list into a dataframe
output_df = pd.DataFrame.from_dict(output_list_c)
# Add some other fields to tell us where this came from, what edition, etc.
output_df["chapter_title"] = chapter_title
output_df["book_title"] = booktitle
output_df["book_year"] = bookyear
output_df["book_edition"] = bookedition
output_df["book_chapter_number"] = bookchapternum
# Append the df to the master
if 'output_df' in locals(): # If it exists
if output_df.shape[0] > 0: # If it has at least one row
loc_master_df.append(output_df)
##### Step 2 -
# Return the text information
# First, turn the xml into a string, and separate it into a list
all_text = ET.tostring(root, encoding='utf8').decode('utf8')
all_text = all_text.split('\n')
# Make a blank df to put the output into
source_text_df = pd.DataFrame(columns=['source_id', 'paragraph'])
for entry in all_text:
entry_df = entry_to_df(entry)
if(entry_df.shape[0] > 0):
source_text_df = source_text_df.append(entry_df, ignore_index=True)
# Append the df to the master
if 'source_text_df' in locals(): # If it exists
if source_text_df.shape[0] > 0: # If it has at least one row
txt_master_df.append(source_text_df)
# When completed, remove the unzipped folder
    if os.path.exists(output_folder):
shutil.rmtree(output_folder)
# On the last one, remove the 'zip' folder
if url_list.index(url) == (len(url_list) - 1):
shutil.rmtree('zip')
##########################################################################################
# Now that this is done, combine all of the df's in the location master
loc_master_df = pd.concat(loc_master_df, ignore_index=True)
# There is an error in the table which occasionally switches the county_name and county_numeric
# To fix it, make two columns and combine into one with the numeric values
c_num1 = loc_master_df["county_name"].apply(test_num)
c_num2 = loc_master_df["county_numeric"].apply(test_num)
num_out = c_num1.fillna(c_num2)
# Do the same thing with the county_name
c_name1 = loc_master_df["county_name"].apply(test_name)
c_name2 = loc_master_df["county_numeric"].apply(test_name)
name_out = c_name1.fillna(c_name2)
# Overwrite the values in the columns
loc_master_df["county_name"] = name_out
loc_master_df["county_numeric"] = num_out
# Drop duplicate source id's
loc_master_df = loc_master_df.drop_duplicates('source_id')
# Add an 'elev_in_ft' column
loc_master_df['elev_in_ft'] = loc_master_df.elev_in_m.apply(lambda x: pd.to_numeric(x) * 3.28084)
# If desired, export the df to a file
#loc_master_df.to_csv("loc_output_all.csv", index=False)
##########################################################################################
# Also combine the text master
txt_master_df = pd.concat(txt_master_df, ignore_index=True)
# There are some instances where the source_id is repeated with different paragraphs
# This will combine them into one paragraph
txt_master_df = txt_master_df.groupby('source_id')['paragraph'].apply(' '.join).reset_index()
# If desired, export the master_df to a file
#txt_master_df.to_csv("output_all_text.csv", index=False)
##########################################################################################
# Merge the two tables based on the source_id column
df_all = pd.merge(loc_master_df, txt_master_df, on='source_id', how='left')
# Fill in NA values
df_all = df_all.fillna(" ")
# If desired, write df_all to the working directory
#df_all.to_csv("df_all.csv", index=False)
##########################################################################################
##### Separate the tables into a mapbox output and a deepzoom output
# Mapbox output is just the lat/long and source ID
mapbox_output = df_all[['source_id', 'lat_dec', 'long_dec']]
mapbox_output.to_csv('_mapbox_output.csv', index=False)
# DeepZoom output has all other information, including the text paragraph
dz_output = df_all[['source_id', 'feature_name', 'feature_class', 'lat_dec', 'long_dec',
'elev_in_m', 'elev_in_ft', 'paragraph']]
dz_output.to_csv('_deepzoom_database_output.csv', index=False)
##########################################################################################
# DO SOMETHING WITH THE DATA HERE
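# Example placeholder (illustrative only): a quick summary of the merged table
print("Total located features:", len(df_all))
print("Most common feature classes:")
print(df_all['feature_class'].value_counts().head(10))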
########################################################################################## | 38.991561 | 114 | 0.617249 | 1,301 | 9,241 | 4.191391 | 0.244427 | 0.036677 | 0.032276 | 0.018705 | 0.131487 | 0.092426 | 0.072621 | 0.010636 | 0.010636 | 0.010636 | 0 | 0.006663 | 0.204199 | 9,241 | 237 | 115 | 38.991561 | 0.734838 | 0.312412 | 0 | 0.051282 | 0 | 0 | 0.117286 | 0.010105 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042735 | false | 0 | 0.076923 | 0 | 0.119658 | 0.008547 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e202595bcff5408c85f89714403151350da0e9 | 7,411 | py | Python | main.py | YOULOF2/Speedest | 437ec1e3ce274ab58e1f35b04c57f42f8247819b | [
"MIT"
] | null | null | null | main.py | YOULOF2/Speedest | 437ec1e3ce274ab58e1f35b04c57f42f8247819b | [
"MIT"
] | null | null | null | main.py | YOULOF2/Speedest | 437ec1e3ce274ab58e1f35b04c57f42f8247819b | [
"MIT"
] | null | null | null | from playsound import playsound
from renit import reinit_app
from shape_creators import *
from testbrain import TestBrain
from tkinter import ttk
from ttkthemes import ThemedTk
import math
REC_WIDTH = 600
REC_HEIGHT = 200
RECTANGLE_COLOUR = "#3fc1c9"
RECTANGLE_TEXT_COLOUR = "#364f6b"
BACKGROUND_COLOUR = "#f5f5f5"
RED = "#fc5185"
YELLOW = "#ffde7d"
WIDGET_WIDTH = 95
font_data = ("TkMenuFont", 20, "normal")
window = ThemedTk(theme="adapta")
window.title("Speedest")
window.config(padx=100, pady=100, background=BACKGROUND_COLOUR)
brain = TestBrain()
# ----------------------------------------------------------------------------------------------------------------------
def show_time(event):
time = str(math.ceil(float(scale_variable.get())))
time_val_label.config(text=f"{time}s")
def start_test():
playsound("sounds/select_sound.mp3", block=False)
controlls_frame.grid_remove()
test_frame.grid(row=1, column=0, pady=(20, 0))
test_entery.focus()
brain.get_scentence()
test_text_canvas.itemconfig(test_text, text=brain.scentence)
print(math.ceil(float(scale_variable.get())))
brain.difficulty_time = math.ceil(float(scale_variable.get()))
brain.start_timer()
def change_text(event):
brain.check_timer()
if event.keysym == "space":
brain.get_scentence()
test_text_canvas.itemconfig(test_text, text=brain.scentence)
if brain.is_test_complete["state"]:
window.focus_set()
test_entery.grid_remove()
show_result_btn.grid(row=2, column=0)
brain.end_timer()
test_entery_label.config(text=brain.is_test_complete["message"])
test_text_canvas.itemconfig(test_text, text=brain.is_test_complete["message"])
else:
brain.index += 1
brain.words_left -= 1
test_entery_label.config(text=f"{str(brain.words_left)} words left")
def show_result():
playsound("sounds/select_sound.mp3", block=False)
window.focus_set()
brain.check_answer(str(test_entery.get()))
test_frame.grid_remove()
test_text_canvas.grid_remove()
test_result_frame.grid(row=0, column=0)
title_canvas.itemconfig(title_text, text=brain.is_test_complete["message"])
errors_canvas.itemconfig(errors_text, text=f"{brain.user_score['errors']} wrong words.")
speed_canvas.itemconfig(speed_text, text=f"{brain.user_score['typing_speed']} words/second")
def go_home():
reinit_app(window)
# ----------------------------------------------------------------------------------------------------------------------
# Create Canvas that contains the rectangle and the test text
test_text_canvas = tk.Canvas(window, width=REC_WIDTH, height=REC_HEIGHT, background=BACKGROUND_COLOUR,
highlightthickness=0)
rec = round_rectangle(test_text_canvas, 0, 0, REC_WIDTH, REC_HEIGHT, radius=50, fill=RECTANGLE_COLOUR,
width=test_text_canvas.winfo_width())
test_text = test_text_canvas.create_text(REC_WIDTH / 2, REC_HEIGHT / 2, fill=RECTANGLE_TEXT_COLOUR,
font=f"{'Futura'} {font_data[1]} {'bold'}",
text="Welcome to Speedy")
test_text_canvas.grid(row=0, column=0)
# ----------------------------------------------------------------------------------------------------------------------
# The controls of the start window
controlls_frame = ttk.Frame(window)
scale_variable = tk.DoubleVar()
difficulty_frame = ttk.Frame(controlls_frame)
difficulty_label = ttk.Label(difficulty_frame, text="Choose test time:", font=("Futura", 10, "normal"))
time_val_label = ttk.Label(difficulty_frame, font=("Futura", 8, "normal"))
difficulty_scale = ttk.Scale(difficulty_frame, variable=scale_variable, length=REC_WIDTH - 140, orient=tk.HORIZONTAL,
from_=10, to=180, command=show_time)
difficulty_scale.set(60)
difficulty_label.grid(row=0, column=0)
difficulty_scale.grid(row=0, column=1, padx=(10, 0))
time_val_label.grid(row=0, column=2)
difficulty_frame.grid(row=1, column=0, pady=(20, 0))
start_button = ttk.Button(controlls_frame, width=WIDGET_WIDTH, text="Start Test", command=start_test)
start_button.grid(row=2, column=0, pady=(20, 0))
controlls_frame.grid(row=1, column=0, pady=(20, 0))
# ----------------------------------------------------------------------------------------------------------------------
test_frame = ttk.Frame(window)
test_entery_label = ttk.Label(test_frame, text="Start typing the words above.", font=(font_data[0], 10, font_data[2]))
test_entery = ttk.Entry(test_frame, width=WIDGET_WIDTH)
test_entery.bind("<KeyPress>", change_text)
show_result_btn = ttk.Button(test_frame, width=WIDGET_WIDTH, text="Show results", command=show_result)
test_entery_label.grid(row=0, column=0)
test_entery.grid(row=1, column=0)
# ----------------------------------------------------------------------------------------------------------------------
# The result screen reuses the main rectangle's dimensions
small_rec_width = REC_WIDTH / 2
font = "Futura 20 bold"
test_result_frame = ttk.Frame(window)
# Button to home
home_btn = ttk.Button(test_result_frame, command=go_home, text="Redo Test", width=100)
# Title canvas that displays the brain.is_test_complete["message"]
title_canvas = tk.Canvas(test_result_frame, width=REC_WIDTH, height=REC_HEIGHT,
background=BACKGROUND_COLOUR,
highlightthickness=0)
title_rec = round_rectangle(title_canvas, 0, 0, REC_WIDTH, REC_HEIGHT, radius=50,
fill=RECTANGLE_COLOUR,
width=REC_WIDTH)
title_text = title_canvas.create_text(REC_WIDTH / 2, REC_HEIGHT / 2,
fill=RECTANGLE_TEXT_COLOUR,
font=font)
# Errors canvas that displays the brain.user_score["errors"]
errors_canvas = tk.Canvas(test_result_frame, width=small_rec_width, height=REC_HEIGHT,
background=BACKGROUND_COLOUR,
highlightthickness=0)
errors_rec = round_rectangle(errors_canvas, 0, 0, small_rec_width, REC_HEIGHT, radius=50,
fill=RED,
width=small_rec_width)
errors_text = errors_canvas.create_text(small_rec_width / 2, REC_HEIGHT / 2,
fill=RECTANGLE_TEXT_COLOUR,
font=font)
# Speed canvas which displays the brain.user_score["typing_speed"]
speed_canvas = tk.Canvas(test_result_frame, width=small_rec_width, height=REC_HEIGHT,
background=BACKGROUND_COLOUR,
highlightthickness=0)
speed_rec = round_rectangle(speed_canvas, 0, 0, small_rec_width, REC_HEIGHT, radius=50,
fill=YELLOW,
width=small_rec_width)
speed_text = speed_canvas.create_text(small_rec_width / 2, REC_HEIGHT / 2,
fill=RECTANGLE_TEXT_COLOUR,
font=font)
title_canvas.grid(row=0, column=0, columnspan=2)
errors_canvas.grid(row=1, column=0)
speed_canvas.grid(row=1, column=1)
home_btn.grid(row=2, column=0, columnspan=2)
# ----------------------------------------------------------------------------------------------------------------------
window.mainloop()
| 42.83815 | 120 | 0.612603 | 901 | 7,411 | 4.772475 | 0.174251 | 0.03907 | 0.029302 | 0.022791 | 0.461628 | 0.359302 | 0.307442 | 0.253953 | 0.244419 | 0.23814 | 0 | 0.022705 | 0.191742 | 7,411 | 172 | 121 | 43.087209 | 0.695159 | 0.136419 | 0 | 0.178295 | 0 | 0 | 0.069986 | 0.02051 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03876 | false | 0 | 0.054264 | 0 | 0.093023 | 0.007752 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e224dce84cd098e6d5bb11fe55ee39e1bc3fea | 1,687 | py | Python | pyvast/vast.py | knapperzbusch/vast | 9d2af995254519b47febe2062adbc55965055cbe | [
"BSD-3-Clause"
] | null | null | null | pyvast/vast.py | knapperzbusch/vast | 9d2af995254519b47febe2062adbc55965055cbe | [
"BSD-3-Clause"
] | 1 | 2019-11-29T12:43:41.000Z | 2019-11-29T12:43:41.000Z | pyvast/vast.py | knapperzbusch/vast | 9d2af995254519b47febe2062adbc55965055cbe | [
"BSD-3-Clause"
] | null | null | null | """python vast module
Example:
Disclaimer: `await` does not work in the python3.7 repl,
use either python3.8 or ipython.
Create a connector to a VAST server:
> from pyvast import VAST
> vast = VAST(app="/opt/tenzir/bin/vast")
Test if the connector works:
> await vast.connect()
Extract some Data:
> data = await vast.query(":addr == 192.168.1.104")
"""
import asyncio
import logging
import pyarrow
async def spawn(*args):
"""Spawns a process asynchronously."""
proc = await asyncio.create_subprocess_exec(*args, stdout=asyncio.subprocess.PIPE)
return proc
class VAST:
"""A VAST node handle"""
def __init__(self, app="vast", endpoint="localhost:42000"):
self.logger = logging.getLogger("vast")
self.app = app
self.endpoint = endpoint
self.logger.debug("connecting to vast on %s", self.endpoint)
async def query(self, expression):
"""Extracts data from VAST according to a query"""
self.logger.debug("running query %s", expression)
proc = await spawn(self.app, "export", "arrow", expression)
# This cannot be avoided, but the reader does not create a second copy,
# so we can only do better with memory mapping, i.e. Plasma.
output = (await proc.communicate())[0]
reader = pyarrow.ipc.open_stream(output)
table = reader.read_all()
return table
async def connect(self):
"""Checks if the endpoint can be connected to"""
proc = await spawn(self.app, "status")
await proc.communicate()
        return proc.returncode == 0
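
if __name__ == "__main__":
    # Minimal smoke test (illustrative only, not part of the library): assumes a
    # `vast` binary on the PATH and a node listening on the default endpoint.
    async def _demo():
        vast = VAST()
        if await vast.connect():
            table = await vast.query(":addr == 192.168.1.104")
            print(f"extracted {table.num_rows} rows")
        else:
            print("could not reach the VAST node")

    asyncio.run(_demo())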
| 30.125 | 86 | 0.637226 | 221 | 1,687 | 4.828054 | 0.524887 | 0.026242 | 0.028116 | 0.033739 | 0.039363 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01668 | 0.253705 | 1,687 | 55 | 87 | 30.672727 | 0.830818 | 0.315353 | 0 | 0 | 0 | 0 | 0.079602 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.115385 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e4cd7219f4ca477ecd9a9016025d803e4d41b9 | 633 | py | Python | Point charges/main.py | NoahSchiro/3d-physics | bb1700f968016953b5781f3266882377f35d3ac2 | [
"MIT"
] | null | null | null | Point charges/main.py | NoahSchiro/3d-physics | bb1700f968016953b5781f3266882377f35d3ac2 | [
"MIT"
] | 4 | 2021-09-01T21:03:52.000Z | 2021-09-09T13:32:14.000Z | Point charges/main.py | NoahSchiro/3d-physics | bb1700f968016953b5781f3266882377f35d3ac2 | [
"MIT"
] | null | null | null | from electric_field import *
from physics_engine import *
import math
# Constant variables
scene.width = 1600
scene.height = 800
# Create field
my_field = electric_field()
my_charge = point_charge(0.001, -1E-9, 10, 10, 10)
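# calculate_field() takes a list of charges, so more could be created here,
# e.g. (hypothetical mirror charge with opposite sign):
# other_charge = point_charge(0.001, 1E-9, -10, -10, -10)
# and passed together below via my_field.calculate_field([my_charge, other_charge])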
while True:
# Set frame rate
rate(20)
# This allows the charge to bounce around
if my_charge.pos[0] >= 10:
my_charge.apply_force([-0.1, -0.1, -0.1])
if my_charge.pos[0] <= -10:
my_charge.apply_force([0.1, 0.1, 0.1])
# Calculates the influence of the charge on space
my_field.calculate_field([my_charge])
# Moves the charge
my_charge.update_position() | 22.607143 | 53 | 0.672986 | 102 | 633 | 4.009804 | 0.480392 | 0.136919 | 0.02934 | 0.03912 | 0.195599 | 0.195599 | 0.195599 | 0.195599 | 0.195599 | 0.195599 | 0 | 0.078313 | 0.21327 | 633 | 28 | 54 | 22.607143 | 0.742972 | 0.238547 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e727d76b8ef633ed4385538bc9aa5adf0eb38a | 17,733 | py | Python | tests/opwen_email_client/domain/email/test_store.py | resonancellc/opwen-webapp | 20d008415727016a908921cc6bbbb55dc9dfffe1 | [
"Apache-2.0"
] | null | null | null | tests/opwen_email_client/domain/email/test_store.py | resonancellc/opwen-webapp | 20d008415727016a908921cc6bbbb55dc9dfffe1 | [
"Apache-2.0"
] | null | null | null | tests/opwen_email_client/domain/email/test_store.py | resonancellc/opwen-webapp | 20d008415727016a908921cc6bbbb55dc9dfffe1 | [
"Apache-2.0"
] | 1 | 2019-12-18T14:35:46.000Z | 2019-12-18T14:35:46.000Z | from abc import ABCMeta
from abc import abstractmethod
from unittest import TestCase
from typing import Iterable
from typing import List
from opwen_email_client.domain.email.store import EmailStore
class Base(object):
class EmailStoreTests(TestCase, metaclass=ABCMeta):
page_size = 10
@abstractmethod
def create_email_store(self, restricted=None) -> EmailStore:
raise NotImplementedError
def setUp(self):
self.email_store = self.create_email_store()
def given_emails(self, *emails: dict) -> List[dict]:
self.email_store.create(emails)
return list(emails)
def assertContainsEmail(self, expected: dict, collection: Iterable[dict]):
def cleanup(email):
email = {key: value for (key, value) in email.items() if value}
email['from'] = email.get('from', '').lower() or None
email['to'] = [_.lower() for _ in email.get('to', [])] or None
email['cc'] = [_.lower() for _ in email.get('cc', [])] or None
email['bcc'] = [_.lower() for _ in email.get('bcc', [])] or None
return email
self.assertIn(cleanup(expected), [cleanup(actual) for actual in collection])
def test_creates_email_id(self):
email = self.given_emails({'to': ['foo@bar.com']})[0]
self.assertIsNotNone(email.get('_uid'), 'email id was not set')
def test_filters_restricted_inboxes(self):
restricted1 = 'restricted@test.com'
allowed1, allowed2 = 'allowed1@bar.com', 'allowed2@baz.com'
self.email_store = self.create_email_store({restricted1: {allowed1, allowed2}})
kept = self.given_emails(
{'to': [restricted1], 'from': allowed1},
{'to': [restricted1], 'from': allowed1},
{'cc': [restricted1], 'from': allowed2},
{'bcc': [restricted1, allowed1], 'from': allowed2},
)
self.given_emails(
{'to': [restricted1], 'from': 'unknown1@baz.com'},
{'cc': [restricted1], 'from': 'unknown2@baz.com'},
{'cc': [restricted1], 'from': 'unknown2@baz.com'},
{'bcc': [restricted1], 'from': 'unknown3@baz.com'},
)
results = self.email_store.inbox(restricted1, page=1)
results = list(results)
self.assertEqual(len(results), 4)
self.assertContainsEmail(kept[0], results)
self.assertContainsEmail(kept[1], results)
self.assertContainsEmail(kept[2], results)
self.assertContainsEmail(kept[3], results)
def test_does_not_overwrite_email_id(self):
emails1 = self.given_emails(
{'to': ['foo@bar.com']},
{'to': ['foo@bar.com']},
)
emails2 = self.given_emails(
{'to': ['bar@bar.com']},
*emails1,
)
results = self.email_store.inbox('foo@bar.com', page=1)
self.assertEqual(len(list(results)), 2)
results = self.email_store.inbox('bar@bar.com', page=1)
self.assertEqual(len(list(results)), 1)
ids1 = [_['_uid'] for _ in emails1]
ids2 = [_['_uid'] for _ in emails2[1:]]
self.assertEqual(ids1, ids2)
def test_create_with_existing_attachment(self):
self.given_emails({
'to': ['foo@bar.com'], 'attachments': [
{'_uid': 'a1', 'filename': 'a1', 'content': b'a1'},
{'_uid': 'a2', 'filename': 'a2', 'content': b'a2'},
]
})
self.given_emails({
'to': ['foo@bar.com'], 'attachments': [
{'_uid': 'a1', 'filename': 'a1', 'content': b'a1'},
{'_uid': 'a3', 'filename': 'a3', 'content': b'a3'},
]
})
results = self.email_store.inbox('foo@bar.com', page=1)
self.assertEqual(len(list(results)), 2)
def test_inbox(self):
emails = self.given_emails(
{'to': ['Foo@bar.com'], 'sent_at': '2017-09-10 11:11'},
{'to': ['foo@bar.com'], 'sent_at': '2017-09-10 11:11'},
{'cc': ['foo@bar.com'], 'sent_at': '2017-09-10 11:11'},
{'bcc': ['foo@bar.com'], 'sent_at': '2017-09-10 11:11'},
{'from': 'foo@bar.com', 'sent_at': '2017-09-10 11:11'},
{'from': 'baz@bar.com', 'sent_at': '2017-09-10 11:11'},
)
results = self.email_store.inbox('foo@bar.com', page=1)
results = list(results)
self.assertEqual(len(results), 4)
self.assertContainsEmail(emails[0], results)
self.assertContainsEmail(emails[1], results)
self.assertContainsEmail(emails[2], results)
self.assertContainsEmail(emails[3], results)
def test_inbox_paginated(self):
self.given_emails(*[{
'to': ['Foo@bar.com'],
'subject': 'email{}'.format(i),
'sent_at': '2017-09-10 11:11',
} for i in range(self.page_size + 3)])
results = self.email_store.inbox('foo@bar.com', page=1)
results = list(results)
self.assertEqual(len(results), self.page_size)
results = self.email_store.inbox('foo@bar.com', page=2)
results = list(results)
self.assertEqual(len(results), 3)
def test_unread(self):
self.given_emails(
{'to': ['Foo@bar.com'], 'sent_at': '2017-09-10 11:11', 'read': False},
{'to': ['foo@bar.com'], 'sent_at': '2017-09-10 11:11', 'read': True},
{'to': ['bar@bar.com'], 'sent_at': '2017-09-10 11:11', 'read': True},
{'to': ['baz@bar.com'], 'sent_at': '2017-09-10 11:11'},
)
result = self.email_store.has_unread('foo@bar.com')
self.assertTrue(result)
result = self.email_store.has_unread('bar@bar.com')
self.assertFalse(result)
result = self.email_store.has_unread('baz@bar.com')
self.assertTrue(result)
def test_outbox(self):
emails = self.given_emails(
{'from': 'foo@bar.com'},
{'from': 'foo@bar.com', 'sent_at': '2017-09-10 11:11'},
{'from': 'foo@bar.com', 'sent_at': None},
{'from': 'Foo@bar.com', 'sent_at': None},
{'to': ['foo@bar.com']},
{'cc': ['foo@bar.com']},
{'bcc': ['foo@bar.com']},
{'from': 'baz@bar.com', 'sent_at': '2017-09-10 11:11'},
)
results = self.email_store.outbox('foo@bar.com', page=1)
results = list(results)
self.assertEqual(len(results), 3)
self.assertContainsEmail(emails[0], results)
self.assertContainsEmail(emails[2], results)
self.assertContainsEmail(emails[3], results)
def test_pending(self):
emails = self.given_emails(
{'from': 'foo@bar.com'},
{'from': 'foo@bar.com', 'sent_at': '2017-09-10 11:11'},
{'from': 'foo@bar.com', 'sent_at': None},
{'from': 'baz@bar.com'},
{'from': 'baz@bar.com', 'sent_at': '2017-09-10 11:11'},
)
results = self.email_store.pending(page=1)
results = list(results)
self.assertEqual(len(results), 3)
self.assertContainsEmail(emails[0], results)
self.assertContainsEmail(emails[2], results)
self.assertContainsEmail(emails[3], results)
def test_pending_without_pagination(self):
self.given_emails(*[{
'from': 'foo@bar.com',
'subject': 'email{}'.format(i),
'sent_at': None,
} for i in range(self.page_size + 3)])
results = self.email_store.pending(page=None)
results = list(results)
self.assertEqual(len(results), 13)
def test_num_pending(self):
count = self.email_store.num_pending()
self.assertEqual(count, 0)
self.given_emails(
{'from': 'foo@bar.com'},
{'from': 'foo@bar.com', 'sent_at': '2017-09-10 11:11'},
{'from': 'foo@bar.com', 'sent_at': None},
{'from': 'baz@bar.com'},
{'from': 'baz@bar.com', 'sent_at': '2017-09-10 11:11'},
)
count = self.email_store.num_pending()
self.assertEqual(count, 3)
def test_sent(self):
emails = self.given_emails(
{'from': 'foo@bar.com'},
{'from': 'foo@bar.com', 'sent_at': '2017-09-10 11:11'},
{'from': 'foo@bar.com', 'sent_at': None},
{'to': ['foo@bar.com']},
{'cc': ['foo@bar.com']},
{'bcc': ['foo@bar.com']},
{'from': 'baz@bar.com', 'sent_at': '2017-09-10 11:11'},
)
results = self.email_store.sent('foo@bar.com', page=1)
results = list(results)
self.assertEqual(len(results), 1)
self.assertContainsEmail(emails[1], results)
def test_search_for_sender(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'from': 'baz@bar.com'},
{'to': ['foo@bar.com'], 'from': 'fuz@bar.com'},
{'to': ['baz@bar.com'], 'from': 'fuz@bar.com'},
)
results = self.email_store.search('foo@bar.com', page=1, query='fuz')
results = list(results)
self.assertEqual(len(results), 1)
self.assertContainsEmail(emails[1], results)
def test_search_for_body(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'body': 'bar koala bar'},
{'to': ['foo@bar.com'], 'body': 'baz'},
{'from': 'foo@bar.com', 'body': 'koala'},
{'cc': ['foo@bar.com'], 'body': 'koala'},
{'bcc': ['foo@bar.com'], 'body': 'koala'},
{'to': ['baz@bar.com'], 'body': 'baz'},
)
results = self.email_store.search('foo@bar.com', page=1, query='koala')
results = list(results)
self.assertEqual(len(results), 4)
self.assertContainsEmail(emails[0], results)
self.assertContainsEmail(emails[2], results)
self.assertContainsEmail(emails[3], results)
self.assertContainsEmail(emails[4], results)
def test_search_for_subject(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'bar koala bar'},
{'to': ['foo@bar.com'], 'subject': 'baz'},
{'from': 'foo@bar.com', 'subject': 'koala'},
{'cc': ['foo@bar.com'], 'subject': 'koala'},
{'bcc': ['foo@bar.com'], 'subject': 'koala'},
{'to': ['baz@bar.com'], 'subject': 'baz'},
)
results = self.email_store.search('foo@bar.com', page=1, query='koala')
results = list(results)
self.assertEqual(len(results), 4)
self.assertContainsEmail(emails[0], results)
self.assertContainsEmail(emails[2], results)
self.assertContainsEmail(emails[3], results)
self.assertContainsEmail(emails[4], results)
def test_search_without_query(self):
self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'bar foo bar'},
{'to': ['baz@bar.com'], 'subject': 'baz'},
)
results = self.email_store.search('foo@bar.com', page=1, query=None)
results = list(results)
self.assertEqual(results, [])
def test_mark_sent(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'foo'},
{'to': ['baz@bar.com'], 'subject': 'bar'},
)
self.email_store.mark_sent(emails)
pending = self.email_store.pending(page=1)
pending = list(pending)
self.assertEqual(pending, [])
def test_mark_read(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'foo'},
{'to': ['baz@bar.com'], 'subject': 'bar'},
)
self.email_store.mark_read('foo@bar.com', emails)
read_emails = self.email_store.inbox('foo@bar.com', page=1)
read_emails = list(read_emails)
unchanged_emails = self.email_store.inbox('baz@bar.com', page=1)
unchanged_emails = list(unchanged_emails)
self.assertTrue(all(email.get('read') for email in read_emails))
self.assertTrue(not any(email.get('read') for email in unchanged_emails))
def test_delete(self):
emails = self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'deleted1'},
{'to': ['foo@bar.com'], 'subject': 'deleted2'},
{'to': ['foo@bar.com'], 'subject': 'not-deleted1'},
{'to': ['foo@bar.com'], 'subject': 'not-deleted2'},
{'to': ['baz@bar.com'], 'subject': 'bar'},
)
self.email_store.delete('foo@bar.com', emails[:2])
deleted_emails = self.email_store.inbox('foo@bar.com', page=1)
deleted_emails = list(deleted_emails)
unchanged_emails = self.email_store.inbox('baz@bar.com', page=1)
unchanged_emails = list(unchanged_emails)
self.assertEqual(len(deleted_emails), 2)
self.assertEqual(deleted_emails[0]['_uid'], emails[2]['_uid'])
self.assertEqual(deleted_emails[1]['_uid'], emails[3]['_uid'])
self.assertEqual(len(unchanged_emails), 1)
self.assertEqual(unchanged_emails[0]['_uid'], emails[4]['_uid'])
def test_get(self):
given = self.given_emails(
{
'to': ['foo@bar.com'], 'subject': 'foo', 'attachments':
[{'filename': 'foo.txt', 'content': b'foo.txt', 'cid': None}]
},
{'to': ['baz@bar.com'], 'subject': 'bar'},
)
actual = self.email_store.get(given[0]['_uid'])
self.assertEqual(actual, given[0])
def test_get_with_separate_attachments(self):
self.given_emails(
{'_type': 'email', '_uid': 'e1', 'to': ['foo@bar.com'], 'subject': 'foo'},
{'_type': 'email', '_uid': 'e2', 'to': ['baz@bar.com'], 'subject': 'bar'},
{'_type': 'email', '_uid': 'e3', 'to': ['buz@buz.com'], 'subject': 'buz'},
{'_type': 'email', '_uid': 'e4', 'to': ['buz@foo.com'], 'subject': 'foobuz'},
{
'_type': 'attachment', '_uid': 'a1', 'emails': ['e1', 'e4'], 'filename': 'foo.txt', 'content':
b'foo.txt', 'cid': None
},
{
'_type': 'attachment', '_uid': 'a2', 'emails': ['e3'], 'filename': 'bar.txt', 'content': b'bar.txt',
'cid': None
},
)
actual1 = self.email_store.get('e1')
actual4 = self.email_store.get('e4')
self.assertEqual(actual1.get('attachments'),
[{'_uid': 'a1', 'filename': 'foo.txt', 'content': b'foo.txt', 'cid': None}])
self.assertEqual(actual1.get('attachments'), actual4.get('attachments'))
actual3 = self.email_store.get('e3')
self.assertEqual(actual3.get('attachments'),
[{'_uid': 'a2', 'filename': 'bar.txt', 'content': b'bar.txt', 'cid': None}])
actual2 = self.email_store.get('e2')
self.assertIsNone(actual2.get('attachments'))
def test_get_without_match(self):
self.given_emails(
{'to': ['foo@bar.com'], 'subject': 'foo'},
{'to': ['baz@bar.com'], 'subject': 'bar'},
)
actual = self.email_store.get('uid-does-not-exist')
self.assertIsNone(actual)
def test_get_attachment(self):
self.given_emails(
{
'_type': 'attachment', '_uid': 'a1', 'emails': ['e1', 'e4'], 'filename': 'foo.txt', 'content':
b'foo.txt', 'cid': None
},
{
'_type': 'attachment', '_uid': 'a2', 'emails': ['e3'], 'filename': 'bar.txt', 'content': b'bar.txt',
'cid': None
},
)
actual = self.email_store.get_attachment('a1')
self.assertEqual(actual['_uid'], 'a1')
actual = self.email_store.get_attachment('a2')
self.assertEqual(actual['_uid'], 'a2')
def test_get_attachment_without_match(self):
self.given_emails(
{
'_type': 'attachment', '_uid': 'a1', 'emails': ['e1', 'e4'], 'filename': 'foo.txt', 'content':
b'foo.txt', 'cid': None
},
{
'_type': 'attachment', '_uid': 'a2', 'emails': ['e3'], 'filename': 'bar.txt', 'content': b'bar.txt',
'cid': None
},
)
actual = self.email_store.get_attachment('uid-does-not-exist')
self.assertIsNone(actual)
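
# A concrete test case would subclass Base.EmailStoreTests and supply a real
# store; a hypothetical sketch (SqliteEmailStore is an assumed implementation):
#
# class SqliteEmailStoreTests(Base.EmailStoreTests):
#     def create_email_store(self, restricted=None) -> EmailStore:
#         return SqliteEmailStore(restricted_inboxes=restricted)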
| 39.582589 | 120 | 0.498788 | 1,919 | 17,733 | 4.484106 | 0.081292 | 0.069727 | 0.074259 | 0.035793 | 0.703312 | 0.652063 | 0.598722 | 0.553167 | 0.539221 | 0.472865 | 0 | 0.032893 | 0.326228 | 17,733 | 447 | 121 | 39.671141 | 0.687312 | 0 | 0 | 0.415473 | 0 | 0 | 0.184515 | 0 | 0 | 0 | 0 | 0 | 0.186246 | 1 | 0.083095 | false | 0 | 0.017192 | 0 | 0.111748 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e744f33625829e440fa00f41cd61908cd2efcb | 1,513 | py | Python | src/functions/floodControl.py | dinesh-0602/TorrentSeedr | 51bc75f34e9b07feba39af91021655914be85b6e | [
"MIT"
] | 32 | 2021-08-20T04:05:26.000Z | 2022-03-27T17:35:47.000Z | src/functions/floodControl.py | rajV2/TorrentSeedr | 51bc75f34e9b07feba39af91021655914be85b6e | [
"MIT"
] | null | null | null | src/functions/floodControl.py | rajV2/TorrentSeedr | 51bc75f34e9b07feba39af91021655914be85b6e | [
"MIT"
] | 32 | 2021-08-21T14:21:46.000Z | 2022-03-30T02:05:02.000Z | import time
from src.objs import *
#: Flood prevention
def floodControl(message, userLanguage):
userId = message.from_user.id
if userId == config['adminId']:
return True
    #! If the user is not currently blocked
    if not (dbSql.getSetting(userId, 'blockTill', table='flood') - int(time.time()) > 0):
lastMessage = dbSql.getSetting(userId, 'lastMessage', table='flood')
        called = isinstance(message, telebot.types.CallbackQuery)
messageDate = int(time.time()) if called else message.date
#! Spam detected
if messageDate - lastMessage < 1:
#! If the user is already warned, block for 5 minutes
if dbSql.getSetting(userId, 'warned', table='flood'):
bot.send_message(message.message.chat.id if called else message.chat.id, language['blockedTooFast'][userLanguage])
dbSql.setSetting(userId, 'blockTill', int(time.time())+300, table='flood')
dbSql.setSetting(userId, 'warned', 0, table='flood')
#! If the user is not warned, warn for the first time
else:
bot.send_message(message.message.chat.id if called else message.chat.id, language['warningTooFast'][userLanguage])
dbSql.setSetting(userId, 'warned', 1, table='flood')
return False
#! No spam
else:
dbSql.setSetting(userId, 'lastMessage', messageDate, table='flood')
return True
| 40.891892 | 130 | 0.610707 | 172 | 1,513 | 5.354651 | 0.343023 | 0.076004 | 0.05646 | 0.035831 | 0.175896 | 0.145494 | 0.145494 | 0.145494 | 0.145494 | 0.145494 | 0 | 0.007319 | 0.277594 | 1,513 | 36 | 131 | 42.027778 | 0.835316 | 0.114342 | 0 | 0.181818 | 0 | 0 | 0.095952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33e7f51187f7b1f15f67e176d3d2914a0ae8b45e | 3,449 | py | Python | src/legacy/unit/basedimension.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 98 | 2015-03-31T06:42:14.000Z | 2022-03-13T12:07:37.000Z | src/legacy/unit/basedimension.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 121 | 2015-07-13T15:57:02.000Z | 2022-03-24T20:07:10.000Z | src/legacy/unit/basedimension.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 66 | 2015-04-06T03:05:04.000Z | 2022-02-26T05:11:59.000Z | #!/bin/env python
"""
Module simtk.unit.basedimension
BaseDimension class for use by units and quantities.
BaseDimensions are things like "length" and "mass".
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Peter Eastman
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from builtins import object
__author__ = "Christopher M. Bruns"
__version__ = "0.6"
class BaseDimension(object):
'''
A physical dimension such as length, mass, or temperature.
It is unlikely the user will need to create new ones.
'''
# Keep deterministic order of dimensions
_index_by_name = {
'mass': 1,
'length': 2,
'time': 3,
'temperature': 4,
'amount': 5,
'charge': 6,
'luminous intensity': 7,
'angle': 8,
}
_next_unused_index = 9
def __init__(self, name):
"""Create a new BaseDimension.
Each new BaseDimension is assumed to be independent of all other BaseDimensions.
Use the existing BaseDimensions in simtk.dimension instead of creating
new ones.
"""
self.name = name
if not self.name in BaseDimension._index_by_name.keys():
BaseDimension._index_by_name[name] = BaseDimension._next_unused_index
BaseDimension._next_unused_index += 1
self._index = BaseDimension._index_by_name[name]
def __lt__(self, other):
"""
The implicit order of BaseDimensions is the order in which they were created.
This method is used for using BaseDimensions as hash keys, and also affects
the order in which units appear in multi-dimensional Quantities.
Returns True if self < other, False otherwise.
"""
return self._index < other._index
def __hash__(self):
"""
Needed for using BaseDimensions as hash keys.
"""
return self._index
def __repr__(self):
return 'BaseDimension("%s")' % self.name
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
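    # Illustrative extra check (not part of the original module): instances
    # compare by their creation/registration order, so 'mass' < 'length'.
    assert BaseDimension('mass') < BaseDimension('length')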
| 34.148515 | 88 | 0.71354 | 466 | 3,449 | 5.150215 | 0.48927 | 0.036667 | 0.018333 | 0.03 | 0.05 | 0.026667 | 0 | 0 | 0 | 0 | 0 | 0.008959 | 0.223253 | 3,449 | 100 | 89 | 34.49 | 0.886898 | 0.669469 | 0 | 0 | 0 | 0 | 0.10989 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.066667 | 0.033333 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33eea147dcf11c511b4a9c8d6be444a952495a54 | 1,962 | py | Python | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/lab_announcement_properties_fragment_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/lab_announcement_properties_fragment_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/lab_announcement_properties_fragment_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class LabAnnouncementPropertiesFragment(Model):
"""Properties of a lab's announcement banner.
:param title: The plain text title for the lab announcement
:type title: str
:param markdown: The markdown text (if any) that this lab displays in the
UI. If left empty/null, nothing will be shown.
:type markdown: str
:param enabled: Is the lab announcement active/enabled at this time?.
Possible values include: 'Enabled', 'Disabled'
:type enabled: str or ~azure.mgmt.devtestlabs.models.EnableStatus
:param expiration_date: The time at which the announcement expires (null
for never)
:type expiration_date: datetime
:param expired: Has this announcement expired?
:type expired: bool
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'markdown': {'key': 'markdown', 'type': 'str'},
'enabled': {'key': 'enabled', 'type': 'str'},
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'expired': {'key': 'expired', 'type': 'bool'},
}
def __init__(self, *, title: str=None, markdown: str=None, enabled=None, expiration_date=None, expired: bool=None, **kwargs) -> None:
super(LabAnnouncementPropertiesFragment, self).__init__(**kwargs)
self.title = title
self.markdown = markdown
self.enabled = enabled
self.expiration_date = expiration_date
self.expired = expired
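
# Example usage (illustrative values only):
# fragment = LabAnnouncementPropertiesFragment(title='Planned maintenance',
#                                              markdown='Lab offline tonight',
#                                              enabled='Enabled')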
| 40.875 | 137 | 0.625382 | 221 | 1,962 | 5.479638 | 0.479638 | 0.069364 | 0.029728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003159 | 0.19317 | 1,962 | 47 | 138 | 41.744681 | 0.761845 | 0.552497 | 0 | 0 | 0 | 0 | 0.172457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33eea834853e14b35733d601c03bdedc405549d4 | 30,432 | py | Python | target_extraction/dataset_parsers.py | apmoore1/target-extraction | 4139ecdc432411fcc4ed2723f4165e7dae93544d | [
"Apache-2.0"
] | 5 | 2019-07-27T13:57:47.000Z | 2021-06-16T13:17:44.000Z | target_extraction/dataset_parsers.py | apmoore1/target-extraction | 4139ecdc432411fcc4ed2723f4165e7dae93544d | [
"Apache-2.0"
] | 26 | 2019-05-01T11:56:35.000Z | 2020-06-18T16:06:40.000Z | target_extraction/dataset_parsers.py | apmoore1/target-extraction | 4139ecdc432411fcc4ed2723f4165e7dae93544d | [
"Apache-2.0"
] | 1 | 2019-07-11T07:16:09.000Z | 2019-07-11T07:16:09.000Z | '''
This module contains all the functions that will parse a particular dataset
into a `target_extraction.data_types.TargetTextCollection` object.
Functions:
1. semeval_2014
'''
import json
from pathlib import Path
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element
from xml.etree.ElementTree import ParseError
from typing import List, Union, Dict, Any, Optional
import tempfile
import zipfile
import tarfile
from allennlp.common.file_utils import cached_path
import requests
from target_extraction.data_types import TargetTextCollection, TargetText
from target_extraction.data_types_util import Span
CACHE_DIRECTORY = Path(Path.home(), '.bella_tdsa')
def _semeval_extract_data(sentence_tree: Element, conflict: bool
) -> TargetTextCollection:
'''
:param sentence_tree: The root element of the XML tree that has come
from a SemEval XML formatted XML File.
:param conflict: Whether or not to include targets or categories that
have the `conflict` sentiment value. True is to include
conflict targets and categories.
:returns: The SemEval data formatted into a
`target_extraction.data_types.TargetTextCollection` object.
'''
target_text_collection = TargetTextCollection()
for sentence in sentence_tree:
text_id = sentence.attrib['id']
targets: List[str] = []
target_sentiments: List[Union[str, int]] = []
spans: List[Span] = []
category_sentiments: List[Union[str, int]] = []
categories: List[str] = []
for data in sentence:
if data.tag == 'text':
text = data.text
text = text.replace(u'\xa0', u' ')
elif data.tag == 'aspectTerms':
for target in data:
                    # Skip this target if it has the 'conflict' sentiment
                    # and the conflict argument is False
target_sentiment = target.attrib['polarity']
if not conflict and target_sentiment == 'conflict':
continue
targets.append(target.attrib['term'].replace(u'\xa0', u' '))
target_sentiments.append(target_sentiment)
span_from = int(target.attrib['from'])
span_to = int(target.attrib['to'])
spans.append(Span(span_from, span_to))
elif data.tag == 'aspectCategories':
for category in data:
                    # Skip this category if it has the 'conflict' sentiment
                    # and the conflict argument is False
category_sentiment = category.attrib['polarity']
if not conflict and category_sentiment == 'conflict':
continue
categories.append(category.attrib['category'])
category_sentiments.append(category.attrib['polarity'])
elif data.tag == 'Opinions':
for opinion in data:
category_target_sentiment = opinion.attrib['polarity']
if not conflict and category_target_sentiment == 'conflict':
continue
# Handle the case where some of the SemEval 16 files do
# not contain targets and are only category sentiment files
if 'target' in opinion.attrib:
# Handle the case where there is a category but no
# target
target_text = opinion.attrib['target'].replace(u'\xa0', u' ')
span_from = int(opinion.attrib['from'])
span_to = int(opinion.attrib['to'])
# Special cases for poor annotation in SemEval 2016
# task 5 subtask 1 Restaurant dataset
if text_id == 'DBG#2:15' and target_text == 'NULL':
span_from = 0
span_to = 0
if text_id == "en_Patsy'sPizzeria_478231878:2"\
and target_text == 'NULL':
span_to = 0
if text_id == "en_MercedesRestaurant_478010602:1" \
and target_text == 'NULL':
span_to = 0
if text_id == "en_MiopostoCaffe_479702043:9" \
and target_text == 'NULL':
span_to = 0
if text_id == "en_MercedesRestaurant_478010600:1" \
and target_text == 'NULL':
span_from = 0
span_to = 0
if target_text == 'NULL':
target_text = None
# Special cases for poor annotation in SemEval 2016
# task 5 subtask 1 Restaurant dataset
if text_id == '1490757:0':
target_text = 'restaurant'
if text_id == 'TR#1:0' and span_from == 27:
target_text = 'spot'
if text_id == 'TFS#5:26':
target_text = "environment"
if text_id == 'en_SchoonerOrLater_477965850:10':
target_text = 'Schooner or Later'
targets.append(target_text)
spans.append(Span(span_from, span_to))
categories.append(opinion.attrib['category'])
target_sentiments.append(category_target_sentiment)
target_text_kwargs = {'targets': targets, 'spans': spans, 'text_id': text_id,
'target_sentiments': target_sentiments,
'categories': categories, 'text': text,
'category_sentiments': category_sentiments}
for key in target_text_kwargs:
if not target_text_kwargs[key]:
target_text_kwargs[key] = None
target_text = TargetText(**target_text_kwargs)
target_text_collection.add(target_text)
return target_text_collection
def semeval_2014(data_fp: Path, conflict: bool) -> TargetTextCollection:
'''
The sentiment labels are the following: 1. negative, 2. neutral,
3. positive, and 4. conflict. conflict will not appear if the argument
`conflict` is False.
:param data_fp: Path to the SemEval 2014 formatted file.
:param conflict: Whether or not to include targets or categories that
have the `conflict` sentiment value. True is to include
conflict targets and categories.
:returns: The SemEval 2014 data formatted into a
`target_extraction.data_types.TargetTextCollection` object.
:raises SyntaxError: If the File passed is detected as not a SemEval
formatted file.
:raises `xml.etree.ElementTree.ParseError`: If the File passed is
not formatted correctly e.g.
mismatched tags
'''
tree = ET.parse(data_fp)
sentences = tree.getroot()
if sentences.tag != 'sentences':
raise SyntaxError('The root of all semeval xml files should '
f'be sentences and not {sentences.tag}')
return _semeval_extract_data(sentences, conflict)
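
# Example usage (the file path is hypothetical):
# train_data = semeval_2014(Path('Restaurants_Train_v2.xml'), conflict=False)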
def semeval_2016(data_fp: Path, conflict: bool) -> TargetTextCollection:
'''
    This is only for subtask 1 files where the review is broken down into
    sentences. Furthermore, if the data contains targets and not just
    categories, the target and category sentiments are linked. As some of the
    datasets only contain category information, to keep the format the same
    across domains the sentiment values are always stored in the
    `target_sentiments` field.
The sentiment labels are the following: 1. negative, 2. neutral,
3. positive, and 4. conflict. conflict will not appear if the argument
`conflict` is False.
:param data_fp: Path to the SemEval 2016 formatted file.
:param conflict: Whether or not to include targets and categories that
have the `conflict` sentiment value. True is to include
conflict targets and categories.
:returns: The SemEval 2016 data formatted into a
`target_extraction.data_types.TargetTextCollection` object.
:raises SyntaxError: If the File passed is detected as not a SemEval
formatted file.
:raises `xml.etree.ElementTree.ParseError`: If the File passed is
not formatted correctly e.g.
mismatched tags
'''
tree = ET.parse(data_fp)
reviews = tree.getroot()
if reviews.tag != 'Reviews':
raise SyntaxError('The root of all semeval xml files should '
f'be Reviews and not {reviews.tag}')
all_target_texts: List[TargetText] = []
for review in reviews:
if len(review) != 1:
raise SyntaxError('The number of `sentences` tags under the '
'`review` tag should be just 1 and not '
f'{len(review)}')
sentences = review[0]
review_target_texts = list(_semeval_extract_data(sentences,
conflict).values())
all_target_texts.extend(review_target_texts)
return TargetTextCollection(all_target_texts)
def download_election_folder(cache_dir: Optional[Path] = None) -> Path:
'''
Downloads the data for the Election Twitter dataset by
`Wang et al, 2017 <https://www.aclweb.org/anthology/E17-1046>` that can be found
`here <https://figshare.com/articles/EACL_2017_-_Multi-target_UK_election_Twitter_sentiment_corpus/4479563/1>`_
This is then further used in the following functions
:func:`target_extraction.dataset_parsers.wang_2017_election_twitter_train`
and
:func:`target_extraction.dataset_parsers.wang_2017_election_twitter_test`
as a way to get the data.
:param cache_dir: The directory where all of the data is stored for
this code base. If None then the cache directory is
`dataset_parsers.CACHE_DIRECTORY`
:returns: The Path to the `Wang 2017 Election Twitter` folder within the
`cache_dir`.
:raises FileNotFoundError: If not all of files where downloaded the first
time. Will require the user to delete either
the cache directory or the
`Wang 2017 Election Twitter` folder within the
cache directory.
'''
def untar_folder(tar_file: Path, folder_to_extract_to: Path) -> None:
with tarfile.open(tar_file) as _tar_file:
_tar_file.extractall(folder_to_extract_to)
if cache_dir is None:
cache_dir = CACHE_DIRECTORY
dataset_folder_fp = Path(cache_dir, 'Wang 2017 Election Twitter')
annotation_fp = Path(dataset_folder_fp, 'annotations')
tweets_fp = Path(dataset_folder_fp, 'tweets')
test_id_fp = Path(dataset_folder_fp, 'test_id.txt')
train_id_fp = Path(dataset_folder_fp, 'train_id.txt')
if dataset_folder_fp.exists():
        # The following paths must exist in the folder for it to be the correct
        # downloaded directory, else a FileNotFoundError is raised
path_to_exist = [annotation_fp, tweets_fp, test_id_fp, train_id_fp]
for _path in path_to_exist:
if not _path.exists():
file_not_err = (f'The following file is not found {_path} '
'and should exist as currently in the '
'corresponding data directory to resolve this'
' problem please either delete this whole '
f'directory {dataset_folder_fp} or use a '
'different cache directory other '
f'than {cache_dir}')
raise FileNotFoundError(file_not_err)
# As the folder exists and contains all of the data return as we should
# not download something for the sake of downloading it
return dataset_folder_fp
dataset_folder_fp.mkdir(parents=True, exist_ok=True)
download_url = 'http://ndownloader.figshare.com/articles/4479563/versions/1'
response = requests.get(download_url, stream=True)
with tempfile.NamedTemporaryFile('wb+') as download_file:
for chunk in response.iter_content(chunk_size=128):
download_file.write(chunk)
with zipfile.ZipFile(download_file) as download_zip:
download_zip.extractall(dataset_folder_fp)
# Need to un tar annotations and tweet folders
annotation_tar_file = Path(dataset_folder_fp, 'annotations.tar.gz')
tweet_tar_file = Path(dataset_folder_fp, 'tweets.tar.gz')
untar_folder(annotation_tar_file, annotation_fp)
untar_folder(tweet_tar_file, tweets_fp)
return dataset_folder_fp
def _wang_2017_election_parser(train: bool, cache_dir: Optional[Path] = None
) -> TargetTextCollection:
'''
Parser for the Election Twitter dataset by
`Wang et al, 2017 <https://www.aclweb.org/anthology/E17-1046>` that can be found
`here <https://figshare.com/articles/EACL_2017_-_Multi-target_UK_election_Twitter_sentiment_corpus/4479563/1>`_
:param train: Whether to return the Train data. If False returns the
test data.
:param cache_dir: The directory where all of the data is stored for
this code base. If None then the cache directory is
`dataset_parsers.CACHE_DIRECTORY`
:returns: Either the training or test dataset of the Election Twitter
dataset.
:raises FileNotFoundError: If not all of files where downloaded the first
time. Will require the user to delete either
the cache directory or the
`Wang 2017 Election Twitter` folder within the
cache directory.
'''
def get_tweet_data(tweet_folder: Path) -> Dict[str, Dict[str, Any]]:
'''
:param tweet_folder: Directory containing files where each one represents
a Tweet and some target dependent sentiment data.
:returns: A dictionary of tweet IDs as keys and JSON data representing
the tweet target dependent sentiment data as values
'''
data = {}
for file_path in tweet_folder.iterdir():
file_name = file_path.stem
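            # Note: lstrip('5') strips every leading '5' character, not just one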
tweet_id = file_name.lstrip('5')
with open(file_path, 'r') as tweet_data:
data[tweet_id] = json.load(tweet_data)
return data
def parse_tweet(tweet_data: Dict[str, Any], annotation_data: Dict[str, Any],
tweet_id: str) -> TargetText:
'''
:params tweet_data: Data containing the Tweet information
:params annotation_data: Data containing the annotation data on the
Tweet
:params tweet_id: ID of the Tweet
:returns: The Tweet data in
:class:`target_extraction.data_types.TargetText` format
:raises ValueError: If the Target offset cannot be found.
'''
def get_offsets(from_offset: int, tweet_text: str, target: str) -> Span:
offset_shifts = [0, -1, 1]
for offset_shift in offset_shifts:
from_offset_shift = from_offset + offset_shift
to_offset = from_offset_shift + len(target)
offsets = Span(from_offset_shift, to_offset)
offset_text = tweet_text[from_offset_shift : to_offset].lower()
if offset_text == target.lower():
return offsets
raise ValueError(f'Offset {from_offset} does not match target text'
f' {target}. Full text {tweet_text}\nid {tweet_id}')
target_id = str(tweet_id)
target_text = tweet_data['content']
target_categories = None
target_category_sentiments = None
targets = []
target_spans = []
target_sentiments = []
for entity in tweet_data['entities']:
target_sentiment = annotation_data['items'][str(entity['id'])]
if target_sentiment == 'doesnotapply':
continue
target = entity['entity']
target_span = get_offsets(entity['offset'], target_text, target)
# Take the target from the text as sometimes the original label
# is lower cased when it should not be according to the text.
target = target_text[target_span.start: target_span.end]
targets.append(target)
target_spans.append(target_span)
target_sentiments.append(target_sentiment)
return TargetText(target_text, target_id, targets, target_spans,
target_sentiments, target_categories,
target_category_sentiments)
def get_data(tweet_id_file: Path, all_tweet_data: Dict[str, Dict[str, Any]],
all_annotation_data: Dict[str, Dict[str, Any]]
) -> TargetTextCollection:
'''
:params tweet_id_file: File Path containing a Tweet id on each new line
:params all_tweet_data: Dictionary containing data about the Tweet where
the keys are Tweet ID's and values a Dict of
information about the Tweet.
:param all_annotation_data: Dictionary containing annotation data about
the Tweet where the keys are Tweet ID's
and values are the annotation data about
the Tweet in a form of a Dict.
:returns: The Twitter data into a
:class:`target_extraction.data_types.TargetTextCollection`
object.
'''
targets = []
with tweet_id_file.open('r') as tweet_id_data:
for tweet_id in tweet_id_data:
tweet_id = tweet_id.strip()
tweet_data = all_tweet_data[tweet_id]
anno_data = all_annotation_data[tweet_id]
targets.append(parse_tweet(tweet_data, anno_data, tweet_id))
return TargetTextCollection(targets)
data_fp = download_election_folder(cache_dir)
tweets_folder = Path(data_fp, 'tweets', 'tweets')
annotations_folder = Path(data_fp, 'annotations', 'annotations')
tweet_data = get_tweet_data(tweets_folder)
annotation_data = get_tweet_data(annotations_folder)
ids_file = Path(data_fp, 'train_id.txt')
if not train:
ids_file = Path(data_fp, 'test_id.txt')
return get_data(ids_file, tweet_data, annotation_data)
def wang_2017_election_twitter_train(cache_dir: Optional[Path] = None
) -> TargetTextCollection:
'''
The data for this function when downloaded is stored within:
`Path(cache_dir, 'Wang 2017 Election Twitter')`
:param cache_dir: The directory where all of the data is stored for
this code base. If None then the cache directory is
`dataset_parsers.CACHE_DIRECTORY`
:returns: The Training dataset of the Election Twitter dataset by
`Wang et al, 2017 <https://www.aclweb.org/anthology/E17-1046>`
that can be found
`here <https://figshare.com/articles/EACL_2017_-_Multi-target_UK_election_Twitter_sentiment_corpus/4479563/1>`_
:raises FileNotFoundError: If not all of files where downloaded the first
time. Will require the user to delete either
the cache directory or the
`Wang 2017 Election Twitter` folder within the
cache directory.
'''
return _wang_2017_election_parser(train=True, cache_dir=cache_dir)
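
# Example usage: the first call downloads the corpus into the cache directory;
# later calls read from the cache.
# train_collection = wang_2017_election_twitter_train()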
def wang_2017_election_twitter_test(cache_dir: Optional[Path] = None
) -> TargetTextCollection:
'''
The data for this function when downloaded is stored within:
`Path(cache_dir, 'Wang 2017 Election Twitter')`
:param cache_dir: The directory where all of the data is stored for
this code base. If None then the cache directory is
`dataset_parsers.CACHE_DIRECTORY`
:returns: The Test dataset of the Election Twitter dataset by
`Wang et al, 2017 <https://www.aclweb.org/anthology/E17-1046>`
that can be found
`here <https://figshare.com/articles/EACL_2017_-_Multi-target_UK_election_Twitter_sentiment_corpus/4479563/1>`_
:raises FileNotFoundError: If not all of files where downloaded the first
time. Will require the user to delete either
the cache directory or the
`Wang 2017 Election Twitter` folder within the
cache directory.
'''
return _wang_2017_election_parser(train=False, cache_dir=cache_dir)
def multi_aspect_multi_sentiment_atsa(dataset: str,
                                      cache_dir: Optional[Path] = None,
                                      original: bool = True
                                      ) -> TargetTextCollection:
    '''
    When downloaded, the data for this function is stored within:
    `Path(cache_dir, 'Jiang 2019 MAMS ATSA')`

    :NOTE: As each sentence/`TargetText` object has to have a `text_id` and
           no ids exist in this dataset, the ids are created based on where
           the sentence occurs in the dataset, e.g. the first
           sentence/`TargetText` object of the `train` dataset has the id
           `train$0`.

    :param dataset: Either `train`, `val` or `test`; determines the dataset
                    that is returned.
    :param cache_dir: The directory where all of the data is stored for
                      this code base. If None then the cache directory is
                      `dataset_parsers.CACHE_DIRECTORY`
    :param original: This does not affect `val` or `test`. If True then it will
                     download the original training data from the `original paper
                     <https://www.aclweb.org/anthology/D19-1654.pdf>`_. Else
                     it will download the cleaned training dataset version. The
                     cleaned version only contains a few sample differences,
                     but these differences are with respect to overlapping
                     targets. See this `notebook for the full differences
                     <https://github.com/apmoore1/target-extraction/blob/master/tutorials/Difference_between_MAMS_ATSA_original_and_MAMS_ATSA_cleaned.ipynb>`_
    :returns: The `train`, `val`, or `test` dataset from the
              Multi-Aspect-Multi-Sentiment (MAMS) dataset, ATSA version. The
              dataset comes from `A Challenge Dataset and Effective Models
              for Aspect-Based Sentiment Analysis, EMNLP 2019
              <https://www.aclweb.org/anthology/D19-1654.pdf>`_
    :raises ValueError: If the `dataset` value is not `train`, `val`, or `test`
    '''
    accepted_datasets = {'train', 'val', 'test'}
    if dataset not in accepted_datasets:
        raise ValueError('dataset has to be one of these values '
                         f'{accepted_datasets}, not {dataset}')
    if cache_dir is None:
        cache_dir = CACHE_DIRECTORY
    data_folder = Path(cache_dir, 'Jiang 2019 MAMS ATSA')
    data_folder.mkdir(parents=True, exist_ok=True)
    dataset_url = {'train': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ATSA/raw/train.xml',
                   'val': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ATSA/raw/val.xml',
                   'test': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ATSA/raw/test.xml'}
    url = dataset_url[dataset]
    if dataset == 'train' and not original:
        url = 'https://raw.githubusercontent.com/apmoore1/target-extraction/master/data/MAMS/MAMS_ATSA_cleaned_train.xml'
    data_fp = Path(cached_path(url, cache_dir=data_folder))
    # Parsing the data
    target_text_collection = TargetTextCollection()
    tree = ET.parse(data_fp)
    sentences = tree.getroot()
    for sentence_id, sentence in enumerate(sentences):
        targets: List[str] = []
        target_sentiments: List[Union[str, int]] = []
        spans: List[Span] = []
        for data in sentence:
            if data.tag == 'text':
                text = data.text
                text = text.replace(u'\xa0', u' ')
            elif data.tag == 'aspectTerms':
                for target in data:
                    target_sentiment = target.attrib['polarity']
                    target_sentiments.append(target_sentiment)
                    targets.append(target.attrib['term'].replace(u'\xa0', u' '))
                    span_from = int(target.attrib['from'])
                    span_to = int(target.attrib['to'])
                    spans.append(Span(span_from, span_to))
            else:
                raise ValueError(f'This tag {data.tag} should not occur '
                                 'within a sentence tag')
        target_text_kwargs = {'targets': targets, 'spans': spans,
                              'text_id': f'{dataset}${str(sentence_id)}',
                              'target_sentiments': target_sentiments,
                              'categories': None, 'text': text,
                              'category_sentiments': None}
        for key in target_text_kwargs:
            if not target_text_kwargs[key]:
                target_text_kwargs[key] = None
        target_text = TargetText(**target_text_kwargs)
        target_text_collection.add(target_text)
    return target_text_collection
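
# A minimal usage sketch (same import assumption as above); `original=False`
# fetches the cleaned training split instead of the original one:
#
#     atsa_train = multi_aspect_multi_sentiment_atsa('train', original=False)
#     atsa_val = multi_aspect_multi_sentiment_atsa('val')
#     print(len(atsa_train), len(atsa_val))
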
def multi_aspect_multi_sentiment_acsa(dataset: str,
                                      cache_dir: Optional[Path] = None
                                      ) -> TargetTextCollection:
    '''
    When downloaded, the data for this function is stored within:
    `Path(cache_dir, 'Jiang 2019 MAMS ACSA')`

    :NOTE: As each sentence/`TargetText` object has to have a `text_id` and
           no ids exist in this dataset, the ids are created based on where
           the sentence occurs in the dataset, e.g. the first
           sentence/`TargetText` object of the `train` dataset has the id
           `train$0`.

    For reference, this dataset has 8 different aspect categories.

    :param dataset: Either `train`, `val` or `test`; determines the dataset
                    that is returned.
    :param cache_dir: The directory where all of the data is stored for
                      this code base. If None then the cache directory is
                      `dataset_parsers.CACHE_DIRECTORY`
    :returns: The `train`, `val`, or `test` dataset from the
              Multi-Aspect-Multi-Sentiment (MAMS) dataset, ACSA version. The
              dataset comes from `A Challenge Dataset and Effective Models
              for Aspect-Based Sentiment Analysis, EMNLP 2019
              <https://www.aclweb.org/anthology/D19-1654.pdf>`_
    :raises ValueError: If the `dataset` value is not `train`, `val`, or `test`
    '''
    accepted_datasets = {'train', 'val', 'test'}
    if dataset not in accepted_datasets:
        raise ValueError('dataset has to be one of these values '
                         f'{accepted_datasets}, not {dataset}')
    if cache_dir is None:
        cache_dir = CACHE_DIRECTORY
    data_folder = Path(cache_dir, 'Jiang 2019 MAMS ACSA')
    data_folder.mkdir(parents=True, exist_ok=True)
    dataset_url = {'train': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ACSA/raw/train.xml',
                   'val': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ACSA/raw/val.xml',
                   'test': 'https://github.com/siat-nlp/MAMS-for-ABSA/raw/master/data/MAMS-ACSA/raw/test.xml'}
    url = dataset_url[dataset]
    data_fp = Path(cached_path(url, cache_dir=data_folder))
    # Parsing the data
    category_text_collection = TargetTextCollection()
    tree = ET.parse(data_fp)
    sentences = tree.getroot()
    for sentence_id, sentence in enumerate(sentences):
        categories: List[str] = []
        category_sentiments: List[Union[str, int]] = []
        for data in sentence:
            if data.tag == 'text':
                text = data.text
                text = text.replace(u'\xa0', u' ')
            elif data.tag == 'aspectCategories':
                for category in data:
                    category_sentiment = category.attrib['polarity']
                    category_sentiments.append(category_sentiment)
                    categories.append(category.attrib['category'].replace(u'\xa0', u' '))
            else:
                raise ValueError(f'This tag {data.tag} should not occur '
                                 'within a sentence tag')
        category_text_kwargs = {'targets': None, 'spans': None,
                                'text_id': f'{dataset}${str(sentence_id)}',
                                'target_sentiments': None,
                                'categories': categories, 'text': text,
                                'category_sentiments': category_sentiments}
        for key in category_text_kwargs:
            if not category_text_kwargs[key]:
                category_text_kwargs[key] = None
        category_text = TargetText(**category_text_kwargs)
        category_text_collection.add(category_text)
    return category_text_collection
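
# A minimal usage sketch for the ACSA variant (same assumptions as above);
# here each sample carries `categories`/`category_sentiments` instead of
# span-level targets:
#
#     acsa_train = multi_aspect_multi_sentiment_acsa('train')
#     print(len(acsa_train))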
| 50.889632 | 159 | 0.592961 | 3,522 | 30,432 | 4.955423 | 0.118967 | 0.021773 | 0.013751 | 0.015814 | 0.611471 | 0.548043 | 0.519166 | 0.50043 | 0.489944 | 0.464046 | 0 | 0.017123 | 0.33215 | 30,432 | 597 | 160 | 50.974874 | 0.841616 | 0.383544 | 0 | 0.4081 | 0 | 0.021807 | 0.144581 | 0.011843 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043614 | false | 0 | 0.040498 | 0 | 0.127726 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33ef41e085b8b97dcad376973e5a2651938e30d6 | 309 | py | Python | 1672-richest-customer-wealth/1672-richest-customer-wealth.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | 1672-richest-customer-wealth/1672-richest-customer-wealth.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | 1672-richest-customer-wealth/1672-richest-customer-wealth.py | marzy-bn/Leetcode_2022 | 07d6b9050279e82f610ed4a54209b33db3e3f8f9 | [
"MIT"
] | null | null | null | from typing import List  # needed for the List annotation below


class Solution:
    def maximumWealth(self, accounts: List[List[int]]) -> int:
        # Track the largest account total seen so far.
        max_acc = 0
        for account in accounts:
            # Sum this customer's balances across all their bank accounts.
            sum_acc = 0
            for num in account:
                sum_acc += num
            if max_acc < sum_acc:
                max_acc = sum_acc
        return max_acc | 30.9 | 62 | 0.514563 | 39 | 309 | 3.871795 | 0.461538 | 0.15894 | 0.092715 | 0.15894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011111 | 0.417476 | 309 | 10 | 63 | 30.9 | 0.827778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
33f0ba344a37591b7f4944804b11701aa4ff2ca0 | 6,973 | py | Python | campaign/backends/mailgun_api.py | tiktn/django-campaign | 7f6872a39d1b70d968a2384a6af87e7760fbca51 | [
"BSD-3-Clause"
] | 53 | 2015-01-28T09:42:57.000Z | 2022-03-26T15:32:16.000Z | campaign/backends/mailgun_api.py | tiktn/django-campaign | 7f6872a39d1b70d968a2384a6af87e7760fbca51 | [
"BSD-3-Clause"
] | 20 | 2015-02-11T02:47:12.000Z | 2022-03-18T03:06:54.000Z | campaign/backends/mailgun_api.py | tiktn/django-campaign | 7f6872a39d1b70d968a2384a6af87e7760fbca51 | [
"BSD-3-Clause"
] | 25 | 2015-04-29T15:55:17.000Z | 2022-01-21T15:46:51.000Z | from __future__ import unicode_literals
import logging
import json
import requests
from django.template import Context
from django.conf import settings
from django.urls import reverse
from django.contrib.sites.models import Site
from django import template
from campaign.backends.base import BaseBackend
from campaign.context import MailContext
logger = logging.getLogger('django.campaign')
class MailgunApiBackend(BaseBackend):
"""
Send your campaigns through the Mailgun Email Service.
This backend assumes, that your Mailgun API-Key is configured in::
settings.MAILGUN_API_KEY
And you need to change the CAMPAIGN_CONTEXT_PROCESSORS setting. The
default 'campaign.context_processors.recipient' needs to be removed in
favour of the 'campaign.context_processors.recipient_dict'!
If no sending address is specified in the database, the From-Email is
determined from the following settings in this order::
settings.MAILGUN_API_FROM_EMAIL # only used by this backend
settings.CAMPAIGN_FROM_EMAIL # used by all backends that support it
settings.DEFAULT_FROM_EMAIL # used by django
You can provide additional values for the API call via::
settings.MAILGUN_API_SETTINGS
(Defaults to "{}")
For example to setup tracking you can set it to::
MAILGUN_API_SETTINGS = {
"o:tracking": "yes",
"o:tracking-opens": "yes",
"o:tracking-clicks": "yes"
}
These settings will override the django-campaign defaults.
If no sending name is specified in the database, the from header is
either determined from the CAMPAIGN_FROM_HEADERS setting or only the
plain email address is used.
To specify a from-header (with display-name) for a specific address
the following setting can be used::
settings.CAMPAIGN_FROM_HEADERS
Example configuration::
CAMPAIGN_FROM_HEADERS = {
"newsletter@example.com": "Example Newsletter <newsletter@example.com>",
"no-reply@test.com": "Test Sender <no-reply@test.com>"
}
These From-Headers are used, when an email is sent with the matching
address. The setting is optional.
Please note, that all Django-Template constructs in the MailTemplate are
evaluated only once for all recipients. Personalizations happens at
Mailgun, where each message is processed with 'recipient_vars'.
It might be a good idea to wrap the recipient_vars placeholders in
`{% if not viewed_online %}` conditionals, otherwise the raw placeholders
will be displayed in the web-view of the newsletter.
"""
    BATCH_SIZE = 1000

    def send_campaign(self, campaign, fail_silently=False):
        from campaign.models import BlacklistEntry

        subject = campaign.template.subject
        text_template = template.Template(campaign.template.plain)
        if campaign.template.html is not None and campaign.template.html != "":
            html_template = template.Template(campaign.template.html)
        else:
            html_template = None

        success_count = 0
        recipients = []
        recipient_vars = {}
        for recipient_list in campaign.recipients.all():
            for recipient in recipient_list.object_list():
                # never send mail to blacklisted email addresses
                recipient_email = getattr(recipient, recipient_list.email_field_name)
                if not BlacklistEntry.objects.filter(email=recipient_email).count() and recipient_email not in recipients:
                    recipients.append(recipient_email)
                    context = MailContext(recipient)
                    if campaign.online:
                        context.update({
                            'view_online_url': reverse("campaign_view_online", kwargs={
                                'object_id': campaign.pk}),
                            'site_url': Site.objects.get_current().domain,
                            'recipient_email': recipient_email
                        })
                    the_recipient_vars = {}
                    for k, v in context.flatten().items():
                        the_recipient_vars.update({k: v})
                    recipient_vars.update({recipient_email: the_recipient_vars})

        # assemble recipient data into batches of self.BATCH_SIZE
        batches = []
        batch_r = []
        batch_v = {}
        for r in recipients:
            batch_r.append(r)
            batch_v[r] = recipient_vars[r]
            if len(batch_r) >= self.BATCH_SIZE:
                batches.append((batch_r, batch_v))
                batch_r = []
                batch_v = {}
        if len(batch_r):
            batches.append((batch_r, batch_v))

        for recipients, recipient_vars in batches:
            from_email = self.get_from_email(campaign)
            from_domain = from_email.split('@')[-1]
            from_header = self.get_from_header(campaign, from_email)
            api_url = getattr(settings, 'MAILGUN_API_URL', 'https://api.mailgun.net/v3/%s/messages') % from_domain
            auth = ("api", settings.MAILGUN_API_KEY)
            data = {
                'to': recipients,
                'from': from_header,
                'recipient-variables': json.dumps(recipient_vars),
                'subject': subject,
                'text': text_template.render(Context()),
            }
            if html_template:
                data['html'] = html_template.render(Context())
            # update data with user supplied values from settings
            data.update(getattr(settings, 'MAILGUN_API_SETTINGS', {}))
            try:
                result = requests.post(api_url, auth=auth, data=data)
                if result.status_code == 200:
                    success_count += len(recipients)
                else:
                    raise Exception(result.text)
            except Exception as e:
                logger.error('Mailgun error: %s - %s' % (e.__class__, e))
                if not fail_silently:
                    raise e
        return success_count
    def get_from_email(self, campaign):
        if hasattr(settings, 'MAILGUN_API_FROM_EMAIL'):
            from_email = settings.MAILGUN_API_FROM_EMAIL
        else:
            from_email = getattr(settings, 'CAMPAIGN_FROM_EMAIL', settings.DEFAULT_FROM_EMAIL)
        try:
            from_email = campaign.newsletter.from_email or from_email
        except Exception:  # the campaign may not have a newsletter attached
            pass
        return from_email

    def get_from_header(self, campaign, from_email):
        try:
            from_name = campaign.newsletter.from_name or None
        except Exception:  # the campaign may not have a newsletter attached
            from_name = None
        if from_name:
            from_header = "%s <%s>" % (from_name, from_email)
        else:
            from_header = getattr(settings, 'CAMPAIGN_FROM_HEADERS', {}).get(from_email, from_email)
        return from_header
backend = MailgunApiBackend()
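# A minimal settings sketch (not part of the original module) showing the
# configuration this backend reads; the API key value is a placeholder:
#
#     MAILGUN_API_KEY = 'key-xxxxxxxxxxxxxxxx'
#     CAMPAIGN_FROM_EMAIL = 'newsletter@example.com'
#     MAILGUN_API_SETTINGS = {'o:tracking': 'yes'}
#     CAMPAIGN_FROM_HEADERS = {
#         'newsletter@example.com': 'Example Newsletter <newsletter@example.com>',
#     }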
| 37.28877 | 122 | 0.625699 | 808 | 6,973 | 5.216584 | 0.272277 | 0.04911 | 0.034164 | 0.011388 | 0.083274 | 0.026572 | 0.014709 | 0 | 0 | 0 | 0 | 0.002044 | 0.298437 | 6,973 | 186 | 123 | 37.489247 | 0.859567 | 0.316076 | 0 | 0.147059 | 0 | 0 | 0.063126 | 0.00936 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0.009804 | 0.117647 | 0 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33f262ac354b5b387c54d37c6e16bd4e1f39f3dc | 1,444 | py | Python | GUI_tkinter_all/gui_oop_intro.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | [
"MIT"
] | null | null | null | GUI_tkinter_all/gui_oop_intro.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | [
"MIT"
] | null | null | null | GUI_tkinter_all/gui_oop_intro.py | dogancantorun8/python-application | 3ef972e52bb6950108cde36974ceaf5c3cde3667 | [
"MIT"
] | null | null | null | import tkinter as tk
class GUI:
    def __init__(self, master):
        self.master = master  # keep a reference so window-level settings can be changed later
        self.label_name = tk.Label(master, text='Adı soyadı:', font=('Arial', 10))
        self.label_name.place(x=10, y=10)
        # entry widgets are the text fields of a tkinter GUI
        self.entry_name = tk.Entry(master, font=('Arial', 10), justify=tk.CENTER)  # used to capture user input
        self.entry_name.place(x=10, y=32, width=200)
        self.button_ok = tk.Button(self.master, text='ok', command=self.button_ok_handler)
        self.button_ok.place(x=10, y=70, width=60, height=60)
        self.button_cancel = tk.Button(self.master, text='cancel', command=self.button_cancel_handler)
        self.button_cancel.place(x=160, y=70, width=60, height=60)

    def button_ok_handler(self):
        # print('ok')
        self.master['bg'] = 'yellow'  # change the window background colour
        print(self.entry_name.get())  # read the entry's text with get() and echo it
        self.entry_name.delete(0, tk.END)  # clear the field once the input has been handled

    def button_cancel_handler(self):
        print('cancel')
        self.master['bg'] = 'lightblue'


root = tk.Tk()  # create the root window
root.geometry('600x480')
root.title('Sample TK ')
gui = GUI(root)  # pass root into the GUI class so all window configuration goes through master
root.mainloop() | 39.027027 | 111 | 0.707756 | 206 | 1,444 | 4.854369 | 0.441748 | 0.06 | 0.052 | 0.027 | 0.106 | 0.036 | 0 | 0 | 0 | 0 | 0 | 0.032609 | 0.171745 | 1,444 | 37 | 112 | 39.027027 | 0.803512 | 0.280471 | 0 | 0 | 0 | 0 | 0.068865 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.041667 | 0 | 0.208333 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33f3de99c68b0762c69ffc423533d91bb34f53c2 | 4,053 | py | Python | Function/local_functions_20200813.py | JoneYu45/Global_investigation | 2d9d86111517919d1581d35f34c425f4fda729c0 | [
"MIT"
] | null | null | null | Function/local_functions_20200813.py | JoneYu45/Global_investigation | 2d9d86111517919d1581d35f34c425f4fda729c0 | [
"MIT"
] | null | null | null | Function/local_functions_20200813.py | JoneYu45/Global_investigation | 2d9d86111517919d1581d35f34c425f4fda729c0 | [
"MIT"
] | null | null | null | #Import modules
import numpy as np
from sklearn.linear_model import ElasticNetCV
import pandas as pd
import time
from joblib import Parallel, delayed
import multiprocessing
# Define functions
def make_weights(E_dist, theta):
    w = [np.exp(-1 * theta * E_dist[i] / np.mean(E_dist)) for i in range(E_dist.shape[0])]
    return w


def weight_data(sub_block, w):
    wp = np.empty(shape=sub_block.shape)
    for i in range(sub_block.shape[0]):
        for j in range(sub_block.shape[1]):
            wp[i, j] = sub_block[i, j] * w[i]
    return wp
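
# A small sketch (not part of the original script) of how the two helpers
# above combine: weights decay exponentially with distance, then scale rows.
#
#     E_dist = np.array([0.5, 1.0, 2.0])
#     w = make_weights(E_dist, theta=2)     # closer rows get larger weights
#     wp = weight_data(np.ones((3, 2)), w)  # row i is multiplied by w[i]
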
def select_frequent_dominate_genera(input, dominate_threshold, zero_frequency_threshold, select):
    # Process data
    rawdata = pd.read_csv(input).iloc[:, 1:]
    abundance = rawdata
    # Calculate the relative abundance profile
    read_num = np.sum(rawdata, axis=1)
    for i in range(rawdata.shape[0]):
        abundance.iloc[i, :] = rawdata.iloc[i, :] / read_num[i] * 100
    # Process or not
    if select:
        # Select the most frequent and dominant genera
        dominate = np.where(np.mean(abundance, axis=0) > dominate_threshold)
        wanted_abundance = abundance.iloc[:, dominate[0]]
        frequency = np.where(
            (wanted_abundance == 0).astype(int).sum(axis=0) / abundance.shape[0] * 100 < zero_frequency_threshold)
        wanted_abundance = wanted_abundance.iloc[:, frequency[0]]
    else:
        wanted_abundance = abundance
    # Output selection
    return wanted_abundance
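
# A minimal usage sketch (the file name and thresholds below are made up):
#
#     abundance = select_frequent_dominate_genera(
#         'otu_table.csv', dominate_threshold=0.1,
#         zero_frequency_threshold=10, select=True)
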
def Elastic_net_fitting(block, target_otu, interest_otu, theta, train_len, cv, iteration, l_grid, output_dir):
    ## Select data and fitting
    print('Start fitting.')
    lib = range(block.shape[0])
    coefs = np.empty(shape=(block.shape[0], block.shape[1] - 1))
    fit_results = np.empty(shape=(block.shape[0], 13))
    for ipred in lib:
        print('\r', 'Complete percentage: %.2f%%' % (ipred / len(lib) * 100), end="", flush=True)
        sub_block = np.delete(block, ipred, axis=0)
        q = block[lib[ipred], :]
        ### Calculate weights
        E_dist = np.sqrt(np.sum(np.array(sub_block[:, 1:] - q[:, 1:]) ** 2, axis=1))
        w = make_weights(E_dist, theta)
        ### Weighted predictors and responses
        X_wp = weight_data(sub_block[:, 1:], w)
        Y_wp = np.ravel(weight_data(sub_block[:, 0], w))
        X_target = block[ipred, 1:]
        Y_target = block[ipred, 0]
        ## Split training and test data
        pick_test = np.random.choice(range(X_wp.shape[0]), size=train_len, replace=False)
        X_train = np.append(np.delete(X_wp, pick_test, axis=0), X_target, axis=0)
        X_test = X_wp[pick_test, :]
        Y_train = np.append(np.delete(Y_wp, pick_test, axis=0), Y_target)
        Y_test = Y_wp[pick_test]
        ### Fit function
        regr = ElasticNetCV(cv=cv, random_state=0, max_iter=iteration,
                            l1_ratio=[(i + 1) * l_grid for i in range(int(1 / l_grid))])
        regr.fit(X_train, Y_train)
        rmse = np.sqrt(np.mean((regr.predict(X_train) - Y_train) ** 2))
        rmse_o = np.sqrt(np.mean((regr.predict(X_test) - Y_test) ** 2))
        coefs[ipred, :] = regr.coef_
        fit_results[ipred, :] = regr.intercept_, regr.alpha_, regr.l1_ratio_, rmse, np.std(Y_train), rmse_o, np.std(
            Y_test), regr.score(X_test, Y_test), regr.score(X_train, Y_train), max(Y_train), min(Y_train), max(
            Y_test), min(Y_test)
        print('\r', 'Complete percentage: %.2f%%' % ((ipred + 1) / len(lib) * 100), end="", flush=True)
    # Output results
    coefs = pd.DataFrame(data=coefs)
    coefs.to_csv('/'.join([output_dir, 'coefs/%s_%s_%s_fit_results.csv' % (interest_otu, target_otu, theta)]))
    fit_results = pd.DataFrame(
        columns=['Intercept', 'Best alpha', 'Best l1_ratio', 'RMSE', 'Std', 'RMSE_o', 'Std_o', 'Test set score',
                 'Test set score_train', 'ymax_train', 'ymin_train', 'ymax_test', 'ymin_test'],
        data=fit_results)
    fit_results.to_csv('/'.join([output_dir, 'fit_result/%s_%s_%s_fit_results.csv' % (interest_otu, target_otu, theta)])) | 44.538462 | 120 | 0.635332 | 605 | 4,053 | 4.049587 | 0.25124 | 0.029388 | 0.009796 | 0.017959 | 0.204082 | 0.114286 | 0.053061 | 0.033469 | 0.033469 | 0.033469 | 0 | 0.017638 | 0.21663 | 4,053 | 91 | 120 | 44.538462 | 0.754016 | 0.071058 | 0 | 0 | 0 | 0 | 0.069693 | 0.017356 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.089552 | 0 | 0.19403 | 0.044776 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
33f3e2d22b81c258c7bdcf256c47921cf209baa9 | 16,634 | py | Python | tests/asr/test_speech_commands.py | borisdayma/NeMo | 88f6c5b93574adb219185d5ded14b6393c485ea0 | [
"Apache-2.0"
] | null | null | null | tests/asr/test_speech_commands.py | borisdayma/NeMo | 88f6c5b93574adb219185d5ded14b6393c485ea0 | [
"Apache-2.0"
] | null | null | null | tests/asr/test_speech_commands.py | borisdayma/NeMo | 88f6c5b93574adb219185d5ded14b6393c485ea0 | [
"Apache-2.0"
] | null | null | null | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 NVIDIA. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import shutil
import tarfile
import unittest
from ruamel.yaml import YAML
import nemo
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.parts import AudioLabelDataset, WaveformFeaturizer, collections, parsers, perturb
from nemo.core import DeviceType
from tests.common_setup import NeMoUnitTest
logging = nemo.logging
freq = 16000
class TestSpeechCommandsPytorch(NeMoUnitTest):
    labels = [
        "cat",
        "dog",
    ]
    manifest_filepath = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../data/speech_commands/train_manifest.json")
    )
    featurizer_config = {
        'window': 'hann',
        'dither': 1e-05,
        'normalize': 'per_feature',
        'frame_splicing': 1,
        'int_values': False,
        'window_stride': 0.01,
        'sample_rate': freq,
        'features': 64,
        'n_fft': 512,
        'window_size': 0.02,
    }
    yaml = YAML(typ="safe")
    @classmethod
    def setUpClass(cls) -> None:
        super().setUpClass()
        data_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/"))
        logging.info("Looking up for test speech command data")
        if not os.path.exists(os.path.join(data_folder, "speech_commands")):
            logging.info(
                "Extracting speech commands data to: {0}".format(os.path.join(data_folder, "speech_commands"))
            )
            tar = tarfile.open(os.path.join(data_folder, "speech_commands.tar.xz"), "r:xz")
            tar.extractall(path=data_folder)
            tar.close()
        else:
            logging.info("Speech Command data found in: {0}".format(os.path.join(data_folder, "speech_commands")))

    @classmethod
    def tearDownClass(cls) -> None:
        super().tearDownClass()
        data_folder = os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/"))
        logging.info("Looking up for test ASR data")
        if os.path.exists(os.path.join(data_folder, "speech_commands")):
            shutil.rmtree(os.path.join(data_folder, "speech_commands"))
    def test_pytorch_audio_dataset_with_perturbation(self):
        perturbations = [
            perturb.WhiteNoisePerturbation(min_level=-90, max_level=-46),
            perturb.ShiftPerturbation(min_shift_ms=-5.0, max_shift_ms=5.0),
        ]
        # Execute perturbations with 100% probability
        prob_perturb = [(1.0, p) for p in perturbations]
        audio_augmentor = perturb.AudioAugmentor(prob_perturb)

        featurizer = WaveformFeaturizer(
            sample_rate=self.featurizer_config['sample_rate'],
            int_values=self.featurizer_config['int_values'],
            augmentor=audio_augmentor,
        )
        ds = AudioLabelDataset(manifest_filepath=self.manifest_filepath, labels=self.labels, featurizer=featurizer,)
        for i in range(len(ds)):
            logging.info(ds[i])
            # logging.info(ds[i][0].shape)
            # self.assertEqual(freq, ds[i][0].shape[0])
    def test_dataloader(self):
        batch_size = 2
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            labels=self.labels,
            batch_size=batch_size,
            # placement=DeviceType.GPU,
            sample_rate=16000,
        )
        for ind, data in enumerate(dl.data_iterator):
            # With num_workers update, this is no longer true
            # Moving to GPU is handled by AudioPreprocessor
            # data is on GPU
            # self.assertTrue(data[0].is_cuda)
            # self.assertTrue(data[1].is_cuda)
            # self.assertTrue(data[2].is_cuda)
            # self.assertTrue(data[3].is_cuda)
            # first dimension is batch
            self.assertTrue(data[0].size(0) == batch_size)
            self.assertTrue(data[1].size(0) == batch_size)
            self.assertTrue(data[2].size(0) == batch_size)
            self.assertTrue(data[3].size(0) == batch_size)
    def test_trim_silence(self):
        batch_size = 2
        normal_dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            labels=self.labels,
            batch_size=batch_size,
            # placement=DeviceType.GPU,
            drop_last=False,
            shuffle=False,
        )
        trimmed_dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            trim_silence=True,
            labels=self.labels,
            batch_size=batch_size,
            # placement=DeviceType.GPU,
            drop_last=False,
            shuffle=False,
        )
        for norm, trim in zip(normal_dl.data_iterator, trimmed_dl.data_iterator):
            for point in range(batch_size):
                self.assertTrue(norm[1][point].data >= trim[1][point].data)
    def test_audio_preprocessors(self):
        batch_size = 2
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            labels=self.labels,
            batch_size=batch_size,
            # placement=DeviceType.GPU,
            drop_last=False,
            shuffle=False,
        )

        installed_torchaudio = True
        try:
            import torchaudio
        except ModuleNotFoundError:
            installed_torchaudio = False
            with self.assertRaises(ModuleNotFoundError):
                to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
            with self.assertRaises(ModuleNotFoundError):
                to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

        if installed_torchaudio:
            to_spectrogram = nemo_asr.AudioToSpectrogramPreprocessor(n_fft=400, window=None)
            to_mfcc = nemo_asr.AudioToMFCCPreprocessor(n_mfcc=15)

        to_melspec = nemo_asr.AudioToMelSpectrogramPreprocessor(features=50)

        for batch in dl.data_iterator:
            input_signals, seq_lengths, _, _ = batch
            input_signals = input_signals.to(to_melspec._device)
            seq_lengths = seq_lengths.to(to_melspec._device)

            melspec = to_melspec.forward(input_signals, seq_lengths)

            if installed_torchaudio:
                spec = to_spectrogram.forward(input_signals, seq_lengths)
                mfcc = to_mfcc.forward(input_signals, seq_lengths)

            # Check that number of features is what we expect
            self.assertTrue(melspec[0].shape[1] == 50)

            if installed_torchaudio:
                self.assertTrue(spec[0].shape[1] == 201)  # n_fft // 2 + 1 bins
                self.assertTrue(mfcc[0].shape[1] == 15)
    # @unittest.skip("Init parameters of nemo_asr.AudioToMelSpectrogramPreprocessor are invalid")
    def test_jasper_training(self):
        with open(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/quartznet_speech_recognition.yaml"))
        ) as file:
            jasper_model_definition = self.yaml.load(file)
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            labels=self.labels,
            batch_size=2,
        )
        pre_process_params = {
            'frame_splicing': 1,
            'features': 64,
            'window_size': 0.02,
            'n_fft': 512,
            'dither': 1e-05,
            'window': 'hann',
            'sample_rate': 16000,
            'normalize': 'per_feature',
            'window_stride': 0.01,
        }
        preprocessing = nemo_asr.AudioToMelSpectrogramPreprocessor(**pre_process_params)
        jasper_encoder = nemo_asr.JasperEncoder(**jasper_model_definition['JasperEncoder'],)
        jasper_decoder = nemo_asr.JasperDecoderForClassification(
            feat_in=jasper_model_definition['JasperEncoder']['jasper'][-1]['filters'], num_classes=len(self.labels)
        )
        ce_loss = nemo_asr.CrossEntropyLossNM()

        # DAG
        audio_signal, a_sig_length, targets, targets_len = dl()
        processed_signal, p_length = preprocessing(input_signal=audio_signal, length=a_sig_length)
        encoded, encoded_len = jasper_encoder(audio_signal=processed_signal, length=p_length)
        # logging.info(jasper_encoder)
        log_probs = jasper_decoder(encoder_output=encoded)
        loss = ce_loss(logits=log_probs, labels=targets)

        callback = nemo.core.SimpleLossLoggerCallback(
            tensors=[loss], print_func=lambda x: logging.info(f'Train Loss: {str(x[0].item())}'),
        )
        # Instantiate an optimizer to perform the `train` action
        optimizer = self.nf.get_trainer()
        optimizer.train(
            [loss], callbacks=[callback], optimizer="sgd", optimization_params={"num_epochs": 10, "lr": 0.0003},
        )
    # @unittest.skip("Init parameters of nemo_asr.AudioToMelSpectrogramPreprocessor are invalid")
    def test_double_jasper_training(self):
        with open(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/quartznet_speech_recognition.yaml"))
        ) as file:
            jasper_model_definition = self.yaml.load(file)
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            # featurizer_config=self.featurizer_config,
            manifest_filepath=self.manifest_filepath,
            labels=self.labels,
            batch_size=2,
        )
        pre_process_params = {
            'frame_splicing': 1,
            'features': 64,
            'window_size': 0.02,
            'n_fft': 512,
            'dither': 1e-05,
            'window': 'hann',
            'sample_rate': 16000,
            'normalize': 'per_feature',
            'window_stride': 0.01,
        }
        preprocessing = nemo_asr.AudioToMelSpectrogramPreprocessor(**pre_process_params)
        jasper_encoder1 = nemo_asr.JasperEncoder(**jasper_model_definition['JasperEncoder'],)
        jasper_encoder2 = nemo_asr.JasperEncoder(**jasper_model_definition['JasperEncoder'],)
        # mx_max1 = nemo.backends.pytorch.common.other.SimpleCombiner(mode="max")
        # mx_max2 = nemo.backends.pytorch.common.other.SimpleCombiner(mode="max")
        jasper_decoder1 = nemo_asr.JasperDecoderForClassification(
            feat_in=jasper_model_definition['JasperEncoder']['jasper'][-1]['filters'], num_classes=len(self.labels)
        )
        jasper_decoder2 = nemo_asr.JasperDecoderForClassification(
            feat_in=jasper_model_definition['JasperEncoder']['jasper'][-1]['filters'], num_classes=len(self.labels)
        )
        ce_loss = nemo_asr.CrossEntropyLossNM()

        # DAG
        audio_signal, a_sig_length, targets, targets_len = dl()
        processed_signal, p_length = preprocessing(input_signal=audio_signal, length=a_sig_length)
        encoded1, encoded_len1 = jasper_encoder1(audio_signal=processed_signal, length=p_length)
        encoded2, encoded_len2 = jasper_encoder2(audio_signal=processed_signal, length=p_length)
        logits1 = jasper_decoder1(encoder_output=encoded1)
        logits2 = jasper_decoder2(encoder_output=encoded2)
        loss = ce_loss(logits=logits1, labels=targets,)

        callback = nemo.core.SimpleLossLoggerCallback(
            tensors=[loss], print_func=lambda x: logging.info(str(x[0].item()))
        )
        # Instantiate an optimizer to perform the `train` action
        optimizer = self.nf.get_trainer()
        optimizer.train(
            [loss], callbacks=[callback], optimizer="sgd", optimization_params={"num_epochs": 10, "lr": 0.0003},
        )
    def test_stft_conv(self):
        with open(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/quartznet_speech_recognition.yaml"))
        ) as file:
            jasper_model_definition = self.yaml.load(file)
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            manifest_filepath=self.manifest_filepath, labels=self.labels, batch_size=2,
        )
        pre_process_params = {
            'frame_splicing': 1,
            'features': 64,
            'window_size': 0.02,
            'n_fft': 512,
            'dither': 1e-05,
            'window': 'hann',
            'sample_rate': 16000,
            'normalize': 'per_feature',
            'window_stride': 0.01,
            'stft_conv': True,
        }
        preprocessing = nemo_asr.AudioToMelSpectrogramPreprocessor(**pre_process_params)
        jasper_encoder = nemo_asr.JasperEncoder(**jasper_model_definition['JasperEncoder'],)
        jasper_decoder = nemo_asr.JasperDecoderForClassification(
            feat_in=jasper_model_definition['JasperEncoder']['jasper'][-1]['filters'], num_classes=len(self.labels)
        )
        ce_loss = nemo_asr.CrossEntropyLossNM()

        # DAG
        audio_signal, a_sig_length, targets, targets_len = dl()
        processed_signal, p_length = preprocessing(input_signal=audio_signal, length=a_sig_length)
        encoded, encoded_len = jasper_encoder(audio_signal=processed_signal, length=p_length)
        # logging.info(jasper_encoder)
        logits = jasper_decoder(encoder_output=encoded)
        loss = ce_loss(logits=logits, labels=targets)

        callback = nemo.core.SimpleLossLoggerCallback(
            tensors=[loss], print_func=lambda x: logging.info(str(x[0].item()))
        )
        # Instantiate an optimizer to perform the `train` action
        optimizer = self.nf.get_trainer()
        optimizer.train(
            [loss], callbacks=[callback], optimizer="sgd", optimization_params={"num_epochs": 10, "lr": 0.0003},
        )
    def test_jasper_eval(self):
        with open(
            os.path.abspath(os.path.join(os.path.dirname(__file__), "../data/quartznet_speech_recognition.yaml"))
        ) as file:
            jasper_model_definition = self.yaml.load(file)
        dl = nemo_asr.AudioToSpeechLabelDataLayer(
            manifest_filepath=self.manifest_filepath, labels=self.labels, batch_size=2,
        )
        pre_process_params = {
            'frame_splicing': 1,
            'features': 64,
            'window_size': 0.02,
            'n_fft': 512,
            'dither': 1e-05,
            'window': 'hann',
            'sample_rate': 16000,
            'normalize': 'per_feature',
            'window_stride': 0.01,
        }
        preprocessing = nemo_asr.AudioToMelSpectrogramPreprocessor(**pre_process_params)
        jasper_encoder = nemo_asr.JasperEncoder(**jasper_model_definition['JasperEncoder'],)
        jasper_decoder = nemo_asr.JasperDecoderForClassification(
            feat_in=jasper_model_definition['JasperEncoder']['jasper'][-1]['filters'], num_classes=len(self.labels)
        )
        ce_loss = nemo_asr.CrossEntropyLossNM()

        # DAG
        audio_signal, a_sig_length, targets, targets_len = dl()
        processed_signal, p_length = preprocessing(input_signal=audio_signal, length=a_sig_length)
        encoded, encoded_len = jasper_encoder(audio_signal=processed_signal, length=p_length)
        # logging.info(jasper_encoder)
        logits = jasper_decoder(encoder_output=encoded)
        loss = ce_loss(logits=logits, labels=targets,)

        from nemo.collections.asr.helpers import (
            process_classification_evaluation_batch,
            process_classification_evaluation_epoch,
        )

        eval_callback = nemo.core.EvaluatorCallback(
            eval_tensors=[loss, logits, targets],
            user_iter_callback=lambda x, y: process_classification_evaluation_batch(x, y, top_k=[1]),
            user_epochs_done_callback=process_classification_evaluation_epoch,
        )
        # Run the evaluation graph
        self.nf.eval(callbacks=[eval_callback])
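
# A minimal sketch (not part of the original file) of invoking one of these
# tests from the repository root; the path is an assumption:
#
#     python -m pytest tests/asr/test_speech_commands.py -k test_dataloader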
| 41.899244 | 116 | 0.639113 | 1,835 | 16,634 | 5.548229 | 0.191826 | 0.023377 | 0.028877 | 0.033396 | 0.665062 | 0.641784 | 0.641784 | 0.611924 | 0.578529 | 0.565662 | 0 | 0.018402 | 0.251894 | 16,634 | 396 | 117 | 42.005051 | 0.799743 | 0.12763 | 0 | 0.526846 | 0 | 0 | 0.08806 | 0.015841 | 0 | 0 | 0 | 0 | 0.033557 | 1 | 0.033557 | false | 0 | 0.040268 | 0 | 0.090604 | 0.010067 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33f527e6618f06c1a9c11b30446518af5b81e11c | 594 | py | Python | locale/pot/api/core/pointsets-7.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 4 | 2020-08-07T08:19:19.000Z | 2020-12-04T09:51:11.000Z | locale/pot/api/core/pointsets-7.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 19 | 2020-08-06T00:24:30.000Z | 2022-03-30T19:22:24.000Z | locale/pot/api/core/pointsets-7.py | tkoyama010/pyvista-doc-translations | 23bb813387b7f8bfe17e86c2244d5dd2243990db | [
"MIT"
] | 1 | 2021-03-09T07:50:40.000Z | 2021-03-09T07:50:40.000Z | # Label the Z position
values = grid.points[:, 2]
# Create plotting class and add the unstructured grid
plotter = pv.Plotter()
# color mesh according to z value
plotter.add_mesh(grid, scalars=values,
                 scalar_bar_args={'title': 'Z Position'},
                 show_edges=True)
# Add labels to points on the yz plane (where x == 0)
mask = grid.points[:, 0] == 0
plotter.add_point_labels(points[mask], values[mask].tolist(), font_size=24)
# add some text to the plot
plotter.add_text('Example showing plot labels')
plotter.view_vector((-6, -3, -4), (0.,-1., 0.))
plotter.show() | 31.263158 | 75 | 0.675084 | 92 | 594 | 4.26087 | 0.576087 | 0.076531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024793 | 0.185185 | 594 | 19 | 76 | 31.263158 | 0.785124 | 0.306397 | 0 | 0 | 0 | 0 | 0.103194 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33f6489ece71e9453add661efec4fd1216138fa6 | 5,676 | py | Python | paasta_tools/native_mesos_scheduler.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | paasta_tools/native_mesos_scheduler.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | paasta_tools/native_mesos_scheduler.py | yuanxu-li/paasta | 5b04f45659293f873c65111a9d1d0909aeed4019 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import argparse
import sys
import time
from time import sleep
from typing import Optional
from typing import Sequence
from typing import Tuple
from paasta_tools import mesos_tools
from paasta_tools.frameworks.native_scheduler import create_driver
from paasta_tools.frameworks.native_scheduler import get_paasta_native_jobs_for_cluster
from paasta_tools.frameworks.native_scheduler import load_paasta_native_job_config
from paasta_tools.frameworks.native_scheduler import NativeScheduler
from paasta_tools.long_running_service_tools import load_service_namespace_config
from paasta_tools.long_running_service_tools import ServiceNamespaceConfig
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaNotConfiguredError
def parse_args(argv):
    parser = argparse.ArgumentParser(description='Runs native paasta mesos scheduler.')
    parser.add_argument('-d', '--soa-dir', dest="soa_dir", metavar="SOA_DIR", default=DEFAULT_SOA_DIR)
    parser.add_argument('--stay-alive-seconds', dest="stay_alive_seconds", type=int, default=300)
    parser.add_argument('--periodic-interval', dest="periodic_interval", type=int, default=30)
    parser.add_argument('--staging-timeout', dest="staging_timeout", type=float, default=60)
    return parser.parse_args(argv)
def main(argv):
    args = parse_args(argv)

    system_paasta_config = load_system_paasta_config()
    cluster = system_paasta_config.get_cluster()

    drivers = []
    schedulers = []
    for service, instance in get_paasta_native_jobs_for_cluster(cluster=cluster, soa_dir=args.soa_dir):
        scheduler = NativeScheduler(
            service_name=service,
            instance_name=instance,
            cluster=cluster,
            staging_timeout=args.staging_timeout,
            system_paasta_config=system_paasta_config,
            soa_dir=args.soa_dir,
        )
        schedulers.append(scheduler)

        driver = create_driver(
            framework_name="paasta_native %s" % compose_job_id(service, instance),
            scheduler=scheduler,
            system_paasta_config=system_paasta_config,
        )
        driver.start()
        drivers.append(driver)

    end_time = time.time() + args.stay_alive_seconds
    while time.time() < end_time:
        sleep(args.periodic_interval)
        for scheduler, driver in zip(schedulers, drivers):
            scheduler.periodic(driver)

    return schedulers
def get_app_id_and_task_uuid_from_executor_id(executor_id):
"""Parse the paasta_native executor ID and return the (app id, task uuid)"""
return executor_id.rsplit('.', 1)
def parse_service_instance_from_executor_id(task_id):
app_id, task_uuid = get_app_id_and_task_uuid_from_executor_id(task_id)
(srv_name, srv_instance, _, __) = decompose_job_id(app_id)
return srv_name, srv_instance
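
# A small illustration (not from the original module); the executor id below
# is a made-up example of the '<service>.<instance>.<git sha>.<config sha>.<uuid>'
# layout produced by compose_job_id plus a trailing task uuid:
#
#     parse_service_instance_from_executor_id(
#         'myservice.main.gitdeadbeef.configdeadbeef.4f9a-uuid')
#     # -> ('myservice', 'main')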
def paasta_native_services_running_here(hostname=None, framework_id=None):
"""See what paasta_native services are being run by a mesos-slave on this host.
:returns: A list of triples of (service, instance, port)
:param hostname: query the mesos slave on this hostname.
:param framework_id: If specified, return info only for tasks belonging to this framework id.
"""
def framework_filter(fw):
return fw['name'].startswith('paasta_native ') and (framework_id is None or fw['id'] == framework_id)
return mesos_tools.mesos_services_running_here(
framework_filter=framework_filter,
parse_service_instance_from_executor_id=parse_service_instance_from_executor_id,
hostname=hostname,
)
def get_paasta_native_services_running_here_for_nerve(
    cluster: Optional[str],
    soa_dir: str,
    hostname: Optional[str] = None,
) -> Sequence[Tuple[str, ServiceNamespaceConfig]]:
    if not cluster:
        try:
            system_paasta_config = load_system_paasta_config()
            cluster = system_paasta_config.get_cluster()
        # In the cases where there is *no* cluster or in the case
        # where there isn't a Paasta configuration file at *all*, then
        # there must be no native services running here, so we catch
        # these custom exceptions and return [].
        except PaastaNotConfiguredError:
            return []
    if not system_paasta_config.get_register_native_services():
        return []
    # When a cluster is defined in mesos, let's iterate through paasta_native services
    paasta_native_services = paasta_native_services_running_here(hostname=hostname)
    nerve_list = []
    for name, instance, port in paasta_native_services:
        try:
            job_config = load_paasta_native_job_config(
                service=name,
                instance=instance,
                cluster=cluster,
                load_deployments=False,
                soa_dir=soa_dir,
            )
            for registration in job_config.get_registrations():
                reg_service, reg_namespace, _, __ = decompose_job_id(registration)
                nerve_dict = load_service_namespace_config(
                    service=reg_service, namespace=reg_namespace, soa_dir=soa_dir,
                )
                if not nerve_dict.is_in_smartstack():
                    continue
                nerve_dict['port'] = port
                nerve_list.append((registration, nerve_dict))
        except KeyError:
            continue  # SOA configs got deleted for this app, it'll get cleaned up
    return nerve_list
if __name__ == '__main__':
    main(sys.argv[1:])
| 39.692308 | 109 | 0.713354 | 722 | 5,676 | 5.282548 | 0.240997 | 0.044048 | 0.047195 | 0.026219 | 0.289198 | 0.235973 | 0.144206 | 0.079706 | 0.056633 | 0.039329 | 0 | 0.002015 | 0.213002 | 5,676 | 142 | 110 | 39.971831 | 0.851802 | 0.128964 | 0 | 0.130841 | 0 | 0 | 0.043788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065421 | false | 0 | 0.17757 | 0.009346 | 0.327103 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33f7e13a9320dff2213752e8e1f1ef5a669b102b | 1,070 | py | Python | chapter8/8_25Creating Cached Instances/8_25.py | atigerboy/PythonCookBook | e9238c7676063b5077a7645707ecc51052063d8d | [
"MIT"
] | null | null | null | chapter8/8_25Creating Cached Instances/8_25.py | atigerboy/PythonCookBook | e9238c7676063b5077a7645707ecc51052063d8d | [
"MIT"
] | null | null | null | chapter8/8_25Creating Cached Instances/8_25.py | atigerboy/PythonCookBook | e9238c7676063b5077a7645707ecc51052063d8d | [
"MIT"
] | null | null | null | '''
weakref
weakref.WeakValueDictionary
'''
import logging
a = logging.getLogger('foo')
b = logging.getLogger('bar')
print( a is b )
c = logging.getLogger('foo')
print( a is c )  # True: loggers requested with the same name are the same instance
# The class in question
class Spam:
    def __init__(self, name):
        self.name = name
# Caching support
import weakref
_spam_cache = weakref.WeakValueDictionary()
def get_spam(name):
    if name not in _spam_cache:
        s = Spam(name)
        _spam_cache[name] = s
    else:
        s = _spam_cache[name]
    return s
a = get_spam('foo')
b = get_spam('bar')
print( a is b )
c = get_spam('foo')
print( a is c )
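# Because the cache only holds weak references, entries vanish once the last
# strong reference is dropped (a small demo added here; gc.collect() is only
# needed on interpreters without immediate reference counting):
import gc
del a, b, c
gc.collect()
print(list(_spam_cache))  # -> []: the cached instances have been reclaimed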
# Alternative: do the caching inside __new__
class Spam:
    _spam_cache = weakref.WeakValueDictionary()

    def __new__(cls, name):
        if name in cls._spam_cache:
            return cls._spam_cache[name]
        else:
            self = super().__new__(cls)
            cls._spam_cache[name] = self
            return self

    def __init__(self, name):
        # note: __init__ still runs on every call, even for cached instances
        print('Initializing Spam')
        self.name = name
s = Spam('Dave')
t = Spam('Dave')
print( s is t ) | 21.4 | 55 | 0.626168 | 152 | 1,070 | 4.171053 | 0.269737 | 0.113565 | 0.050473 | 0.0347 | 0.198738 | 0.041009 | 0 | 0 | 0 | 0 | 0 | 0 | 0.257009 | 1,070 | 50 | 56 | 21.4 | 0.797484 | 0.11215 | 0 | 0.368421 | 0 | 0 | 0.045745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.052632 | 0 | 0.315789 | 0.157895 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33fca6b803aa07b91cf3c576d1059df9cb579535 | 3,986 | py | Python | console.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 39 | 2017-02-27T15:21:21.000Z | 2021-12-31T03:23:43.000Z | console.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 18 | 2017-07-09T00:16:28.000Z | 2021-12-03T21:01:38.000Z | console.py | maximilionus/pyspectator-x | 1265f1f39e7ca0534f9e6ffcd7087f2ebced3397 | [
"BSD-3-Clause"
] | 25 | 2017-03-05T07:59:34.000Z | 2021-12-15T15:22:58.000Z | import subprocess
import platform
from os import linesep
from time import sleep
from pyspectator.convert import UnitByte
CLEAR_CMD = 'cls' if platform.system() == 'Windows' else 'clear'
def clear():
    subprocess.call(CLEAR_CMD, shell=True)


def print_hr(space_before=False, space_after=False):
    before = linesep if space_before else ''
    after = linesep if space_after else ''
    print(before + '-' * 80 + after)
class Format(object):
    @staticmethod
    def temperature(value):
        formatted_value = ''
        if isinstance(value, (int, float)):
            formatted_value = str(value) + '°C'
        return formatted_value

    @staticmethod
    def byte_value(value):
        formatted_value = ''
        if isinstance(value, (int, float)):
            value, unit = UnitByte.auto_convert(value)
            value = '{:.2f}'.format(value)
            unit = UnitByte.get_name_reduction(unit)
            formatted_value = value + unit
        return formatted_value

    @staticmethod
    def percent(value):
        formatted_value = ''
        if isinstance(value, (int, float)):
            formatted_value = str(value) + '%'
        return formatted_value
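
# A quick illustration (not part of the original script) of the Format helpers:
#
#     Format.temperature(42.5)      # -> '42.5°C'
#     Format.byte_value(123456789)  # -> '117.74' plus a unit name from UnitByte
#     Format.percent(73)            # -> '73%'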
def main(computer):
    print('Start monitoring system...')
    print_hr(space_after=True)
    # Show system info for ~16 seconds
    for _ in range(16):
        clear()
        # Display general information about computer
        print('OS: ' + str(computer.os))
        print('Boot time: {}; Uptime: {}'.format(
            computer.boot_time, computer.uptime
        ))
        print('')
        # Display CPU info
        print('CPU name: ' + str(computer.processor.name))
        print('Amount of CPU cores: ' + str(computer.processor.count))
        print('CPU load: ' + Format.percent(computer.processor.load))
        cpu_temperature = 'unknown'
        if computer.processor.temperature is not None:
            cpu_temperature = Format.temperature(
                computer.processor.temperature
            )
        print('CPU temperature: ' + cpu_temperature)
        print('')
        # Display network info
        print('Hostname: ' + str(computer.hostname))
        print('Network interface: ' + str(computer.network_interface.name))
        print('MAC: ' + str(computer.network_interface.hardware_address))
        print('IP: {}; Mask: {}; Gateway: {}'.format(
            computer.network_interface.ip_address,
            computer.network_interface.subnet_mask,
            computer.network_interface.default_route
        ))
        print('Sent data: {}; Received data: {}'.format(
            Format.byte_value(computer.network_interface.bytes_sent),
            Format.byte_value(computer.network_interface.bytes_recv)
        ))
        print('')
        # Display virtual memory info
        print('Virtual memory: use {} from {}, {}'.format(
            Format.byte_value(computer.virtual_memory.available),
            Format.byte_value(computer.virtual_memory.total),
            Format.percent(computer.virtual_memory.used_percent)
        ))
        print('')
        # Display nonvolatile memory info
        output_format1 = '{:_^16}{:_^16}{:_^16}{:_^16}{:_^16}'
        output_format2 = '{: ^16}{: ^16}{: ^16}{: ^16}{: ^16}'
        print(output_format1.format('Device', 'Total', 'Use', 'Type', 'Mount'))
        for dev in computer.nonvolatile_memory:
            output_text = output_format2.format(
                dev.device,
                Format.byte_value(dev.total),
                Format.percent(dev.used_percent),
                dev.fstype,
                dev.mountpoint
            )
            print(output_text)
        sleep(1)
    print_hr(space_before=True)
    print('Shutdown monitoring system...')
if __name__ == '__main__':
    # Initialize computer instance
    from pyspectator.computer import Computer
    COMPUTER = Computer()
    # Start monitoring system
    with COMPUTER:
        # Start console interface
        main(COMPUTER)
| 33.779661 | 79 | 0.607627 | 419 | 3,986 | 5.610979 | 0.28401 | 0.053594 | 0.071459 | 0.039132 | 0.186304 | 0.142918 | 0.112293 | 0.074862 | 0.056146 | 0.056146 | 0 | 0.011088 | 0.275966 | 3,986 | 117 | 80 | 34.068376 | 0.803188 | 0.06272 | 0 | 0.215054 | 0 | 0.010753 | 0.108427 | 0.009393 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064516 | false | 0 | 0.064516 | 0 | 0.172043 | 0.258065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33feab5aa973cfa55aca4928158ae51920e92f2e | 11,500 | py | Python | scripts/bert/conversion_tools/convert_paddle_to_gluon.py | zburning/gluon-nlp | 101ce13bad3c26c802a4ff8ef47954fd2d0555d2 | [
"Apache-2.0"
] | 7 | 2019-12-05T02:49:07.000Z | 2020-08-17T01:11:59.000Z | scripts/bert/conversion_tools/convert_paddle_to_gluon.py | zburning/gluon-nlp | 101ce13bad3c26c802a4ff8ef47954fd2d0555d2 | [
"Apache-2.0"
] | null | null | null | scripts/bert/conversion_tools/convert_paddle_to_gluon.py | zburning/gluon-nlp | 101ce13bad3c26c802a4ff8ef47954fd2d0555d2 | [
"Apache-2.0"
] | 3 | 2021-03-12T04:41:00.000Z | 2021-03-12T04:41:24.000Z | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
import os
import sys
import numpy as np
import argparse
import logging
import json
import mxnet as mx
import gluonnlp as nlp
import paddle.fluid as fluid
from gluonnlp.model import BERTEncoder, BERTModel
from gluonnlp.model.bert import bert_hparams
from utils import get_hash, tf_vocab_to_gluon_vocab, load_text_vocab
parser = argparse.ArgumentParser()
parser.add_argument("--gluon_bert_model_base", default='ernie_12_768_12', type=str, help=".")
parser.add_argument("--init_pretraining_params", default='./ERNIE_stable-1.0.1/params',
type=str, help=".")
parser.add_argument("--ernie_config_path", default='./ERNIE_stable-1.0.1/ernie_config.json',
type=str, help=".")
parser.add_argument("--ernie_vocab_path", default='./ERNIE_stable-1.0.1/vocab.txt',
type=str, help=".")
parser.add_argument("--out_dir", default='./ernie_gluon_model2', type=str, help=".")
parser.add_argument("--baidu_lark_repo_dir", default='../../../../LARK', type=str,
help='path to the original baidu lark repository. '
'The repo should be at f97e3c8581e36dc1979560d62f75df862acd9585.'
'(https://github.com/PaddlePaddle/LARK.git)')
args = parser.parse_args()
sys.path = [os.path.join(args.baidu_lark_repo_dir,'ERNIE')] + sys.path
try:
    from model.ernie import ErnieConfig
    from finetune.classifier import create_model
except ImportError:
    raise ImportError('Please clone ERNIE first')
def if_exist(var):
    return os.path.exists(os.path.join(args.init_pretraining_params, var.name))
def build_weight_map():
    weight_map = collections.OrderedDict({
        'word_embedding': 'word_embed.0.weight',
        'pos_embedding': 'encoder.position_weight',
        'sent_embedding': 'token_type_embed.0.weight',
        'pre_encoder_layer_norm_scale': 'encoder.layer_norm.gamma',
        'pre_encoder_layer_norm_bias': 'encoder.layer_norm.beta',
    })

    def add_w_and_b(ernie_pre, gluon_pre):
        weight_map[ernie_pre + ".w_0"] = gluon_pre + ".weight"
        weight_map[ernie_pre + ".b_0"] = gluon_pre + ".bias"

    def add_one_encoder_layer(layer_number):
        # attention
        add_w_and_b("encoder_layer_{}_multi_head_att_query_fc".format(layer_number),
                    "encoder.transformer_cells.{}.attention_cell.proj_query".format(layer_number))
        add_w_and_b("encoder_layer_{}_multi_head_att_key_fc".format(layer_number),
                    "encoder.transformer_cells.{}.attention_cell.proj_key".format(layer_number))
        add_w_and_b("encoder_layer_{}_multi_head_att_value_fc".format(layer_number),
                    "encoder.transformer_cells.{}.attention_cell.proj_value".format(layer_number))
        add_w_and_b("encoder_layer_{}_multi_head_att_output_fc".format(layer_number),
                    "encoder.transformer_cells.{}.proj".format(layer_number))
        weight_map["encoder_layer_{}_post_att_layer_norm_bias".format(layer_number)] = \
            "encoder.transformer_cells.{}.layer_norm.beta".format(layer_number)
        weight_map["encoder_layer_{}_post_att_layer_norm_scale".format(layer_number)] = \
            "encoder.transformer_cells.{}.layer_norm.gamma".format(layer_number)
        # intermediate
        add_w_and_b("encoder_layer_{}_ffn_fc_0".format(layer_number),
                    "encoder.transformer_cells.{}.ffn.ffn_1".format(layer_number))
        # output
        add_w_and_b("encoder_layer_{}_ffn_fc_1".format(layer_number),
                    "encoder.transformer_cells.{}.ffn.ffn_2".format(layer_number))
        weight_map["encoder_layer_{}_post_ffn_layer_norm_bias".format(layer_number)] = \
            "encoder.transformer_cells.{}.ffn.layer_norm.beta".format(layer_number)
        weight_map["encoder_layer_{}_post_ffn_layer_norm_scale".format(layer_number)] = \
            "encoder.transformer_cells.{}.ffn.layer_norm.gamma".format(layer_number)

    for i in range(12):
        add_one_encoder_layer(i)
    add_w_and_b('pooled_fc', 'pooler')
    return weight_map
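
# A small illustration (not from the original script) of entries the mapping
# above produces, to make the naming scheme concrete:
#
#     m = build_weight_map()
#     m['encoder_layer_0_multi_head_att_query_fc.w_0']
#     # -> 'encoder.transformer_cells.0.attention_cell.proj_query.weight'
#     m['pooled_fc.b_0']  # -> 'pooler.bias'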
def extract_weights(args):
    # add ERNIE to environment
    print('extract weights start'.center(60, '='))
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    args.max_seq_len = 512
    args.use_fp16 = False
    args.num_labels = 2
    args.loss_scaling = 1.0

    print('model config:')
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()

    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            _, _ = create_model(
                args,
                pyreader_name='train',
                ernie_config=ernie_config)
    fluid.io.load_vars(exe, args.init_pretraining_params, main_program=test_prog, predicate=if_exist)

    state_dict = collections.OrderedDict()
    weight_map = build_weight_map()
    for ernie_name, gluon_name in weight_map.items():
        fluid_tensor = fluid.global_scope().find_var(ernie_name).get_tensor()
        fluid_array = np.array(fluid_tensor, dtype=np.float32)
        if 'w_0' in ernie_name:
            fluid_array = fluid_array.transpose()
        state_dict[gluon_name] = fluid_array
        print(f'{ernie_name} -> {gluon_name} {fluid_array.shape}')
    print('extract weights done!'.center(60, '='))
    return state_dict
def save_model(new_gluon_parameters, output_dir):
print('save model start'.center(60, '='))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# save model
# load vocab
vocab_f = open(os.path.join(output_dir, "vocab.txt"), "wt", encoding='utf-8')
with open(args.ernie_vocab_path, "rt", encoding='utf-8') as f:
for line in f:
data = line.strip().split("\t")
vocab_f.writelines(data[0] + "\n")
vocab_f.close()
vocab = tf_vocab_to_gluon_vocab(load_text_vocab(os.path.join(output_dir, "vocab.txt")))
# vocab serialization
tmp_file_path = os.path.expanduser(os.path.join(output_dir, 'tmp'))
if not os.path.exists(os.path.join(args.out_dir)):
os.makedirs(os.path.join(args.out_dir))
with open(tmp_file_path, 'w') as f:
f.write(vocab.to_json())
hash_full, hash_short = get_hash(tmp_file_path)
gluon_vocab_path = os.path.expanduser(os.path.join(output_dir, hash_short + '.vocab'))
with open(gluon_vocab_path, 'w') as f:
f.write(vocab.to_json())
logging.info('vocab file saved to %s. hash = %s', gluon_vocab_path, hash_full)
# BERT config
tf_config_names_to_gluon_config_names = {
'attention_probs_dropout_prob': 'embed_dropout',
'hidden_act': None,
'hidden_dropout_prob': 'dropout',
'hidden_size': 'units',
'initializer_range': None,
# 'intermediate_size': 'hidden_size',
'max_position_embeddings': 'max_length',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
'type_vocab_size': 'token_type_vocab_size',
'vocab_size': None
}
predefined_args = bert_hparams[args.gluon_bert_model_base]
with open(args.ernie_config_path, 'r') as f:
tf_config = json.load(f)
if 'layer_norm_eps' in tf_config: # ignore layer_norm_eps
del tf_config['layer_norm_eps']
assert len(tf_config) == len(tf_config_names_to_gluon_config_names)
    for tf_name, gluon_name in tf_config_names_to_gluon_config_names.items():
        if gluon_name is None:  # no Gluon counterpart to validate against
            continue
if gluon_name != 'max_length':
assert tf_config[tf_name] == predefined_args[gluon_name]
encoder = BERTEncoder(attention_cell=predefined_args['attention_cell'],
num_layers=predefined_args['num_layers'], units=predefined_args['units'],
hidden_size=predefined_args['hidden_size'],
max_length=predefined_args['max_length'],
num_heads=predefined_args['num_heads'], scaled=predefined_args['scaled'],
dropout=predefined_args['dropout'],
use_residual=predefined_args['use_residual'],
activation='relu')
bert = BERTModel(encoder, len(vocab),
token_type_vocab_size=predefined_args['token_type_vocab_size'],
units=predefined_args['units'], embed_size=predefined_args['embed_size'],
embed_dropout=predefined_args['embed_dropout'],
word_embed=predefined_args['word_embed'], use_pooler=True,
use_decoder=False, use_classifier=False)
bert.initialize(init=mx.init.Normal(0.02))
ones = mx.nd.ones((2, 8))
out = bert(ones, ones, mx.nd.array([5, 6]), mx.nd.array([[1], [2]]))
params = bert._collect_params_with_prefix()
assert len(params) == len(new_gluon_parameters), "Gluon model does not match paddle model. " \
"Please fix the BERTModel hyperparameters"
# post processings for parameters:
# - handle tied decoder weight
new_gluon_parameters['decoder.3.weight'] = new_gluon_parameters['word_embed.0.weight']
# set parameter data
loaded_params = {}
for name in params:
if name == 'word_embed.0.weight':
arr = mx.nd.array(new_gluon_parameters[name][:params[name].shape[0]])
else:
arr = mx.nd.array(new_gluon_parameters[name])
        if arr.shape != params[name].shape:
            raise AssertionError('shape mismatch for {}: extracted {} vs expected {}'
                                 .format(name, arr.shape, params[name].shape))
params[name].set_data(arr)
loaded_params[name] = True
# post processings for parameters:
# - handle tied decoder weight
# - update word embedding for reserved tokens
if len(params) != len(loaded_params):
raise RuntimeError('The Gluon BERTModel comprises {} parameter arrays, '
'but {} have been extracted from the paddle model. '.format(
len(params), len(loaded_params)))
# param serialization
bert.save_parameters(tmp_file_path)
hash_full, hash_short = get_hash(tmp_file_path)
gluon_param_path = os.path.expanduser(os.path.join(args.out_dir, hash_short + '.params'))
logging.info('param saved to %s. hash = %s', gluon_param_path, hash_full)
bert.save_parameters(gluon_param_path)
mx.nd.waitall()
    print('finished saving vocab and params')
print('save model done!'.center(60, '='))
if __name__ == "__main__":
state_dict = extract_weights(args)
save_model(state_dict, args.out_dir)
| 44.921875 | 101 | 0.670783 | 1,526 | 11,500 | 4.740498 | 0.22346 | 0.031933 | 0.047 | 0.033177 | 0.285043 | 0.275366 | 0.244816 | 0.191319 | 0.138098 | 0.089301 | 0 | 0.010282 | 0.213478 | 11,500 | 255 | 102 | 45.098039 | 0.789497 | 0.100783 | 0 | 0.05641 | 0 | 0 | 0.239763 | 0.124005 | 0 | 0 | 0 | 0 | 0.020513 | 1 | 0.030769 | false | 0 | 0.082051 | 0.005128 | 0.128205 | 0.046154 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
33fece3a93d648aa9dc2fd580f1e130193907359 | 1,065 | py | Python | django-rgd-imagery/rgd_imagery/admin/processed.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | [
"Apache-2.0"
] | 3 | 2020-03-10T14:47:07.000Z | 2020-05-05T16:55:27.000Z | django-rgd-imagery/rgd_imagery/admin/processed.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | [
"Apache-2.0"
] | 13 | 2020-04-14T14:36:06.000Z | 2020-05-07T15:03:42.000Z | django-rgd-imagery/rgd_imagery/admin/processed.py | Kitware/ResonantGeoData | 6d111cbe1d57df2cd230edcf4724f6e33471f5ff | [
"Apache-2.0"
] | 1 | 2020-03-03T15:47:52.000Z | 2020-03-03T15:47:52.000Z | from django.contrib import admin
from django.contrib.gis.admin import OSMGeoAdmin
from rgd.admin.mixins import MODIFIABLE_FILTERS, TASK_EVENT_FILTERS, TASK_EVENT_READONLY, reprocess
from rgd_imagery.models import ProcessedImage, ProcessedImageGroup
class ProcessedImageAdmin(admin.StackedInline):
model = ProcessedImage
fk_name = 'group'
extra = 0
list_display = (
'pk',
'status',
'modified',
'created',
)
readonly_fields = MODIFIABLE_FILTERS + TASK_EVENT_READONLY
actions = (reprocess,)
list_filter = MODIFIABLE_FILTERS + TASK_EVENT_FILTERS
raw_id_fields = (
'source_images',
'processed_image',
'ancillary_files',
)
@admin.register(ProcessedImageGroup)
class ProcessedImageGroupAdmin(OSMGeoAdmin):
list_display = (
'pk',
'process_type',
'modified',
'created',
)
readonly_fields = MODIFIABLE_FILTERS
actions = (reprocess,)
list_filter = ('process_type',) + MODIFIABLE_FILTERS
inlines = (ProcessedImageAdmin,)
| 27.307692 | 99 | 0.687324 | 102 | 1,065 | 6.901961 | 0.490196 | 0.120739 | 0.090909 | 0.110795 | 0.224432 | 0.130682 | 0 | 0 | 0 | 0 | 0 | 0.001212 | 0.225352 | 1,065 | 38 | 100 | 28.026316 | 0.852121 | 0 | 0 | 0.294118 | 0 | 0 | 0.105164 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.558824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d5036b24bf1ba9544ffa7388314b414500db5c1b | 7,755 | py | Python | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/test_GraphComponentSync.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-13T00:01:12.000Z | 2021-09-13T00:01:12.000Z | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/test_GraphComponentSync.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/largeworlds/landscape_canvas/test_GraphComponentSync.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-20T11:07:25.000Z | 2021-07-20T11:07:25.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
"""
C4705586 - Altering connections on graph nodes appropriately updates component properties
C22715182 - Components are updated when nodes are added/removed/updated
C22602072 - Graph is updated when underlying components are added/removed
C15987206 - Gradient Mixer Layers are properly setup when constructing in a graph
C21333743 - Vegetation Layer Blenders are properly setup when constructing in a graph
"""
import os
import pytest
# Bail on the test if ly_test_tools doesn't exist.
pytest.importorskip('ly_test_tools')
import ly_test_tools.environment.file_system as file_system
import ly_test_tools._internal.pytest_plugin as internal_plugin
import editor_python_test_tools.hydra_test_utils as hydra
test_directory = os.path.join(os.path.dirname(__file__), 'EditorScripts')
@pytest.mark.parametrize('project', ['AutomatedTesting'])
@pytest.mark.parametrize('level', ['tmp_level'])
@pytest.mark.usefixtures("automatic_process_killer")
@pytest.mark.parametrize("launcher_platform", ['windows_editor'])
class TestGraphComponentSync(object):
@pytest.fixture(autouse=True)
def setup_teardown(self, request, workspace, project, level):
def teardown():
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
request.addfinalizer(teardown)
file_system.delete([os.path.join(workspace.paths.engine_root(), project, "Levels", level)], True, True)
@pytest.mark.test_case_id('C4705586')
@pytest.mark.BAT
@pytest.mark.SUITE_main
def test_LandscapeCanvas_SlotConnections_UpdateComponentReferences(self, request, editor, level, launcher_platform):
# Skip test if running against Debug build
if "debug" in internal_plugin.build_directory:
pytest.skip("Does not execute against debug builds.")
cfg_args = [level]
expected_lines = [
"Landscape Canvas pane is open",
"New graph created",
"Graph registered with Landscape Canvas",
"Random Noise Gradient component Preview Entity property set to Box Shape EntityId",
"Dither Gradient Modifier component Inbound Gradient property set to Random Noise Gradient EntityId",
"Gradient Mixer component Inbound Gradient extendable property set to Dither Gradient Modifier EntityId",
"SlotConnectionsUpdateComponents: result=SUCCESS"
]
hydra.launch_and_validate_results(request, test_directory, editor,
'SlotConnections_UpdateComponentReferences.py', expected_lines,
cfg_args=cfg_args)
@pytest.mark.test_case_id('C22715182')
@pytest.mark.SUITE_periodic
def test_LandscapeCanvas_GraphUpdates_UpdateComponents(self, request, editor, level, launcher_platform):
cfg_args = [level]
expected_lines = [
'Rotation Modifier component was removed from entity',
'BushSpawner entity was deleted',
'Gradient Entity Id reference was properly updated',
'GraphUpdatesUpdateComponents: result=SUCCESS'
]
unexpected_lines = [
'Rotation Modifier component is still present on entity',
'Failed to delete BushSpawner entity',
'Gradient Entity Id was not updated properly'
]
hydra.launch_and_validate_results(request, test_directory, editor, 'GraphUpdates_UpdateComponents.py',
expected_lines, unexpected_lines=unexpected_lines,
cfg_args=cfg_args)
@pytest.mark.test_case_id('C22602072')
@pytest.mark.SUITE_periodic
def test_LandscapeCanvas_ComponentUpdates_UpdateGraph(self, request, editor, level, launcher_platform):
cfg_args = [level]
expected_lines = [
"LandscapeCanvas entity found",
"BushSpawner entity found",
"Vegetation Distribution Filter on BushSpawner entity found",
"Graph opened",
"Distribution Filter node found on graph",
"Vegetation Altitude Filter on BushSpawner entity found",
"Altitude Filter node found on graph",
"Vegetation Distribution Filter removed from BushSpawner entity",
"Distribution Filter node was removed from the graph",
"New entity successfully added as a child of the BushSpawner entity",
"Box Shape on Box entity found",
"Box Shape node found on graph",
'ComponentUpdatesUpdateGraph: result=SUCCESS'
]
unexpected_lines = [
"Distribution Filter node not found on graph",
"Distribution Filter node is still present on the graph",
"Altitude Filter node not found on graph",
"New entity added with an unexpected parent",
"Box Shape node not found on graph"
]
hydra.launch_and_validate_results(request, test_directory, editor, 'ComponentUpdates_UpdateGraph.py',
expected_lines, unexpected_lines=unexpected_lines, cfg_args=cfg_args)
@pytest.mark.test_case_id('C15987206')
@pytest.mark.SUITE_main
def test_LandscapeCanvas_GradientMixer_NodeConstruction(self, request, editor, level, launcher_platform):
"""
        Verifies a Gradient Mixer can be set up in Landscape Canvas and all references are properly set.
"""
# Skip test if running against Debug build
if "debug" in internal_plugin.build_directory:
pytest.skip("Does not execute against debug builds.")
cfg_args = [level]
expected_lines = [
'Landscape Canvas pane is open',
'New graph created',
'Graph registered with Landscape Canvas',
'Perlin Noise Gradient component Preview Entity property set to Box Shape EntityId',
'Gradient Mixer component Inbound Gradient extendable property set to Perlin Noise Gradient EntityId',
'Gradient Mixer component Inbound Gradient extendable property set to FastNoise Gradient EntityId',
'Configuration|Layers|[0]|Operation set to 0',
'Configuration|Layers|[1]|Operation set to 6',
'GradientMixerNodeConstruction: result=SUCCESS'
]
hydra.launch_and_validate_results(request, test_directory, editor, 'GradientMixer_NodeConstruction.py',
expected_lines, cfg_args=cfg_args)
@pytest.mark.test_case_id('C21333743')
@pytest.mark.SUITE_periodic
def test_LandscapeCanvas_LayerBlender_NodeConstruction(self, request, editor, level, launcher_platform):
"""
        Verifies a Layer Blender can be set up in Landscape Canvas and all references are properly set.
"""
cfg_args = [level]
expected_lines = [
'Landscape Canvas pane is open',
'New graph created',
'Graph registered with Landscape Canvas',
'Vegetation Layer Blender component Vegetation Areas[0] property set to Vegetation Layer Spawner EntityId',
'Vegetation Layer Blender component Vegetation Areas[1] property set to Vegetation Layer Blocker EntityId',
'LayerBlenderNodeConstruction: result=SUCCESS'
]
hydra.launch_and_validate_results(request, test_directory, editor, 'LayerBlender_NodeConstruction.py',
expected_lines, cfg_args=cfg_args)
| 46.437126 | 155 | 0.680206 | 850 | 7,755 | 6.055294 | 0.245882 | 0.029143 | 0.020206 | 0.017486 | 0.531183 | 0.501069 | 0.453662 | 0.411502 | 0.384107 | 0.338255 | 0 | 0.014961 | 0.250161 | 7,755 | 166 | 156 | 46.716867 | 0.870163 | 0.067311 | 0 | 0.284483 | 0 | 0 | 0.402453 | 0.060893 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060345 | false | 0 | 0.051724 | 0 | 0.12069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d50d9e2ca679df89c4e0a95ae7adb1bc28f9bbe3 | 4,126 | py | Python | pyclustering/cluster/mbsas.py | JosephChataignon/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 1,013 | 2015-01-26T19:50:14.000Z | 2022-03-31T07:38:48.000Z | pyclustering/cluster/mbsas.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 542 | 2015-01-20T16:44:32.000Z | 2022-01-29T14:57:20.000Z | pyclustering/cluster/mbsas.py | peterlau0626/pyclustering | bf4f51a472622292627ec8c294eb205585e50f52 | [
"BSD-3-Clause"
] | 262 | 2015-03-19T07:28:12.000Z | 2022-03-30T07:28:24.000Z | """!
@brief Cluster analysis algorithm: MBSAS (Modified Basic Sequential Algorithmic Scheme).
@details Implementation based on paper @cite book::pattern_recognition::2009.
@authors Andrei Novikov (pyclustering@yandex.ru)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.core.mbsas_wrapper import mbsas as mbsas_wrapper
from pyclustering.core.metric_wrapper import metric_wrapper
from pyclustering.cluster.bsas import bsas
class mbsas(bsas):
"""!
@brief Class represents MBSAS (Modified Basic Sequential Algorithmic Scheme).
@details Interface of MBSAS algorithm is the same as for BSAS. This algorithm performs clustering in two steps.
             The first step determines the number of clusters. The second step assigns the points that
             were not marked as cluster representatives to clusters.
Code example of MBSAS usage:
@code
from pyclustering.cluster.bsas import bsas_visualizer
from pyclustering.cluster.mbsas import mbsas
from pyclustering.utils import read_sample
from pyclustering.samples.definitions import SIMPLE_SAMPLES
# Read data sample from 'Simple02.data'.
sample = read_sample(SIMPLE_SAMPLES.SAMPLE_SIMPLE2)
# Prepare algorithm's parameters.
max_clusters = 3
threshold = 1.0
# Create instance of MBSAS algorithm.
mbsas_instance = mbsas(sample, max_clusters, threshold)
mbsas_instance.process()
# Get clustering results.
clusters = mbsas_instance.get_clusters()
representatives = mbsas_instance.get_representatives()
# Display results.
bsas_visualizer.show_clusters(sample, clusters, representatives)
@endcode
@see pyclustering.cluster.bsas, pyclustering.cluster.ttsas
"""
def __init__(self, data, maximum_clusters, threshold, ccore=True, **kwargs):
"""!
@brief Creates MBSAS algorithm.
@param[in] data (list): Input data that is presented as list of points (objects), each point should be represented by list or tuple.
@param[in] maximum_clusters: Maximum allowable number of clusters that can be allocated during processing.
@param[in] threshold: Threshold of dissimilarity (maximum distance) between points.
        @param[in] ccore (bool): If True then the DLL CCORE (C++ solution) will be used for processing.
@param[in] **kwargs: Arbitrary keyword arguments (available arguments: 'metric').
<b>Keyword Args:</b><br>
- metric (distance_metric): Metric that is used for distance calculation between two points.
"""
super().__init__(data, maximum_clusters, threshold, ccore, **kwargs)
def process(self):
"""!
@brief Performs cluster analysis in line with MBSAS algorithm.
@return (mbsas) Returns itself (MBSAS instance).
@see get_clusters()
@see get_representatives()
"""
if self._ccore is True:
self.__process_by_ccore()
else:
            self.__process_by_python()
return self
def __process_by_ccore(self):
ccore_metric = metric_wrapper.create_instance(self._metric)
self._clusters, self._representatives = mbsas_wrapper(self._data, self._amount, self._threshold, ccore_metric.get_pointer())
    def __process_by_python(self):
self._clusters.append([0])
self._representatives.append(self._data[0])
skipped_objects = []
for i in range(1, len(self._data)):
point = self._data[i]
index_cluster, distance = self._find_nearest_cluster(point)
if (distance > self._threshold) and (len(self._clusters) < self._amount):
self._representatives.append(point)
self._clusters.append([i])
else:
skipped_objects.append(i)
for i in skipped_objects:
point = self._data[i]
index_cluster, _ = self._find_nearest_cluster(point)
self._clusters[index_cluster].append(i)
self._update_representative(index_cluster, point) | 34.966102 | 140 | 0.676442 | 481 | 4,126 | 5.611227 | 0.345114 | 0.041497 | 0.025565 | 0.020748 | 0.129678 | 0.085217 | 0.038533 | 0 | 0 | 0 | 0 | 0.007027 | 0.241154 | 4,126 | 118 | 141 | 34.966102 | 0.854998 | 0.55841 | 0 | 0.125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.09375 | 0 | 0.28125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d50e06f01333daeea1620e158ae353617743fd95 | 2,306 | py | Python | activitysim/example/extensions/data_exchange.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | activitysim/example/extensions/data_exchange.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | activitysim/example/extensions/data_exchange.py | ual/DOE-repo-deliverable | 4bafdd9a702a9a6466dd32ae62f440644d735d3c | [
"BSD-3-Clause"
] | null | null | null | import pandas as pd
# system vars
path_to_usim = '/home/mgardner/src/bayarea_urbansim/'
usim_data_dir = 'data/'
usim_output_dir = 'output/'
usim_h5_file = '2015_09_01_bayarea_v3.h5'
path_to_asim = '/home/mgardner/src/activitysim/'
asim_data_dir = 'example/data/'
asim_h5_file = 'mtc_asim.h5'
# load both data stores
usim_store = pd.HDFStore(path_to_usim + usim_data_dir + usim_h5_file)
asim_store = pd.HDFStore(path_to_asim + asim_data_dir + asim_h5_file)
# replace asim households with usim households
usim_households = usim_store['households'].copy()
usim_store.close()
asim_col_names = asim_store['households'].columns
asim_index_name = asim_store['households'].index.name
asim_households = usim_households
asim_households.columns = asim_col_names.tolist() + \
usim_households.columns.tolist()[len(asim_col_names):]
asim_households.index.name = asim_index_name
asim_store.put('households', asim_households, format='table')
# drop asim persons with no households in the updated households table
asim_persons = asim_store['persons']
persons_mask = asim_persons.household_id.isin(asim_store['households'].index)
asim_persons = asim_persons[persons_mask]
asim_store.put('persons', asim_persons, format='table')
# replace asim land_use/taz_data with usim taz baseyear summaries
usim_taz_filename = 'baseyear_taz_summaries_2010.csv'
usim_taz_summaries = pd.read_csv(
path_to_usim + usim_output_dir + usim_taz_filename)
asim_taz_summaries = asim_store['land_use/taz_data']
assert len(asim_taz_summaries) == len(usim_taz_summaries)
asim_taz_persist = asim_taz_summaries[[ # these need to get updated somehow
'HSENROLL', 'COLLFTE', 'COLLPTE', 'TOPOLOGY', 'ZERO']] # persisting is a stop-gap!
asim_index_name = asim_taz_summaries.index.name
usim_taz_summaries.set_index('zone_id', inplace=True)
usim_taz_summaries.index.name = asim_index_name
usim_taz_summaries.rename(
columns={'GQPOP': 'gqpop', 'AREA_TYPE': 'area_type'}, inplace=True)
usim_taz_summaries.loc[:, 'hhlds'] = usim_taz_summaries['TOTHH']
usim_taz_summaries.loc[:, 'sftaz'] = usim_taz_summaries.index.values
usim_taz_summaries = pd.merge(
usim_taz_summaries, asim_taz_persist, left_index=True, right_index=True)
asim_store.put('land_use/taz_data', usim_taz_summaries, format='table')
# close up shop
asim_store.close()
| 41.927273 | 88 | 0.793148 | 350 | 2,306 | 4.842857 | 0.285714 | 0.120354 | 0.113274 | 0.030089 | 0.160472 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 0.009095 | 0.094102 | 2,306 | 54 | 89 | 42.703704 | 0.802298 | 0.123591 | 0 | 0 | 0 | 0 | 0.176441 | 0.060636 | 0 | 0 | 0 | 0 | 0.02439 | 1 | 0 | false | 0 | 0.02439 | 0 | 0.02439 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d50e3f18bc5365f3dc7c9e1baf034077011723f5 | 970 | py | Python | project_watson/utils.py | alexisgourdol/project_watson | 706378a51833534b5188ca705054074ec2bc4ee2 | [
"MIT"
] | 3 | 2020-09-07T09:06:30.000Z | 2020-09-22T10:27:26.000Z | project_watson/utils.py | alexisgourdol/project_watson | 706378a51833534b5188ca705054074ec2bc4ee2 | [
"MIT"
] | 6 | 2020-09-07T09:42:54.000Z | 2020-09-14T10:23:36.000Z | project_watson/utils.py | alexisgourdol/project_watson | 706378a51833534b5188ca705054074ec2bc4ee2 | [
"MIT"
] | null | null | null | import time
################
# DECORATORS #
################
def simple_time_tracker(method):
"""Time tracker to check the fitting times when training the models."""
def timed(*args, **kw):
ts = time.time()
result = method(*args, **kw)
te = time.time()
if "log_time" in kw:
name = kw.get("log_name", method.__name__.upper())
kw["log_time"][name] = int((te - ts))
else:
print(method.__name__, round(te - ts, 2))
return result
return timed
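
# A minimal usage sketch of the decorator above (the function below is
# hypothetical and not part of this project):
#
#     @simple_time_tracker
#     def fit_model(n_iter, log_time=None):
#         time.sleep(0.1)  # stand-in for a real training step
#
#     fit_model(10)                     # prints "fit_model 0.1"
#     timings = {}
#     fit_model(10, log_time=timings)   # stores int seconds under "FIT_MODEL"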
def geocoder_here(adress, token=HERE_API_KEY):
"""
adress: 4 Av du General de Gaulle
==> {'Latitude': 48.85395, 'Longitude': 2.27758}
"""
geocoderApi = herepy.GeocoderApi(api_key=token)
res = geocoderApi.free_form(adress)
res = res.as_dict()
coords = res["Response"]["View"][0]["Result"][0]["Location"]["DisplayPosition"]
coords = {k.lower(): v for k, v in coords.items()}
return coords
| 28.529412 | 83 | 0.570103 | 122 | 970 | 4.377049 | 0.557377 | 0.041199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02332 | 0.248454 | 970 | 33 | 84 | 29.393939 | 0.709191 | 0.168041 | 0 | 0 | 0 | 0 | 0.087015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.05 | 0 | 0.35 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d50e69416441ec59cb3e6ea30d341781ca743940 | 2,712 | py | Python | libs/environments/gym.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/environments/gym.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | libs/environments/gym.py | eeshakumar/hythe | 52ca795c8370ddfb2aa6fb87ff3f63a85c55f913 | [
"MIT"
] | null | null | null | import gym
from gym.envs.registration import register
from bark_ml.environments.single_agent_runtime import SingleAgentRuntime
from bark_ml.environments.blueprints.highway.highway import DiscreteHighwayBlueprint
from bark_ml.environments.blueprints.merging.merging import DiscreteMergingBlueprint
from bark_ml.environments.blueprints.intersection.intersection import DiscreteIntersectionBlueprint
from hythe.libs.blueprint.blueprint import HyHighwayDiscreteBlueprint
class HyDiscreteHighway(SingleAgentRuntime, gym.Env):
def __init__(self, params=None, num_scenarios=25, random_seed=0, viewer=True,
behavior=None, evaluator=None, observer=None, scenario_generation=None,
map_filename=None, blueprint=None, render=True):
if blueprint is None:
self._blueprint = HyHighwayDiscreteBlueprint(params=params,
map_filename=map_filename,
num_scenarios=num_scenarios,
random_seed=random_seed,
behavior=behavior,
evaluator=evaluator,
observer=observer,
scenario_generation=scenario_generation,
viewer=viewer)
else:
self._blueprint = blueprint
SingleAgentRuntime.__init__(self, blueprint=self._blueprint, render=render)
@property
def blueprint(self):
return self._blueprint
class HyDiscreteMerging(SingleAgentRuntime, gym.Wrapper):
def __init__(self, params):
self._blueprint = DiscreteMergingBlueprint(params)
SingleAgentRuntime.__init__(self, blueprint=self._blueprint, render=True)
class HyDiscreteIntersection(SingleAgentRuntime, gym.Wrapper):
def __init__(self, params):
self._blueprint = DiscreteIntersectionBlueprint(params)
SingleAgentRuntime.__init__(self, blueprint=self._blueprint, render=True)
class GymSingleAgentRuntime(SingleAgentRuntime, gym.Wrapper):
def __init__(self, *args, **kwargs):
SingleAgentRuntime.__init__(self, *args, **kwargs)
# register gym envs
register(
id="hy-highway-v0",
entry_point="hythe.modules.environments.gym:HyDiscreteHighway"
)
register(
id="hy-merging-v0",
entry_point="hythe.modules.environments.gym:HyDiscreteMerging"
)
register(
id="hy-intersection-v0",
entry_point="hythe.modules.environments.gym:HyDiscreteIntersection"
)
| 38.197183 | 99 | 0.643068 | 233 | 2,712 | 7.223176 | 0.27897 | 0.084967 | 0.023767 | 0.052288 | 0.332739 | 0.275698 | 0.252525 | 0.150921 | 0.150921 | 0.081996 | 0 | 0.003093 | 0.284661 | 2,712 | 70 | 100 | 38.742857 | 0.864433 | 0.006268 | 0 | 0.14 | 0 | 0 | 0.071667 | 0.055329 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.14 | 0.02 | 0.34 | 0.3 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d50f0b1b425f7055c0a2f622927cba3c742a11b0 | 805 | py | Python | openapi_core/schema/security_schemes/models.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/security_schemes/models.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | null | null | null | openapi_core/schema/security_schemes/models.py | eyadgaran/openapi-core | ff4a6c81eeda0e2274aa9dc03597779c141e5728 | [
"BSD-3-Clause"
] | 1 | 2022-01-19T21:23:56.000Z | 2022-01-19T21:23:56.000Z | """OpenAPI core security schemes models module"""
from openapi_core.schema.security_schemes.enums import (
SecuritySchemeType, ApiKeyLocation, HttpAuthScheme,
)
class SecurityScheme(object):
"""Represents an OpenAPI Security Scheme."""
def __init__(
self, scheme_type, description=None, name=None, apikey_in=None,
scheme=None, bearer_format=None, flows=None,
open_id_connect_url=None,
):
self.type = SecuritySchemeType(scheme_type)
self.description = description
self.name = name
self.apikey_in = apikey_in and ApiKeyLocation(apikey_in)
self.scheme = scheme and HttpAuthScheme(scheme)
self.bearer_format = bearer_format
self.flows = flows
self.open_id_connect_url = open_id_connect_url
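
# Illustrative construction (assuming the enums accept the OpenAPI spec string
# values, e.g. 'apiKey' for the scheme type and 'header' for the key location):
#
#     scheme = SecurityScheme('apiKey', name='X-API-Key', apikey_in='header')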
| 35 | 75 | 0.695652 | 92 | 805 | 5.826087 | 0.391304 | 0.059701 | 0.072761 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.223602 | 805 | 22 | 76 | 36.590909 | 0.8576 | 0.101863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d51062e86ee2d313db17fee23177b0806f0fa5c7 | 895 | py | Python | tests/test_formulas.py | mnishida/refractiveindex.info-Pandas | 05317271ee9e550287887536ee03485164949ba2 | [
"MIT"
] | 1 | 2021-08-04T05:09:16.000Z | 2021-08-04T05:09:16.000Z | tests/test_formulas.py | mnishida/refractiveindex.info-Pandas | 05317271ee9e550287887536ee03485164949ba2 | [
"MIT"
] | 4 | 2021-08-16T07:15:10.000Z | 2021-09-04T08:47:47.000Z | tests/test_formulas.py | mnishida/refractiveindex.info-Pandas | 05317271ee9e550287887536ee03485164949ba2 | [
"MIT"
] | 1 | 2021-08-04T05:09:13.000Z | 2021-08-04T05:09:13.000Z | import numpy as np
import numpy.testing as npt
import riip
from riip.formulas import formulas_cython_dict, formulas_numpy_dict
def test_cython_formulas():
rid = riip.RiiDataFrame()
fbps = {
1: ("MgAl2O4", "Tropf"),
2: ("Ar", "Borzsonyi"),
3: ("methanol", "Moutzouris"),
4: ("BaB2O4", "Eimerl-o"),
5: ("SiC", "Shaffer"),
6: ("Ar", "Bideau-Mehu"),
7: ("Si", "Edwards"),
8: ("AgBr", "Schröter"),
9: ("urea", "Rosker-e"),
21: ("Ag", "Rakic-DLF"),
22: ("Cu", "Rakic-BBF"),
}
for f, (b, p) in fbps.items():
m = rid.material({"book": b, "page": p})
wls = np.linspace(m.wl_max, m.wl_min)
ws = 2 * np.pi / wls
f_c = [formulas_cython_dict[f](w, m.cs) for w in ws]
f_n = [formulas_numpy_dict[f](wl, m.cs) for wl in wls]
npt.assert_allclose(f_c, f_n)
| 28.870968 | 67 | 0.52514 | 128 | 895 | 3.539063 | 0.585938 | 0.048565 | 0.07947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028081 | 0.283799 | 895 | 30 | 68 | 29.833333 | 0.678627 | 0 | 0 | 0 | 0 | 0 | 0.157542 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 1 | 0.038462 | false | 0 | 0.153846 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d108170bdbd66d850dc91b904933812c15cd5c8 | 6,823 | py | Python | checkers/gallery/checker.py | vient/proctf-2019 | b7b954fff2396a7a7a83c90ec55d75bce4a3485c | [
"MIT"
] | 2 | 2020-04-22T19:36:16.000Z | 2020-09-16T07:45:54.000Z | checkers/gallery/checker.py | vient/proctf-2019 | b7b954fff2396a7a7a83c90ec55d75bce4a3485c | [
"MIT"
] | 3 | 2021-03-31T19:21:51.000Z | 2021-06-08T20:31:48.000Z | checkers/gallery/checker.py | leetchicken/proctf-2019 | b7b954fff2396a7a7a83c90ec55d75bce4a3485c | [
"MIT"
] | 3 | 2019-10-26T00:25:03.000Z | 2019-11-23T21:10:10.000Z | #!/usr/bin/env python3
import sys
import requests
import hashlib
import random
import re
import functools
import time
import json
import pathlib
import base64
import traceback
import PIL.Image
from io import BytesIO
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Hash import SHA256
import paintings
from user_agents import USER_AGENTS
OK, CORRUPT, MUMBLE, DOWN, CHECKER_ERROR = 101, 102, 103, 104, 110
PORT = 80
TIMEOUT = 3
SCRIPT_PATH = pathlib.Path(__file__).parent
signer = PKCS1_v1_5.new(RSA.importKey(open("private.pem").read()))
def get_team_num(host):
m = re.search(r"\d+\.\d+\.(\d+)\.\d+", host)
if not m:
return None
return m.group(1)
def parse_jpg_bytes(body):
try:
painting = PIL.Image.open(BytesIO(body))
except Exception as e:
return None
return painting
def create_session():
s = requests.Session()
# add timeouts
s.get = functools.partial(s.get, timeout=TIMEOUT)
s.post = functools.partial(s.post, timeout=TIMEOUT)
s.put = functools.partial(s.put, timeout=TIMEOUT)
s.headers["User-Agent"] = random.choice(USER_AGENTS)
return s
def call_get_paintings_api(session, host):
url = "http://%s:%d/paintings" % (host, PORT)
ans = session.get(url)
if ans.status_code != 200:
return None
ans_obj = ans.json()
if type(ans_obj) is not list or any(type(o) != str for o in ans_obj):
return None
return ans_obj
def call_put_painting_api(session, host, reward, painting_bytes):
url = "http://%s:%d/painting?reward=%s" % (host, PORT, reward)
h = SHA256.new()
h.update(host.encode())
h.update(painting_bytes)
sign = signer.sign(h)
ans = session.put(url, data=painting_bytes, headers={"X_SIGN": base64.b64encode(sign)})
if ans.status_code != 200:
return None
ans_obj = ans.json()
if type(ans_obj) is not dict:
return None
return ans_obj
def call_post_replica_api(session, host, painting_id, replica_bytes):
url = "http://%s:%d/replica?id=%s" % (host, PORT, painting_id)
ans = session.post(url, data=replica_bytes)
if ans.status_code != 200:
return None
ans_obj = ans.json()
if type(ans_obj) is not dict:
return None
return ans_obj
def call_get_preview_api(session, host, painting_id):
url = "http://%s:%d/preview?id=%s" % (host, PORT, painting_id)
ans = session.get(url)
if ans.status_code != 200:
return None
ans_bytes = ans.content
return ans_bytes
def verdict(exit_code, public="", private=""):
if public:
print(public)
if private:
print(private, file=sys.stderr)
sys.exit(exit_code)
def info():
verdict(OK, "vulns: 1")
def check(host):
if random.randint(0,1) == 0:
print("Lucky! skipping check", file=sys.stderr)
verdict(OK)
start = time.time()
s = create_session()
ids = call_get_paintings_api(s, host)
elapsed = time.time() - start
print(f"Done call_get_paintings_api in {elapsed}", file=sys.stderr)
if ids is None:
verdict(MUMBLE, "Can't get valid paintings list", f"Can't get valid paintings list")
verdict(OK)
def put(host, flag_id, flag, vuln):
s = create_session()
file_name, painting_bytes = paintings.next_painting(get_team_num(host))
start = time.time()
ans = call_put_painting_api(s, host, flag, painting_bytes)
elapsed = time.time() - start
print(f"Done call_put_painting_api in {elapsed}", file=sys.stderr)
if ans is None:
verdict(MUMBLE, "Malformed response", "Malformed response: can't parse as expected")
error = ans.get("error")
if error is not None:
verdict(MUMBLE, "Error from service", f"Error from service: {error}")
painting_id = ans.get("id")
if painting_id is None:
verdict(MUMBLE, "Painting id not found", "Painting id not found")
flag_id = base64.b64encode(json.dumps({"file": file_name, "id": painting_id}).encode()).decode()
verdict(OK, flag_id)
def get(host, flag_id, flag, vuln):
obj = json.loads(base64.b64decode(flag_id))
file_name = obj["file"]
painting_id = obj["id"]
start = time.time()
s = create_session()
ids = call_get_paintings_api(s, host)
elapsed = time.time() - start
print(f"Done call_get_paintings_api in {elapsed}", file=sys.stderr)
if ids is None or painting_id not in ids:
verdict(MUMBLE, "Can't find painting id in list", "Can't find painting id in list")
start = time.time()
preview_bytes = call_get_preview_api(s, host, painting_id)
elapsed = time.time() - start
print(f"Done call_get_preview_api in {elapsed}", file=sys.stderr)
if preview_bytes is None:
verdict(MUMBLE, "Can't get preview", "Can't get preview")
preview_img = parse_jpg_bytes(preview_bytes)
if preview_img is None:
verdict(MUMBLE, "Can't parse preview", "Can't parse preview")
# TODO check preview image
team_num = get_team_num(host)
methods = [paintings.get_replica, paintings.get_copy, paintings.get_random_painting]
random.shuffle(methods)
for m in methods:
name, painting_bytes = m(team_num, file_name)
start = time.time()
ans = call_post_replica_api(s, host, painting_id, painting_bytes)
elapsed = time.time() - start
print(f"{file_name}: sent file {name} in {elapsed}: ans {ans}", file=sys.stderr)
if ans is not None and ans.get("reward") is not None:
reward = ans.get("reward")
if reward != flag:
verdict(CORRUPT, "Got invalid reward", f"Got invalid reward: {reward}")
else:
verdict(OK)
verdict(CORRUPT, "Can't get valid reward", "Can't get valid reward")
def main(args):
CMD_MAPPING = {
"info": (info, 0),
"check": (check, 1),
"put": (put, 4),
"get": (get, 4),
}
if not args:
verdict(CHECKER_ERROR, "No args", "No args")
cmd, args = args[0], args[1:]
if cmd not in CMD_MAPPING:
verdict(CHECKER_ERROR, "Checker error", "Wrong command %s" % cmd)
handler, args_count = CMD_MAPPING[cmd]
if len(args) != args_count:
verdict(CHECKER_ERROR, "Checker error", "Wrong args count for %s" % cmd)
try:
handler(*args)
except requests.RequestException as E:
verdict(DOWN, "Connect error", "Connect error: %s" % E)
except json.decoder.JSONDecodeError as E:
verdict(MUMBLE, "Json decode error", "Json decode error: %s" % traceback.format_exc())
except Exception as E:
verdict(CHECKER_ERROR, "Checker error", "Checker error: %s" % traceback.format_exc())
verdict(CHECKER_ERROR, "Checker error", "No verdict")
if __name__ == "__main__":
main(args=sys.argv[1:])
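
# Typical checksystem invocations (host and flag values are illustrative):
#
#     ./checker.py info
#     ./checker.py check 10.60.1.2
#     ./checker.py put 10.60.1.2 <flag_id> <flag> 1
#     ./checker.py get 10.60.1.2 <flag_id> <flag> 1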
| 28.668067 | 100 | 0.651913 | 993 | 6,823 | 4.331319 | 0.189325 | 0.034876 | 0.021158 | 0.022088 | 0.331086 | 0.253429 | 0.215531 | 0.174378 | 0.142292 | 0.13369 | 0 | 0.013016 | 0.223069 | 6,823 | 237 | 101 | 28.78903 | 0.79834 | 0.008647 | 0 | 0.252809 | 0 | 0 | 0.159 | 0.009614 | 0 | 0 | 0 | 0.004219 | 0 | 1 | 0.073034 | false | 0 | 0.106742 | 0 | 0.269663 | 0.044944 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d10fc4b27b6136c47a7bdee2bd28cad40658f1e | 1,929 | py | Python | oolearning/model_wrappers/RidgeRegressor.py | shane-kercheval/oo-learning | 9e3ebe5f7460179e23f6801bc01f1114bb896dea | [
"MIT"
] | 1 | 2020-10-09T09:11:46.000Z | 2020-10-09T09:11:46.000Z | oolearning/model_wrappers/RidgeRegressor.py | shane-kercheval/oo-learning | 9e3ebe5f7460179e23f6801bc01f1114bb896dea | [
"MIT"
] | 48 | 2018-04-09T01:30:31.000Z | 2021-06-13T03:25:59.000Z | oolearning/model_wrappers/RidgeRegressor.py | shane-kercheval/oo-learning | 9e3ebe5f7460179e23f6801bc01f1114bb896dea | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from oolearning.model_wrappers.HyperParamsBase import HyperParamsBase
from oolearning.model_wrappers.ModelExceptions import MissingValueError
from oolearning.model_wrappers.ModelWrapperBase import ModelWrapperBase
from oolearning.model_wrappers.SklearnPredictMixin import SklearnPredictArrayMixin
class RidgeRegressorHP(HyperParamsBase):
"""
See http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.Ridge.html for more information
on tuning parameters
"""
# noinspection SpellCheckingInspection
def __init__(self, alpha: float = 0.5, solver: str = 'cholesky'):
super().__init__()
self._params_dict = dict(alpha=alpha, solver=solver)
class RidgeRegressor(SklearnPredictArrayMixin, ModelWrapperBase):
"""
    fits a Ridge Regression model on the data
"""
def __init__(self, fit_intercept: bool = True, seed: int = 42):
super().__init__()
self._fit_intercept = fit_intercept
self._seed = seed
@property
def feature_importance(self):
return None
def _train(self,
data_x: pd.DataFrame,
data_y: np.ndarray,
hyper_params: RidgeRegressorHP = None) -> object:
assert hyper_params is not None
assert isinstance(hyper_params, RidgeRegressorHP)
param_dict = hyper_params.params_dict
# Regression can't handle missing values
if data_x.isnull().sum().sum() > 0:
raise MissingValueError()
if any(np.isnan(data_y)):
raise MissingValueError()
ridge_reg = Ridge(alpha=param_dict['alpha'],
solver=param_dict['solver'],
fit_intercept=self._fit_intercept,
random_state=self._seed)
ridge_reg.fit(data_x, data_y)
return ridge_reg
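
# A tiny smoke test (hypothetical data, not part of the library): it calls the
# protected _train hook directly for illustration; production code would go
# through the ModelWrapperBase public training API instead.
if __name__ == '__main__':
    demo_x = pd.DataFrame({'x1': [1.0, 2.0, 3.0], 'x2': [0.5, 0.1, 0.9]})
    demo_y = np.array([1.5, 2.5, 3.5])
    fitted = RidgeRegressor()._train(demo_x, demo_y,
                                     hyper_params=RidgeRegressorHP(alpha=0.5))
    print(fitted.coef_)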
| 32.694915 | 109 | 0.67548 | 210 | 1,929 | 5.952381 | 0.447619 | 0.048 | 0.0608 | 0.0864 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003434 | 0.245205 | 1,929 | 58 | 110 | 33.258621 | 0.855082 | 0.12649 | 0 | 0.111111 | 0 | 0 | 0.011522 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 1 | 0.111111 | false | 0 | 0.222222 | 0.027778 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d164d92f8df106a33e5db8f4a80c18b71d6631d | 692 | py | Python | Problems/Binary Tree/107. Binary Tree Level Order Traversal II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | Problems/Binary Tree/107. Binary Tree Level Order Traversal II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | Problems/Binary Tree/107. Binary Tree Level Order Traversal II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/binary-tree-level-order-traversal-ii/
from typing import List
from collections import deque
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:
res = deque()
def dfs(node, level=0):
            if node is None:
return
while len(res) <= level:
res.appendleft([])
res[-(level+1)].append(node.val)
dfs(node.left, level+1)
dfs(node.right, level+1)
dfs(root)
return res
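
# Quick demo (not part of the original solution): build the sample tree
# [3, 9, 20, null, null, 15, 7] and print its bottom-up level order.
if __name__ == "__main__":
    root = TreeNode(3, TreeNode(9), TreeNode(20, TreeNode(15), TreeNode(7)))
    print(Solution().levelOrderBottom(root))  # deque([[15, 7], [9, 20], [3]])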
| 23.862069 | 69 | 0.559249 | 85 | 692 | 4.505882 | 0.470588 | 0.05483 | 0.046997 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010616 | 0.319364 | 692 | 28 | 70 | 24.714286 | 0.802548 | 0.096821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15 | false | 0 | 0.1 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d19cd709d43bcc2b8f554495c58d5b7a1afce89 | 5,453 | py | Python | rl_1.py | canbefine/CO2-heat-pump-system-optimization | 253b54b5bdfafbfde4776f0226e79e290fce43a9 | [
"MIT"
] | 1 | 2021-11-15T17:32:11.000Z | 2021-11-15T17:32:11.000Z | rl_1.py | canbefine/CO2-heat-pump-system-optimization | 253b54b5bdfafbfde4776f0226e79e290fce43a9 | [
"MIT"
] | null | null | null | rl_1.py | canbefine/CO2-heat-pump-system-optimization | 253b54b5bdfafbfde4776f0226e79e290fce43a9 | [
"MIT"
] | 1 | 2021-07-03T13:19:12.000Z | 2021-07-03T13:19:12.000Z | """
Note: This is an updated version of my previous code.
For the target network, I use a moving average to softly replace the target parameters instead of using the assign function.
By doing this, it gains a 20% speed-up on my machine (CPU).
Deep Deterministic Policy Gradient (DDPG), Reinforcement Learning.
DDPG is Actor Critic based algorithm.
Pendulum example.
View more on my tutorial page: https://morvanzhou.github.io/tutorials/
Using:
tensorflow 1.0
gym 0.8.0
"""
"""
Modified by Keven
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
##################### hyper parameters ####################
MAX_EPISODES = 500
MAX_EP_STEPS = 200
LR_A = 0.001 # learning rate for actor
LR_C = 0.002 # learning rate for critic
GAMMA = 0.9 # reward discount
TAU = 0.01 # soft replacement
MEMORY_CAPACITY = 500
BATCH_SIZE = 32
RENDER = False
# ENV_NAME = 'Pendulum-v0'
############################### DDPG ####################################
class RL_DDPG(object):
def __init__(self, a_dim, s_dim, a_bound,):
self.memory = np.zeros((MEMORY_CAPACITY, s_dim * 2 + a_dim + 1), dtype=np.float32)
self.pointer = 0
self.sess = tf.Session()
self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound,
self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
self.R = tf.placeholder(tf.float32, [None, 1], 'r')
self.a = self._build_a(self.S,)
q = self._build_c(self.S, self.a, )
self.a_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Actor')
self.c_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Critic')
ema = tf.train.ExponentialMovingAverage(decay=1 - TAU) # soft replacement
def ema_getter(getter, name, *args, **kwargs):
return ema.average(getter(name, *args, **kwargs))
target_update = [ema.apply(self.a_params), ema.apply(self.c_params)] # soft update operation
a_ = self._build_a(self.S_, reuse=True, custom_getter=ema_getter) # replaced target parameters
q_ = self._build_c(self.S_, a_, reuse=True, custom_getter=ema_getter)
a_loss = - tf.reduce_mean(q) # maximize the q
self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.a_params)
with tf.control_dependencies(target_update): # soft replacement happened at here
q_target = self.R + GAMMA * q_
td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=self.c_params)
self.sess.run(tf.global_variables_initializer())
def choose_action(self, s):
rs = self.sess.run(self.a, {self.S: s[np.newaxis, :]})[0]
# rs1 = self.sess.run(self.a, {self.S: s[np.newaxis, :]})
# print(rs1)
return rs
def learn(self):
indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
bt = self.memory[indices, :]
bs = bt[:, :self.s_dim]
ba = bt[:, self.s_dim: self.s_dim + self.a_dim]
br = bt[:, -self.s_dim - 1: -self.s_dim]
bs_ = bt[:, -self.s_dim:]
self.sess.run(self.atrain, {self.S: bs})
self.sess.run(self.ctrain, {self.S: bs, self.a: ba, self.R: br, self.S_: bs_})
# print(self.sess.run(self.a_params[0]))
# print(self.sess.run(self.a_params[1]))
# print(self.sess.run(self.a_params[2]))
def store_transition(self, s, a, r, s_):
transition = np.hstack((s, a, [r], s_))
index = self.pointer % MEMORY_CAPACITY # replace the old memory with new memory
self.memory[index, :] = transition
self.pointer += 1
def _build_a(self, s, reuse=None, custom_getter=None):
trainable = True if reuse is None else False
with tf.variable_scope('Actor', reuse=reuse, custom_getter=custom_getter):
net = tf.layers.dense(s, 30, activation=tf.nn.relu, name='l1', trainable=trainable)
a = tf.layers.dense(net, self.a_dim, activation=tf.nn.softmax, name='a', trainable=trainable)
# scaled_a_ = tf.add(tf.multiply(a, tf.subtract(self.a_bound[1], self.a_bound[0])), self.a_bound[0])
scaled_a_ = tf.add(tf.multiply(a, tf.subtract(self.a_bound[1], self.a_bound[0])), self.a_bound[0])
# print(scaled_a_)
return scaled_a_
def _build_c(self, s, a, reuse=None, custom_getter=None):
trainable = True if reuse is None else False
with tf.variable_scope('Critic', reuse=reuse, custom_getter=custom_getter):
n_l1 = 30
w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
return tf.layers.dense(net, 1, trainable=trainable) # Q(s,a)
def save(self):
saver = tf.train.Saver()
saver.save(self.sess, './params', write_meta_graph=False)
print(saver)
def restore(self):
saver = tf.train.Saver()
saver.restore(self.sess, './params')
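
# A minimal training-loop sketch (assumes a gym-style continuous-control env;
# exploration noise and rendering are omitted for brevity):
#
#     import gym
#     env = gym.make('Pendulum-v0')
#     s_dim = env.observation_space.shape[0]
#     a_dim = env.action_space.shape[0]
#     a_bound = [env.action_space.low, env.action_space.high]
#     ddpg = RL_DDPG(a_dim, s_dim, a_bound)
#     for episode in range(MAX_EPISODES):
#         s = env.reset()
#         for step in range(MAX_EP_STEPS):
#             a = ddpg.choose_action(s)
#             s_, r, done, _ = env.step(a)
#             ddpg.store_transition(s, a, r / 10, s_)
#             if ddpg.pointer > MEMORY_CAPACITY:
#                 ddpg.learn()
#             s = s_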
| 42.271318 | 113 | 0.61269 | 792 | 5,453 | 4.039141 | 0.275253 | 0.035949 | 0.020006 | 0.032823 | 0.331354 | 0.293842 | 0.203501 | 0.168178 | 0.168178 | 0.133792 | 0 | 0.019509 | 0.238584 | 5,453 | 128 | 114 | 42.601563 | 0.750963 | 0.191271 | 0 | 0.051282 | 0 | 0 | 0.01334 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.115385 | false | 0 | 0.025641 | 0.012821 | 0.205128 | 0.012821 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d1ac581b59a0c2d9ffa10dc5dad9768f58e2cab | 3,376 | py | Python | packages/grid/apps/domain/src/main/core/infrastructure/providers/azure/utils.py | exityan/PySyft | 35166c487a5be57f9ad28929ed88a8ba6bdd5aeb | [
"Apache-2.0"
] | 425 | 2019-09-22T06:14:53.000Z | 2022-03-30T02:17:34.000Z | packages/grid/apps/domain/src/main/core/infrastructure/providers/azure/utils.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 352 | 2019-09-17T15:32:51.000Z | 2022-03-12T01:07:35.000Z | packages/grid/apps/domain/src/main/core/infrastructure/providers/azure/utils.py | Metrix1010/PySyft | 6477f64b63dc285059c3766deab3993653cead2e | [
"Apache-2.0"
] | 208 | 2019-09-18T18:32:10.000Z | 2022-03-24T01:10:11.000Z | # stdlib
import json
import subprocess
# third party
from PyInquirer import prompt
import click
# grid relative
from ...utils import Config
from ...utils import styles
class AZ:
def locations_list(self):
proc = subprocess.Popen(
"az account list-locations",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
locations = json.loads(proc.stdout.read())
return [location["name"] for location in locations]
def get_all_instance_types(location=None):
proc = subprocess.Popen(
f"az vm list-sizes --location {location}",
shell=True,
stdout=subprocess.PIPE,
universal_newlines=True,
)
machines = json.loads(proc.stdout.read())
all_instances = {
"all_instances": [
f"Name: {machine['name']} | CPUs: {machine['numberOfCores']} | Mem: {int(machine['memoryInMb']/1024)} "
for machine in machines
]
}
return all_instances
def get_azure_config() -> Config:
"""Getting the configration required for deployment on AZURE.
Returns:
Config: Simple Config with the user inputs
"""
az = AZ()
subscription_id = prompt(
[
{
"type": "password",
"name": "subscription_id",
"message": "Please provide your subscription_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["subscription_id"]
client_id = prompt(
[
{
"type": "password",
"name": "client_id",
"message": "Please provide your client_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["client_id"]
client_secret = prompt(
[
{
"type": "password",
"name": "client_secret",
"message": "Please provide your client_secret",
"default": "XXXX-XXXX-XXX-XXX-XXX",
}
],
style=styles.second,
)["client_secret"]
tenant_id = prompt(
[
{
"type": "password",
"name": "tenant_id",
"message": "Please provide your tenant_id",
"default": "00000000-0000-0000-0000-000000000000",
}
],
style=styles.second,
)["tenant_id"]
location = prompt(
[
{
"type": "list",
"name": "location",
"message": "Please select your desired location",
"choices": az.locations_list(),
}
],
style=styles.second,
)["location"]
vm_size = prompt(
[
{
"type": "list",
"name": "VMSize",
"message": "Please select your desired VM Size",
"choices": get_all_instance_types(location=location)["all_instances"],
}
],
style=styles.second,
)["VMSize"].split(" ")[1]
return Config(
location=location,
subscription_id=subscription_id,
client_id=client_id,
client_secret=client_secret,
tenant_id=tenant_id,
vm_size=vm_size,
)
| 25.575758 | 115 | 0.512737 | 307 | 3,376 | 5.498371 | 0.299674 | 0.049763 | 0.060427 | 0.052133 | 0.387441 | 0.162322 | 0.162322 | 0.162322 | 0.103081 | 0.103081 | 0 | 0.047329 | 0.367891 | 3,376 | 131 | 116 | 25.770992 | 0.743674 | 0.044135 | 0 | 0.271028 | 0 | 0.009346 | 0.252651 | 0.05864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028037 | false | 0.037383 | 0.056075 | 0 | 0.121495 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d1ad5700c7e709bb9104f588aba454d223ca272 | 8,930 | py | Python | sparv/core/preload.py | spraakbanken/sparv-pipeline | 7293d42c577afdaf01ce8a936743f8b83d6eb962 | [
"MIT"
] | 17 | 2018-09-21T07:01:45.000Z | 2022-02-24T23:26:49.000Z | sparv/core/preload.py | spraakbanken/sparv-pipeline | 7293d42c577afdaf01ce8a936743f8b83d6eb962 | [
"MIT"
] | 146 | 2018-11-13T19:13:25.000Z | 2022-03-31T09:57:56.000Z | sparv/core/preload.py | spraakbanken/sparv-pipeline | 7293d42c577afdaf01ce8a936743f8b83d6eb962 | [
"MIT"
] | 5 | 2019-02-14T00:50:38.000Z | 2021-03-29T15:37:41.000Z | """Sparv preloader."""
import logging
import multiprocessing
import pickle
import socket
import struct
import time
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, Iterator
from rich.logging import RichHandler
from sparv.core import config, log_handler
from sparv.core.console import console
from sparv.core.snake_utils import SnakeStorage
from sparv.util import SparvErrorMessage
INFO = "INFO"
STATUS = "STATUS"
STOP = "STOP"
PING = "PING"
PONG = "PONG"
# Set up logging
log = logging.getLogger("sparv_preloader")
log.setLevel(logging.INFO)
handler = RichHandler(show_path=False, rich_tracebacks=True, console=console)
handler.setFormatter(logging.Formatter("%(message)s", datefmt=log_handler.DATE_FORMAT))
log.addHandler(handler)
class Preloader:
"""Class representing a preloader."""
def __init__(self, function, target, preloader, params, cleanup, shared):
self.function = function
self.target = target
self.preloader = preloader
self.params = params
self.cleanup = cleanup
self.shared = shared
self.preloaded = None
def connect_to_socket(socket_path: str, timeout: bool = False) -> socket.socket:
"""Connect to a socket and return it."""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if timeout:
s.settimeout(1)
s.connect(socket_path)
s.settimeout(None)
return s
@contextmanager
def socketcontext(socket_path: str) -> Iterator[socket.socket]:
"""Context manager for socket."""
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(socket_path)
try:
yield s
finally:
s.close()
def receive_data(sock):
"""Receive pickled data from socket and unpickle."""
# Get data length
buf_length = recvall(sock, 4)
if not buf_length or len(buf_length) < 4:
return None
length, = struct.unpack(">I", buf_length)
# Get data
data = recvall(sock, length)
# Unpickle data
data = pickle.loads(data)
return data
def send_data(sock, data):
"""Send pickled data over socket."""
datap = pickle.dumps(data)
sock.sendall(struct.pack(">I", len(datap)))
sock.sendall(datap)
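
# Sketch of the wire format implemented by send_data/receive_data above: every
# message is a 4-byte big-endian length prefix followed by the pickled payload.
# Round-trip over a local socket pair (illustrative only; the preloader itself
# communicates over AF_UNIX sockets bound to a file path):
#
#     a, b = socket.socketpair()
#     send_data(a, {"hello": "world"})
#     assert receive_data(b) == {"hello": "world"}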
def get_preloader_info(socket_path):
"""Get information about preloaded modules."""
with socketcontext(socket_path) as sock:
send_data(sock, INFO)
response = receive_data(sock)
return response
def get_preloader_status(socket_path):
"""Get preloader status."""
with socketcontext(socket_path) as sock:
send_data(sock, STATUS)
response = receive_data(sock)
return response
def stop(socket_path):
"""Send stop signal to Sparv preloader."""
try:
with socketcontext(socket_path) as sock:
send_data(sock, STOP)
return True
except ConnectionRefusedError:
return False
def recvall(sock, size: int):
"""Receive data of a specific size from socket.
If 'size' number of bytes are not received, None is returned.
"""
buf = b""
while size:
newbuf = sock.recv(size)
if not newbuf:
return None
buf += newbuf
size -= len(newbuf)
return buf
def handle(client_sock, annotators: Dict[str, Preloader]):
"""Handle request and execute preloaded function."""
# Get data
data = receive_data(client_sock)
if data is None:
return
# Check if we got a command instead of annotator info
if isinstance(data, str):
if data == STOP:
return False
elif data == INFO:
send_data(client_sock, {k: v.params for k, v in annotators.items()})
return
elif data == PING:
try:
send_data(client_sock, PONG)
except BrokenPipeError:
return
data = receive_data(client_sock)
log.info("Running %s...", data[0])
annotator = annotators[data[0]]
# Set target parameter to preloaded data
data[1][annotator.target] = annotator.preloaded
# Set up logging over socket
log_handler.setup_logging(data[2]["log_server"],
log_level=data[2]["log_level"],
log_file_level=data[2]["log_file_level"])
# Call annotator function
try:
annotator.function(**data[1])
except SparvErrorMessage as e:
send_data(client_sock, e)
return
except Exception as e:
console.print_exception()
send_data(client_sock, e)
return
# Clear log handlers
logger = logging.getLogger("sparv")
logger.handlers.clear()
log.info("Done")
send_data(client_sock, True)
# Run cleanup if available
if annotator.cleanup:
annotator.preloaded = annotator.cleanup(**{**annotator.params, **{annotator.target: annotator.preloaded}})
def worker(worker_no: int, server_socket, annotators: Dict[str, Preloader], stop_event):
"""Listen to the socket server and handle incoming requests."""
log.info(f"Worker {worker_no} started")
# Load any non-shared preloaders
for annotator in annotators.values():
if not annotator.shared:
annotator.preloaded = annotator.preloader(**annotator.params)
while True:
try:
client_sock, _address = server_socket.accept() # Accept a connection
except KeyboardInterrupt:
stop_event.set()
return
try:
log.debug("Handling request")
result = handle(client_sock, annotators)
if result is False:
stop_event.set()
return
        except Exception:
log.exception("Error during handling")
client_sock.close()
def serve(socket_path: str, processes: int, storage: SnakeStorage):
"""Start the Sparv preloader socket server."""
socket_file = Path(socket_path)
if socket_file.exists():
raise SparvErrorMessage(f"Socket {socket_path} already exists.")
# If processes is not set, set it to the number of processors
if not processes:
processes = multiprocessing.cpu_count()
# Dictionary of preloaded models, indexed by module and annotator name
annotators = {}
preload_config = config.get("preload")
if not preload_config:
raise SparvErrorMessage("Preloader config is missing. Use the 'preload' section "
"in your config file to list annotators to preload.")
rules = {}
for rule in storage.all_rules:
if rule.has_preloader:
rules[rule.target_name] = rule
log.info("Loading annotators: " + ", ".join(preload_config))
for annotator in preload_config:
if annotator not in rules:
raise SparvErrorMessage(f"Unknown annotator '{annotator}' in preloader config. Either it doesn't exist "
"or it doesn't support preloading.")
rule = rules[annotator]
preloader_params = {}
for param in rule.annotator_info["preloader_params"]:
preloader_params[param] = rule.parameters[param]
annotator_obj = Preloader(
rule.annotator_info["function"],
rule.annotator_info["preloader_target"],
rule.annotator_info["preloader"],
preloader_params,
rule.annotator_info["preloader_cleanup"],
rule.annotator_info["preloader_shared"]
)
if annotator_obj.shared:
annotator_obj.preloaded = annotator_obj.preloader(**annotator_obj.params)
annotators[annotator] = annotator_obj
# Start the socket (AF_UNIX should be supported in Windows 10 since 2018)
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_socket.bind(socket_path)
server_socket.listen(processes)
stop_event = multiprocessing.Event()
workers = []
for i in range(processes):
p = multiprocessing.Process(target=worker, args=(i + 1, server_socket, annotators, stop_event))
p.start()
workers.append(p)
# Free up memory
del annotators
del annotator_obj
log.info(f"The Sparv preloader is ready and waiting for connections using the socket at {socket_file.absolute()}. "
"Run Sparv with the command 'sparv run --socket /path/to/socket' to use the preloader. "
"Press Ctrl-C to exit, or run 'sparv preload stop --socket /path/to/socket'.")
# Periodically check whether stop_event is set or not and stop all processes when set
while True:
if stop_event.is_set():
log.info("Stopping all workers...")
for p in workers:
if p.is_alive():
# Send stop signal to worker
stop(socket_path)
break
time.sleep(2)
# Remove socket file
if socket_file.exists():
socket_file.unlink()
| 30.168919 | 119 | 0.643673 | 1,078 | 8,930 | 5.21243 | 0.24397 | 0.030254 | 0.017441 | 0.016017 | 0.077772 | 0.068873 | 0.059975 | 0.045738 | 0.038619 | 0 | 0 | 0.002734 | 0.262822 | 8,930 | 295 | 120 | 30.271186 | 0.850828 | 0.133259 | 0 | 0.188119 | 0 | 0.009901 | 0.107115 | 0.00327 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059406 | false | 0 | 0.069307 | 0 | 0.217822 | 0.004951 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d1c5756061b18bbb1b7b5e25656572631739d00 | 1,273 | py | Python | setup.py | gawainguo/Flask-AC | 6b7c7ebe7721b1208d7596999ce29852a7e1879c | [
"MIT"
] | 3 | 2018-11-09T09:06:01.000Z | 2018-11-11T15:38:45.000Z | setup.py | gawainguo/Flask-AC | 6b7c7ebe7721b1208d7596999ce29852a7e1879c | [
"MIT"
] | 1 | 2018-11-11T15:39:04.000Z | 2018-11-15T02:31:32.000Z | setup.py | gawainguo/Flask-AC | 6b7c7ebe7721b1208d7596999ce29852a7e1879c | [
"MIT"
] | null | null | null | """
Flask-ac
--------
Flask-ac provide role based access control(rbac) for Flask.
This plugin implement tree-like permission structure for access control.
Flask-ac is not implement persistent storage of user, roles and permissions,
instead using self-defined loaders to load data from anywhere you want.
"""
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='Flask-AC',
version='0.0.3',
url='',
license='MIT',
author='Jiaqi Guo',
author_email='gawain_guo@hotmail.com',
    description='Flask access control extension',
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
py_modules=['flask_ac'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 25.979592 | 76 | 0.666143 | 148 | 1,273 | 5.621622 | 0.722973 | 0.042067 | 0.045673 | 0.072115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002994 | 0.212883 | 1,273 | 48 | 77 | 26.520833 | 0.827345 | 0.236449 | 0 | 0 | 0 | 0 | 0.401869 | 0.022845 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d223111cfbe4ff9cf61540962d513bd338f369e | 269 | py | Python | Desafios/Desafio 14.py | VictorFBX/Python | cf2c5fcb86b161a25bae14eb9925af9b3852f4a5 | [
"MIT"
] | null | null | null | Desafios/Desafio 14.py | VictorFBX/Python | cf2c5fcb86b161a25bae14eb9925af9b3852f4a5 | [
"MIT"
] | null | null | null | Desafios/Desafio 14.py | VictorFBX/Python | cf2c5fcb86b161a25bae14eb9925af9b3852f4a5 | [
"MIT"
] | null | null | null | print('=== Desafio 15 ===')
print('Aluguel de carro')
d = int(input('Por quantos dias o carro foi alugado? '))
k = float(input('Qual a quantidade de quilometros percorridos?'))
rd = float(d*60)
rk = float(k*0.15)
r = float(rd + rk)
print(f'Você tem a pagar R${r}')
| 20.692308 | 65 | 0.64684 | 47 | 269 | 3.702128 | 0.659574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03125 | 0.167286 | 269 | 12 | 66 | 22.416667 | 0.745536 | 0 | 0 | 0 | 0 | 0 | 0.516729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d2415810e07270813c6afdcc29d272aef24020d | 668 | py | Python | ITcoach/DataAnalysis-master/day01/code/page30.py | ww35133634/chenxusheng | 666e0eb3aedde46342faf0d4030f5c72b10c9732 | [
"AFL-3.0"
] | null | null | null | ITcoach/DataAnalysis-master/day01/code/page30.py | ww35133634/chenxusheng | 666e0eb3aedde46342faf0d4030f5c72b10c9732 | [
"AFL-3.0"
] | null | null | null | ITcoach/DataAnalysis-master/day01/code/page30.py | ww35133634/chenxusheng | 666e0eb3aedde46342faf0d4030f5c72b10c9732 | [
"AFL-3.0"
] | null | null | null | # coding=utf-8
from matplotlib import pyplot as plt
from matplotlib import font_manager
my_font = font_manager.FontProperties(fname="/System/Library/Fonts/PingFang.ttc")
y_1 = [1,0,1,1,2,4,3,2,3,4,4,5,6,5,4,3,3,1,1,1]
y_2 = [1,0,3,1,2,2,3,3,2,1 ,2,1,1,1,1,1,1,1,1,1]
x = range(11,31)
# Set the figure size
plt.figure(figsize=(20,8),dpi=80)
plt.plot(x,y_1,label="自己",color="#F08080")
plt.plot(x,y_2,label="同桌",color="#DB7093",linestyle="--")
# Set the x-axis tick labels
_xtick_labels = ["{}岁".format(i) for i in x]
plt.xticks(x,_xtick_labels,fontproperties=my_font)
# plt.yticks(range(0,9))
# Draw the grid
plt.grid(alpha=0.4,linestyle=':')
# Add the legend
plt.legend(prop=my_font,loc="upper left")
# Show the plot
plt.show()
| 21.548387 | 81 | 0.687126 | 142 | 668 | 3.140845 | 0.471831 | 0.053812 | 0.053812 | 0.053812 | 0.020179 | 0.020179 | 0.020179 | 0.020179 | 0 | 0 | 0 | 0.109477 | 0.083832 | 668 | 30 | 82 | 22.266667 | 0.619281 | 0.085329 | 0 | 0 | 0 | 0 | 0.112769 | 0.056385 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d25121bf6642b18ea19ea2d54bd4ec3807ae0af | 3,830 | py | Python | AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/EditorScripts/MeshSurfaceTagEmitter_DependentOnMeshComponent.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-09-13T00:01:12.000Z | 2021-09-13T00:01:12.000Z | AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/EditorScripts/MeshSurfaceTagEmitter_DependentOnMeshComponent.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | null | null | null | AutomatedTesting/Gem/PythonTests/largeworlds/dyn_veg/EditorScripts/MeshSurfaceTagEmitter_DependentOnMeshComponent.py | aaarsene/o3de | 37e3b0226958974defd14dd6d808e8557dcd7345 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-07-20T11:07:25.000Z | 2021-07-20T11:07:25.000Z | """
Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.entity as EntityId
import azlmbr.math as math
import azlmbr.paths
sys.path.append(os.path.join(azlmbr.paths.devroot, 'AutomatedTesting', 'Gem', 'PythonTests'))
import editor_python_test_tools.hydra_editor_utils as hydra
from editor_python_test_tools.editor_test_helper import EditorTestHelper
class TestMeshSurfaceTagEmitter(EditorTestHelper):
def __init__(self):
EditorTestHelper.__init__(self, log_prefix="MeshSurfaceTagEmitter_DependentOnMeshComponent", args=["level"])
def run_test(self):
"""
Summary:
A New level is loaded. A New entity is created with component "Mesh Surface Tag Emitter". Adding a component
"Mesh" to the same entity.
Expected Behavior:
Mesh Surface Tag Emitter is disabled until the required Mesh component is added to the entity.
Test Steps:
1) Open level
2) Create a new entity with component "Mesh Surface Tag Emitter"
3) Make sure Mesh Surface Tag Emitter is disabled
4) Add Mesh to the same entity
5) Make sure Mesh Surface Tag Emitter is enabled after adding Mesh
Note:
- This test file must be called from the Open 3D Engine Editor command terminal
- Any passed and failed tests are written to the Editor.log file.
Parsing the file or running a log_monitor are required to observe the test results.
:return: None
"""
def is_component_enabled(EntityComponentIdPair):
return editor.EditorComponentAPIBus(bus.Broadcast, "IsComponentEnabled", EntityComponentIdPair)
# 1) Open level
self.test_success = self.create_level(
self.args["level"],
heightmap_resolution=1024,
heightmap_meters_per_pixel=1,
terrain_texture_resolution=4096,
use_terrain=False,
)
# 2) Create a new entity with component "Mesh Surface Tag Emitter"
entity_position = math.Vector3(125.0, 136.0, 32.0)
component_to_add = "Mesh Surface Tag Emitter"
entity_id = editor.ToolsApplicationRequestBus(
bus.Broadcast, "CreateNewEntityAtPosition", entity_position, EntityId.EntityId()
)
meshentity = hydra.Entity("meshentity", entity_id)
meshentity.components = []
meshentity.components.append(hydra.add_component(component_to_add, entity_id))
if entity_id.IsValid():
print("New Entity Created")
# 3) Make sure Mesh Surface Tag Emitter is disabled
is_enabled = is_component_enabled(meshentity.components[0])
self.test_success = self.test_success and not is_enabled
if not is_enabled:
print(f"{component_to_add} is Disabled")
elif is_enabled:
print(f"{component_to_add} is Enabled. But It should be disabled before adding Mesh")
# 4) Add Mesh to the same entity
component = "Mesh"
meshentity.components.append(hydra.add_component(component, entity_id))
# 5) Make sure Mesh Surface Tag Emitter is enabled after adding Mesh
is_enabled = is_component_enabled(meshentity.components[0])
self.test_success = self.test_success and is_enabled
if is_enabled:
print(f"{component_to_add} is Enabled")
elif not is_enabled:
print(f"{component_to_add} is Disabled. But It should be enabled after adding Mesh")
test = TestMeshSurfaceTagEmitter()
test.run()
| 39.484536 | 155 | 0.692689 | 493 | 3,830 | 5.231237 | 0.314402 | 0.041877 | 0.048856 | 0.073284 | 0.355564 | 0.320667 | 0.295463 | 0.237301 | 0.237301 | 0.176813 | 0 | 0.012676 | 0.237859 | 3,830 | 96 | 156 | 39.895833 | 0.870846 | 0.311227 | 0 | 0.04 | 0 | 0 | 0.158276 | 0.028594 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.18 | 0.02 | 0.28 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d26647d4723f5235618279993d3fbddff95048d | 2,600 | py | Python | autohoot/einsum_ops/basic_ops.py | LinjianMa/AutoHOOT | 5f9b790afb15cf4ae97cd0929a1db65d7c0347b3 | [
"Apache-2.0"
] | 13 | 2020-11-16T03:35:50.000Z | 2022-03-02T05:20:16.000Z | autohoot/einsum_ops/basic_ops.py | LinjianMa/AutoHOOT | 5f9b790afb15cf4ae97cd0929a1db65d7c0347b3 | [
"Apache-2.0"
] | 19 | 2020-10-02T00:52:08.000Z | 2021-09-10T19:34:29.000Z | autohoot/einsum_ops/basic_ops.py | LinjianMa/AutoHOOT | 5f9b790afb15cf4ae97cd0929a1db65d7c0347b3 | [
"Apache-2.0"
] | 1 | 2020-10-10T18:26:00.000Z | 2020-10-10T18:26:00.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from autohoot import autodiff as ad
einsum = ad.EinsumNode.create
from autohoot.utils import indices_to_subscripts
def transpose(node, axes=(1, 0)):
# Default to handle simple transpose of a 2d matrix.
subscripts = indices_to_subscripts([list(range(len(node.shape)))], axes,
len(node.shape))
return einsum(subscripts, node)
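# For example (illustrative): for a 2-D node the generated subscripts are
# "ab->ba", so transpose(node) is equivalent to einsum("ab->ba", node).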
def sum(node, axis=None):
if axis != None:
raise Exception(f"Sum with axis {axis} is not implemented.")
subscripts = indices_to_subscripts([list(range(len(node.shape)))], [],
len(node.shape))
return einsum(subscripts, node)
def tensordot(node_A, node_B, axes):
"""
Compute tensor dot product along specified axes.
Given node_A and node_B, and an array_like object containing two array_like objects,
(a_axes, b_axes), sum the products of node_A’s and node_B’s elements over the axes specified.
Example: for 4-d tensors node_A and node_B,
tensordot(node_A, node_B, axes=[[2,3], [0,1]]) is same as
einsum("abcd,cdef->abef", node_A, node_B).
"""
assert len(axes) == 2
assert len(axes[0]) == len(axes[1])
dim = len(node_A.shape) + len(node_B.shape) - len(axes[0])
input_indices_A = list(range(len(node_A.shape)))
index_acc = len(node_A.shape)
input_indices_B = [0] * len(node_B.shape)
for i in range(len(node_B.shape)):
if i not in axes[1]:
input_indices_B[i] = index_acc
index_acc += 1
for i in range(len(axes[1])):
input_indices_B[axes[1][i]] = input_indices_A[axes[0][i]]
assert index_acc == dim
out_indices = [
v for (i, v) in enumerate(input_indices_A) if i not in axes[0]
]
out_indices += [
v for (i, v) in enumerate(input_indices_B) if i not in axes[1]
]
subscripts = indices_to_subscripts([input_indices_A, input_indices_B],
out_indices, dim)
return einsum(subscripts, node_A, node_B)
| 34.666667 | 97 | 0.657692 | 400 | 2,600 | 4.1325 | 0.335 | 0.042347 | 0.039322 | 0.024198 | 0.259528 | 0.200847 | 0.15729 | 0.15729 | 0.107683 | 0.047187 | 0 | 0.014134 | 0.238077 | 2,600 | 74 | 98 | 35.135135 | 0.820293 | 0.375 | 0 | 0.111111 | 0 | 0 | 0.025381 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.083333 | false | 0 | 0.055556 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d27e0ec2cfcb3d79fec0c7c0ef9ea9efb7b7eba | 1,816 | py | Python | resistorCor.py | EduFreit4s/Python | fcefb055467f2ef1d8a2ce97d2da78ed9052f982 | [
"MIT"
] | null | null | null | resistorCor.py | EduFreit4s/Python | fcefb055467f2ef1d8a2ce97d2da78ed9052f982 | [
"MIT"
] | null | null | null | resistorCor.py | EduFreit4s/Python | fcefb055467f2ef1d8a2ce97d2da78ed9052f982 | [
"MIT"
] | null | null | null | # ===================================
# = https://github.com/EduFreit4s =
# ===================================
# TRANSLATES THE COLOR CODE OF FOUR-BAND RESISTORS
# NOTE: THE OUTPUT IS A STRING
def resistorCor(cor1, cor2, cor3, cor4):
    def corToNumber(cor): # CONVERTS THE COLOR PARAMETER TO A NUMBER
        return { # DICTIONARY LOOKUP (a hack that works like a switch statement)
"PRETO" : 0,
"MARROM" : 1,
"VERMELHO" : 2,
"LARANJA" : 3,
"AMARELO" : 4,
"VERDE" : 5,
"AZUL" : 6,
"VIOLETA" : 7,
"CINZA" : 8,
"BRANCO" : 9,
"OURO" : -1,
"PRATA" : -2
        }[cor.upper()] # upper() CONVERTS EVERY LETTER TO UPPERCASE
    def tolerancia(cor4): # USES THE DICTIONARY TO CHECK THE TOLERANCES
if corToNumber(cor4) == 2:
return " ±2%"
elif corToNumber(cor4) == -1:
return " ±5%"
elif corToNumber(cor4) == -2:
return " ±10%"
else:
return " ±20%"
    def prefixo(ohm): # APPLIES THE SI PREFIX AND SCALES THE VALUE, WHICH STARTS IN OHMS
if ohm >= 1000000000:
return str(int(ohm/1000000000))+" GΩ" + tolerancia(cor4)
elif ohm >= 1000000:
return str(int(ohm/1000000))+" MΩ" + tolerancia(cor4)
elif ohm >= 1000:
return str(int(ohm/1000))+" kΩ" + tolerancia(cor4)
else:
return str(ohm)+" Ω" + tolerancia(cor4)
resistencia = (corToNumber(cor1)*10 + corToNumber(cor2)) * pow(10, corToNumber(cor3))
return prefixo(resistencia)
# TEST
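# Illustrative expected outputs (assuming the color names accepted above):
# resistorCor("marrom", "preto", "vermelho", "ouro")   -> "1 kΩ ±5%"
# resistorCor("amarelo", "vermelho", "preto", "prata") -> "42 Ω ±10%"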
#print(resistorCor("amarelo","vermelho", "preto", "ouro")) | 36.32 | 97 | 0.47467 | 179 | 1,816 | 4.837989 | 0.530726 | 0.080831 | 0.04157 | 0.051963 | 0.053118 | 0 | 0 | 0 | 0 | 0 | 0 | 0.072552 | 0.370044 | 1,816 | 50 | 98 | 36.32 | 0.680944 | 0.265419 | 0 | 0.055556 | 0 | 0 | 0.077044 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0.027778 | 0.388889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d29be1a771cd9e0590fc73466c60e3543600577 | 1,705 | py | Python | notifications/consumers.py | utkarsh23/endorsity-club | 7b3684856f9e14284d9998931b7124505376ca5a | [
"MIT"
] | null | null | null | notifications/consumers.py | utkarsh23/endorsity-club | 7b3684856f9e14284d9998931b7124505376ca5a | [
"MIT"
] | null | null | null | notifications/consumers.py | utkarsh23/endorsity-club | 7b3684856f9e14284d9998931b7124505376ca5a | [
"MIT"
] | null | null | null | import json
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from channels.generic.websocket import WebsocketConsumer
from notifications.models import Notification
class NotificationConsumer(WebsocketConsumer):
def connect(self):
"""Authenticate the user and accept the connection"""
if self.scope['user'].is_authenticated:
# Add the user to the group id = user_id
self.group_name = f'notification_{self.scope["user"].id}'
async_to_sync(self.channel_layer.group_add)(
self.group_name,
self.channel_name
)
self.accept()
def disconnect(self, code):
async_to_sync(self.channel_layer.group_discard)(
self.group_name,
self.channel_name
)
def receive(self, text_data):
""" Receive all the requests to the socket """
functions = {
'all_seen': self.all_seen,
}
json_data = json.loads(text_data)
functions[json_data['function']]()
def receive_notification(self, args):
"""
1. Receive a notification from another socket
2. Send the notification received to the frontend
Requires the following properties in args:
1. notifs : List of notifications received
"""
notification = args['notification']
self.send(text_data=json.dumps({
"function": "new_notifications_received",
"arguments": {
"notifs": notification,
}
}))
def all_seen(self):
Notification.objects.filter(user=self.scope['user']).update(is_seen=True)
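# Illustrative producer side (a sketch; any view or background task can push a
# notification to a connected user's group through the channel layer — Channels
# routes the 'type' value to the receive_notification handler above):
#
#     from channels.layers import get_channel_layer
#     from asgiref.sync import async_to_sync
#
#     async_to_sync(get_channel_layer().group_send)(
#         f'notification_{user.id}',
#         {'type': 'receive_notification', 'notification': [{'text': 'Hi'}]},
#     )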
| 29.396552 | 81 | 0.619355 | 189 | 1,705 | 5.417989 | 0.386243 | 0.042969 | 0.032227 | 0.029297 | 0.117188 | 0.117188 | 0.0625 | 0 | 0 | 0 | 0 | 0.002479 | 0.290323 | 1,705 | 57 | 82 | 29.912281 | 0.843802 | 0.181232 | 0 | 0.114286 | 0 | 0 | 0.090501 | 0.046372 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d2aa89261445c754be61bf1b35f8ba6bb1c3e45 | 3,061 | py | Python | ecg_client.py | jtorniainen/ecgnode | 9b03025b43417d126df2829e8ee16efdafb09c10 | [
"MIT"
] | null | null | null | ecg_client.py | jtorniainen/ecgnode | 9b03025b43417d126df2829e8ee16efdafb09c10 | [
"MIT"
] | null | null | null | ecg_client.py | jtorniainen/ecgnode | 9b03025b43417d126df2829e8ee16efdafb09c10 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Jari Torniainen <jari.torniainen@ttl.fi>
# Finnish Institute of Occupational Health
# Copyright 2015
#
# This code is released under the MIT license
# http://opensource.org/licenses/mit-license.php
#
# Please see the file LICENSE for details
import pygame
import requests
import time
def request_hr():
# TODO: Some kind of try-catch?
address = 'http://127.0.0.1:8080/'
request = 'ecgnode/metric/{"type":"mean_hr", "channels":["ch0"] , "time_window":[5], "arguments":[100]}'
return round(requests.get(address + request).json()[0]['return'])
def request_rmssd():
# TODO: Some kind of try-catch?
address = 'http://127.0.0.1:8080/'
request = 'ecgnode/metric/{"type":"rmssd", "channels":["ch0"] , "time_window":[30], "arguments":[100]}'
return round(requests.get(address + request).json()[0]['return'])
def calculate_transition(current_value, target, increment):
if current_value < target:
current_value += increment
elif current_value > target:
current_value -= increment
if abs(current_value - target) < increment:
current_value = target
return current_value
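# For example (illustrative): calculate_transition(60.0, 65.0, 1.0) returns
# 61.0, so calling it once per frame eases the displayed value toward the
# target instead of jumping.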
def render_text(font, string, value, color=(0, 0, 0)):
return font.render(string.ljust(7) + '{}'.format(value), 1, color)
def main():
width = 600
height = 200
pygame.init()
pygame.display.set_caption('HRV-monitor')
screen = pygame.display.set_mode((width, height))
clock = pygame.time.Clock()
BACKGROUND_COLOR = (0, 0, 0)
TEXT_COLOR = (255, 255, 255)
last_update = time.time()
update_interval = 1
font = pygame.font.SysFont('Monospace', 70)
target_hr = request_hr()
current_hr = target_hr
target_rmssd = request_rmssd()
current_rmssd = target_rmssd
text_hr = render_text(font, 'HR:', current_hr)
text_rmssd = render_text(font, 'RMSSD:', current_rmssd)
increment_hr = 1.0
increment_rmssd = 1.0
running = True
while running:
# Handle events
events = pygame.event.get()
for event in events:
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
running = False
if time.time() - last_update > update_interval:
target_hr = request_hr()
target_rmssd = request_rmssd()
last_update = time.time()
#current_hr = calculate_transition(current_hr, target_hr, increment_hr)
#current_rmssd = calculate_transition(current_rmssd, target_rmssd, increment_rmssd)
current_hr = target_hr
current_rmssd = target_rmssd
text_hr = render_text(font, 'HR:', current_hr, TEXT_COLOR)
text_rmssd = render_text(font, 'RMSSD:', current_rmssd, TEXT_COLOR)
screen.fill(BACKGROUND_COLOR)
screen.blit(text_hr, (10, 10))
screen.blit(text_rmssd, (10, 100))
pygame.display.flip()
clock.tick(60)
if __name__ == '__main__':
main()
| 27.827273 | 108 | 0.644561 | 391 | 3,061 | 4.846547 | 0.329923 | 0.05066 | 0.047493 | 0.026913 | 0.305013 | 0.278628 | 0.237467 | 0.237467 | 0.197361 | 0.197361 | 0 | 0.033798 | 0.226723 | 3,061 | 109 | 109 | 28.082569 | 0.766793 | 0.162365 | 0 | 0.25 | 0 | 0.03125 | 0.112593 | 0.025108 | 0 | 0 | 0 | 0.009174 | 0 | 1 | 0.078125 | false | 0 | 0.046875 | 0.015625 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d2bea8c6a7f20c4476edadefe36e9e239e2343f | 1,339 | py | Python | tests/test_models/test_utils/test_corrblock.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 481 | 2021-11-16T07:04:23.000Z | 2022-03-31T22:21:21.000Z | tests/test_models/test_utils/test_corrblock.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 72 | 2021-11-16T12:25:55.000Z | 2022-03-28T13:10:45.000Z | tests/test_models/test_utils/test_corrblock.py | hologerry/mmflow | 40caf064851bd95317424e31cc137c0007a2bece | [
"Apache-2.0"
] | 48 | 2021-11-16T06:48:46.000Z | 2022-03-30T12:46:40.000Z | # Copyright (c) OpenMMLab. All rights reserved.
from math import sqrt
import pytest
import torch
from mmflow.models.utils import CorrBlock
@pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA not available')
@pytest.mark.parametrize('scaled', [True, False])
def test_corr_block(scaled):
feat1 = torch.randn(1, 3, 10, 10).cuda()
feat2 = torch.randn(1, 3, 10, 10).cuda()
corr_block_cfg = dict(
corr_cfg=dict(type='Correlation', max_displacement=1, padding=0))
out = CorrBlock(**corr_block_cfg, scaled=scaled)(feat1, feat2)
assert out.shape == torch.Size((1, 9, 10, 10))
with pytest.raises(AssertionError):
CorrBlock(**corr_block_cfg, scaled=scaled, scale_mode='test')
if scaled:
out = CorrBlock(**corr_block_cfg, scaled=False)(feat1, feat2)
out_scaled_dimension = CorrBlock(
**corr_block_cfg, scaled=scaled, scale_mode='dimension')(feat1,
feat2)
out_scaled_sqrtdimension = CorrBlock(
**corr_block_cfg, scaled=scaled,
scale_mode='sqrt dimension')(feat1, feat2)
# test scaled by dimension and sqrt dimension
assert torch.allclose(out, out_scaled_dimension * 3)
assert torch.allclose(out, out_scaled_sqrtdimension * sqrt(3))
| 34.333333 | 79 | 0.651979 | 168 | 1,339 | 5.029762 | 0.363095 | 0.074556 | 0.085207 | 0.12426 | 0.347929 | 0.347929 | 0.19645 | 0.149112 | 0 | 0 | 0 | 0.031159 | 0.23301 | 1,339 | 38 | 80 | 35.236842 | 0.791626 | 0.066468 | 0 | 0 | 0 | 0 | 0.049719 | 0 | 0 | 0 | 0 | 0 | 0.16 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d35816f874492c596c0c80e5339e40d5d0c398d | 5,284 | py | Python | build/lib/fab_addon_turbowidgets/widgets.py | lemeur/fab_addon_turbowidgets | 9ae89cb06b236af9ec2e81e86fcc3d5ccde040fb | [
"MIT"
] | null | null | null | build/lib/fab_addon_turbowidgets/widgets.py | lemeur/fab_addon_turbowidgets | 9ae89cb06b236af9ec2e81e86fcc3d5ccde040fb | [
"MIT"
] | null | null | null | build/lib/fab_addon_turbowidgets/widgets.py | lemeur/fab_addon_turbowidgets | 9ae89cb06b236af9ec2e81e86fcc3d5ccde040fb | [
"MIT"
] | null | null | null | import json
import logging
from flask_babel import lazy_gettext as _
from markupsafe import Markup
#from wtforms import widgets
from wtforms.widgets import html_params, HTMLString
from wtforms import SelectField
from wtforms.compat import izip, text_type
log = logging.getLogger(__name__)
DEFAULT_JSEDITOR_CONFIG = {
'theme': 'spectre',
'iconlib': 'fontawesome4',
'object_layout': 'normal',
'template': 'default',
'show_errors': 'interaction',
'required_by_default': 1,
'no_additional_properties': 1,
'display_required_only': 0,
'remove_empty_properties': 0,
'keep_oneof_values': 1,
'ajax': 0,
'show_opt_in': 0,
'disable_edit_json': 1,
'disable_collapse': 1,
'disable_properties': 1,
'disable_array_add': 1,
'disable_array_reorder': 1,
'disable_array_delete': 1,
'enable_array_copy': 0,
'array_controls_top': 0,
'disable_array_delete_all_rows': 0,
'disable_array_delete_last_row': 0,
'prompt_before_delete': 1,
}
class DynamicSelectField(SelectField):
"""
DynamicSelect
"""
def __init__(
self,
label=None,
validators=None,
coerce=text_type,
choices_func=None,
validate_choice=True,
**kwargs
):
super(DynamicSelectField, self).__init__(label=label, validators=validators, coerce=coerce, **kwargs)
self.choices_func = choices_func
        self.validate_choice = validate_choice
def iter_choices(self):
self.__set_choices__()
for value, label in self.choices:
yield (value, label, self.coerce(value) == self.data)
def __set_choices__(self):
if not callable(self.choices_func):
choices = []
else:
choices = self.choices_func()
self.choices = choices
def pre_validate(self, form):
self.__set_choices__()
log.debug("TIBO choices='{}'".format(repr(self.choices)))
if self.validate_choice:
for v, _ in self.choices:
if self.data == v:
break
else:
raise ValueError(self.gettext("Not a valid choice"))
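# Illustrative WTForms usage (a sketch; choices_func is any zero-argument
# callable returning (value, label) pairs, re-evaluated on every render and
# validation instead of being fixed at class definition time):
#
#     def load_choices():
#         return [('a', 'Option A'), ('b', 'Option B')]
#
#     class MyForm(Form):
#         pick = DynamicSelectField('Pick one', choices_func=load_choices)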
class JsonEditorWidget(object):
"""
JsonEditor
"""
data_template = (
'<input class="form-control hidden" %(text)s />'
'<div %(jse_params)s id="jse_%(id)s">'
"</div>"
'<script>'
" %(before_js)s"
'</script>'
'<script>'
"editor_%(id)s = init_json_editor('%(id)s', '%(json_schema)s', '%(starting_value)s', '%(json_config)s');"
"if (typeof listOfJsonEditors === 'undefined') { listOfJsonEditors = new Object()};"
"listOfJsonEditors['%(id)s'] = editor_%(id)s;"
'</script>'
'<script>'
" %(after_js)s"
'</script>'
)
def __init__(self, json_schema, before_js=None,after_js=None, extra_classes=None, jseditor_config=DEFAULT_JSEDITOR_CONFIG, master_id=None):
super().__init__()
self.json_schema = json_schema
self.before_js = before_js
self.after_js = after_js
self.jseditor_config = jseditor_config
self.extra_classes = extra_classes
self.master_id = master_id
def __call__(self, field, **kwargs):
kwargs.setdefault("id", field.id)
kwargs.setdefault("name", field.name)
starting_value = ""
if field.data:
starting_value = '{}'.format(field.data)
else:
starting_value = "{}"
if not callable(self.json_schema):
schema = self.json_schema
else:
schema = self.json_schema()
input_classes = 'input-group'
if self.extra_classes:
input_classes = input_classes + ' ' + self.extra_classes
if not schema:
field.json_schema = "{}"
else:
field.json_schema = json.dumps(schema)
before_js = "// No Extra Javascript given"
if not callable(self.before_js) and self.before_js:
before_js = self.before_js
if callable(self.before_js):
before_js = self.before_js()
after_js = "// No Extra Javascript given"
        if not callable(self.after_js) and self.after_js:
after_js = self.after_js
if callable(self.after_js):
after_js = self.after_js()
input_params = html_params(type="text", value=field.data, **kwargs)
jse_dict = {
'id': "jse_{}".format(field.id),
'class': input_classes
}
if self.master_id:
jse_dict['master_id'] = self.master_id
jse_params = html_params(**jse_dict)
template_string = self.data_template % {
"text": input_params,
"jse_params": jse_params,
"id": field.id,
"json_schema": field.json_schema,
"starting_value": starting_value,
"json_config": json.dumps(self.jseditor_config),
"before_js": before_js,
"after_js": after_js
}
return HTMLString(template_string)
| 31.640719 | 143 | 0.575511 | 579 | 5,284 | 4.930915 | 0.248705 | 0.039229 | 0.025219 | 0.023818 | 0.084764 | 0.084764 | 0.068651 | 0.051138 | 0.028722 | 0 | 0 | 0.005208 | 0.309614 | 5,284 | 166 | 144 | 31.831325 | 0.777412 | 0.009841 | 0 | 0.094203 | 0 | 0.007246 | 0.198808 | 0.042492 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.050725 | 0 | 0.123188 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d36e906a5b1061cbbd34d130e802c023af6d395 | 3,780 | py | Python | wsgi_lineprof/formatter.py | ymyzk/wsgi_lineprof | 7aca9899f7d6c08747b17893b47ee8e92347ab9a | [
"MIT"
] | 48 | 2016-10-03T14:04:38.000Z | 2021-12-22T04:02:37.000Z | wsgi_lineprof/formatter.py | ymyzk/wsgi_lineprof | 7aca9899f7d6c08747b17893b47ee8e92347ab9a | [
"MIT"
] | 52 | 2016-11-30T16:13:24.000Z | 2021-10-16T15:56:53.000Z | wsgi_lineprof/formatter.py | ymyzk/wsgi_lineprof | 7aca9899f7d6c08747b17893b47ee8e92347ab9a | [
"MIT"
] | 3 | 2017-05-30T03:36:44.000Z | 2021-07-13T13:10:08.000Z | from abc import ABCMeta, abstractmethod
import inspect
import itertools
import linecache
from os import path
from typing import Any, cast, Dict, Sequence
import colorama
from wsgi_lineprof.stats import LineProfilerStat, LineProfilerStats
from wsgi_lineprof.types import Stream
class BaseFormatter(metaclass=ABCMeta):
def __init__(self, *kwargs: Any) -> None:
return
@abstractmethod
def format_stats(self, stats: LineProfilerStats, stream: Stream) -> None:
return
class TextFormatter(BaseFormatter):
def __init__(self, color: bool = False) -> None:
self.color = color
def format_stats(self, stats: LineProfilerStats, stream: Stream) -> None:
unit = stats.unit
stream.write("Time unit: %s [sec]\n\n" % unit)
for stat in stats.stats:
self.format_stat(stat, stream, unit)
def format_stat(self, stat: LineProfilerStat, stream: Stream, unit: float) -> None:
stream.write("File: %s\n" % stat.filename)
stream.write("Name: %s\n" % stat.name)
total_time = stat.total_time * unit
stream.write("Total time: %g [sec]\n" % total_time)
if not path.exists(stat.filename):
# e.g., filename is <frozen importlib._bootstrap>
stream.write("WARNING: Cannot find a file\n")
return
linecache.clearcache()
lines: Sequence[str] = linecache.getlines(stat.filename)
if stat.name != "<module>":
lines = inspect.getblock(lines[stat.firstlineno - 1 :])
template = "%6s %9s %12s %8s %7s %-s"
header = template % ("Line", "Hits", "Time", "Per Hit", "% Time", "Code")
stream.write(header)
stream.write("\n")
stream.write("=" * len(header))
stream.write("\n")
d: Dict[int, Dict[str, Any]] = {}
for i, code in zip(itertools.count(stat.firstlineno), lines):
timing = stat.timings.get(i)
if timing is None:
d[i] = {
"hits": "",
"time": "",
"per_hit": "",
"percent": "",
"code": code,
"style": self.style_for_percent(0),
}
else:
if stat.total_time == 0:
# TODO: Consider a better way to handle when total_time is 0
percent = 0.0
else:
percent = 100 * timing.total_time / stat.total_time
d[i] = {
"hits": timing.n_hits,
"time": timing.total_time,
"per_hit": "%.1f" % (timing.total_time / timing.n_hits),
"percent": "%.1f" % percent,
"code": code,
"style": self.style_for_percent(percent),
}
if self.color:
colorama.init()
for i in sorted(d.keys()):
r = d[i]
if self.color:
stream.write(r["style"])
stream.write(
template
% (i, r["hits"], r["time"], r["per_hit"], r["percent"], r["code"])
)
if self.color:
stream.write(colorama.Style.RESET_ALL)
colorama.deinit()
stream.write("\n")
# TODO: Make constants (percent/color) configurable
def style_for_percent(self, percent: float) -> str:
"""Returns ANSI style for a given percent"""
if percent < 0.2:
return cast(str, colorama.Fore.LIGHTBLACK_EX)
elif percent >= 50:
return cast(str, colorama.Fore.RED)
elif percent >= 5:
return cast(str, colorama.Fore.YELLOW)
else:
return cast(str, colorama.Fore.WHITE)
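# Illustrative usage (a sketch; assumes `stats` is a populated
# LineProfilerStats instance collected by the profiler):
#
#     import sys
#     TextFormatter(color=True).format_stats(stats, sys.stdout)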
| 35.327103 | 87 | 0.533333 | 422 | 3,780 | 4.689573 | 0.303318 | 0.072259 | 0.026276 | 0.042446 | 0.191006 | 0.096008 | 0.096008 | 0.096008 | 0.056594 | 0 | 0 | 0.00886 | 0.343122 | 3,780 | 106 | 88 | 35.660377 | 0.788159 | 0.051852 | 0 | 0.202247 | 0 | 0 | 0.073826 | 0 | 0 | 0 | 0 | 0.009434 | 0 | 1 | 0.067416 | false | 0 | 0.101124 | 0.022472 | 0.269663 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d370ce200f5d51b82bfad5cfc999bc9dd17a50f | 4,611 | py | Python | backup.py | 0xfadead/soundcloud-backup | 2132d6d21707577486d5632eca6b6194f02b7fa5 | [
"MIT"
] | null | null | null | backup.py | 0xfadead/soundcloud-backup | 2132d6d21707577486d5632eca6b6194f02b7fa5 | [
"MIT"
] | null | null | null | backup.py | 0xfadead/soundcloud-backup | 2132d6d21707577486d5632eca6b6194f02b7fa5 | [
"MIT"
] | null | null | null | import requests
import zipfile
import tempfile
import sys
import argparse
import time
CLIENT_ID = 'JlZIsxg2hY5WnBgtn3jfS0UYCl0K8DOg'
INFO_BASE_URL = 'https://api.soundcloud.com/resolve.json'
TRACKS_BASE_URL = 'https://api.soundcloud.com/users/{:d}/tracks'
LIMIT = 50 # the max track data SoundCloud will return
ARCHIVE_SKELETON = '{:s}-{:s}.zip'
# SoundCloud streamable tracks are transcoded
# at 128 kbps and are in mp3 format
STREAM_SKELETON = '{:s}.mp3'
def error(msg):
print(msg, file=sys.stderr)
def json_request(scurl, payload):
try:
r = requests.get(scurl, params=payload)
if r.status_code != requests.codes.ok:
error('Could not reach: {}'.format(str(r.status_code)))
return {}
return r.json()
except requests.exceptions.RequestException as e:
error(e)
return {}
def user_info(scurl):
data = json_request(
INFO_BASE_URL, {
'url': scurl, # encode (?)
'client_id': CLIENT_ID
})
if not bool(data):
return [None for _ in range(4)]
return data.get('id'), data.get('username'), \
data.get('permalink'), data.get('track_count')
def user_tracks(userid, offset):
# todo: downloadable + download_url (?)
target_keys = ('id', 'streamable', 'stream_url', 'permalink', 'title')
data = json_request(
TRACKS_BASE_URL.format(userid),
{'client_id': CLIENT_ID,
'offset': offset})
return [{k: unfiltered.get(k) for k in target_keys}
for unfiltered in data ]
def save_audio_stream(fout, csize, streamurl):
r = requests.get(streamurl,
{'client_id' : CLIENT_ID},
stream=True)
if r.status_code != requests.codes.ok:
error('Could not reach: {}'.format(str(r.status_code)))
return False
for chunk in r.iter_content(chunk_size=csize):
if chunk:
fout.write(chunk)
return True
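# Illustrative use (a sketch; `track` is one entry returned by user_tracks()):
#
#     with open('song.mp3', 'wb') as f:
#         save_audio_stream(f, 1024, track['stream_url'])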
def main():
# Get command line args.
# Everyone's favorite thing about coding...
parser = argparse.ArgumentParser()
parser.add_argument('url',
type=str,
help="Url of SoundCloud profile you'd like to backup"
)
parser.add_argument('-C', '--client-id',
type=str,
help='If you are gettinga 429 response the default \
client id is maxed out for the day so you \
can optionally supply a different one.'
)
parser.add_argument('-A', '--name',
type=str,
help="Name of the archive"
)
parser.add_argument('-Z', '--chunk-size',
type=int,
default=1024,
help='The chunk size in which pieces of the mp3 file \
will be saved (default: 1024).'
)
parser.add_argument('-d', '--delay-time',
type=int,
default=0,
help='Specify a delay time (in seconds) between each track download.'
)
args = parser.parse_args()
url = args.url
if ('soundcloud.com' not in url or
not url.startswith('https://')):
print('Please use a valid HTTPS Soundcloud Url')
return
if args.client_id is not None:
global CLIENT_ID
CLIENT_ID = args.client_id
uid, uname, ulink, trackcnt = user_info(url)
if uid is None:
print('Could not locate: {}'.format(url))
return
tracks = []
    for offset in range(0, trackcnt, LIMIT):
tracks += user_tracks(uid, offset)
if not bool(tracks):
        print('{} has no songs!'.format(url))
return
print('{:d} streamable tracks on {}\'s page'.format(len(tracks), uname))
zipname = (ARCHIVE_SKELETON.format(uname, ulink)
if args.name is None else args.name)
with zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED) as archive:
print('Starting download...')
for track in tracks:
if not track['streamable']:
print(' {} is not streamable.'.format(track['title']))
continue
with tempfile.NamedTemporaryFile('wb') as f:
if save_audio_stream(f, args.chunk_size, track['stream_url']):
archive.write(f.name, arcname=STREAM_SKELETON
.format(track['permalink']))
print(' {} has been saved to the archive'.format(track['title']))
else:
print(' Could not download: {}'.format(track['title']))
time.sleep(args.delay_time)
if __name__ == '__main__':
main()
| 31.367347 | 88 | 0.585556 | 568 | 4,611 | 4.639085 | 0.330986 | 0.039469 | 0.032258 | 0.024288 | 0.075901 | 0.075901 | 0.054649 | 0.054649 | 0.054649 | 0.054649 | 0 | 0.008896 | 0.292995 | 4,611 | 146 | 89 | 31.582192 | 0.799387 | 0.050748 | 0 | 0.135593 | 0 | 0 | 0.170101 | 0.007326 | 0 | 0 | 0 | 0.006849 | 0 | 1 | 0.050847 | false | 0 | 0.050847 | 0 | 0.194915 | 0.076271 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d3ab01e36180285711e2ffbcad7ba2b517d7211 | 5,564 | py | Python | distpy/workers/strainrate2summary.py | billmcchesney1/distpy | fa49d2a910b164514057ee75fa570397b7004159 | [
"MIT"
] | 23 | 2020-02-12T13:25:22.000Z | 2022-01-19T07:09:39.000Z | distpy/workers/strainrate2summary.py | billmcchesney1/distpy | fa49d2a910b164514057ee75fa570397b7004159 | [
"MIT"
] | 2 | 2020-03-09T11:24:00.000Z | 2020-09-14T10:50:51.000Z | distpy/workers/strainrate2summary.py | billmcchesney1/distpy | fa49d2a910b164514057ee75fa570397b7004159 | [
"MIT"
] | 11 | 2020-03-28T16:40:39.000Z | 2021-07-30T16:02:12.000Z | # (C) 2020, Schlumberger. Refer to LICENSE
import numpy
import datetime
import scipy.signal
import os
import distpy.io_help.io_helpers as io_helpers
import distpy.io_help.directory_services as directory_services
import distpy.calc.pub_command_set as pub_command_set
import distpy.calc.extra_numpy as extra_numpy
import distpy.calc.processing_commands as processing_commands
def build_command_list(commandJson, data=None, dirout="scratch",
datedir="none",datestring="none",nx=100,prf=10000,xaxis=None,taxis=None,nt=100,
extended_list=[],inline_plots=0):
if xaxis is None:
xaxis=numpy.arange(nx)
if taxis is None:
taxis=numpy.arange(nt)
# initialize the command list that will eventually be executed.
# This first one is anomalous - need a good way to initiate the top of the tree...
# possibly this is a root...
command_list=[]
# { "name" : "data", "uid" : 0},
command_list.append(pub_command_set.DataLoadCommand(data,{}))
# command_list is a list of dictionaries
for command in commandJson['command_list']:
# we add the basics of nx and prf, which belong to the data
dir_suffix = command.get('directory_out','NONE')
if not dir_suffix=='NONE':
dirval = os.path.join(dirout,dir_suffix)
if not os.path.exists(dirval):
os.makedirs(dirval)
command['directory_out']=dirval
dir_suffix = command.get('directory_in','NONE')
if not dir_suffix=='NONE':
dirval = os.path.join(dirout,dir_suffix)
if not os.path.exists(dirval):
os.makedirs(dirval)
command['directory_in']=dirval
command['date_dir']=datedir
command['datestring']=datestring
command['nx']=nx
command['prf']=prf
command['xaxis']=xaxis
command['taxis']=taxis
command['nt']=nt
command['inline_plots']=inline_plots
# prf/nt is the rescale factor
command['f_rescale']=float(nt)/float(prf)
index = command.get('band00',-1)
if index>=0:
command['command']=command_list[index]
#s(len(command_list))
command_list.append(processing_commands.CommandFactory(command_list,command,extended_list=extended_list))
return command_list
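# Illustrative call (a sketch; the workflow JSON and data file names are
# hypothetical):
#
#     import json
#     commandJson = json.load(open('workflow.json'))
#     data = numpy.load('strainrate.npy')
#     commands = build_command_list(commandJson, data=data, dirout='scratch',
#                                   nx=data.shape[0], nt=data.shape[1],
#                                   prf=10000)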
def docs(command_list,commandJson):
ltxJson = []
ltxJson = io_helpers.latexJson({"command_list" : commandJson['command_list']},ltxJson)
lines =[]
lines = io_helpers.latexTop(lines)
lines = io_helpers.command2latex(command_list,lines, commandJson.get('name','Command Set'), commandJson.get('description','Commands used'))
for line in ltxJson:
lines.append(line)
lines = io_helpers.latexPng(lines)
lines = io_helpers.latexTail(lines)
# dot graphviz graph
graphs = io_helpers.dot_graph(commandJson['command_list'])
return lines, graphs
'''
strainrate2summary : process one file of strainrate data using the provided
command tree in commandJSON.
This executes in one thread.
'''
def strainrate2summary(filename, xaxis, prf, dirout, commandJson, extended_list,data):
# try to make a datestamp that WITSML could use...
tokens = filename.split(os.sep)
# the final token without its .npy is the unix timestamp
unixtime = int(tokens[-1][:-4])
datestring = datetime.datetime.utcfromtimestamp(unixtime).strftime("%Y-%m-%dT%H:%M:%S+00:00")
datedir = str(unixtime)
# Configure the hardware
boxsize = commandJson.get('BOXSIZE', 500)
extra_numpy.set_boxsize(boxsize)
if data is None:
data = numpy.load(filename)
#print(data.shape)
nx = data.shape[0]
nt = data.shape[1]
# Is this a request to document a workflow?
isDocs = commandJson.get('document',0)
# initialize the command list that will eventually be executed.
# This first one is anomalous - need a good way to initiate the top of the tree...
# possibly this is a root...
taxis = commandJson.get('taxis',None)
# A convenience for Jupyter Notebooks - when developing workflows we sometimes want to inline plot views
inline_plots = commandJson.get('inline_plots',0)
command_list=build_command_list(commandJson, data=data,dirout=dirout,
datedir=datedir,datestring=datestring,
nx=nx,prf=prf,xaxis=xaxis,taxis=taxis,
nt=nt,extended_list=extended_list,inline_plots=inline_plots)
# if documenting - that is one path...execution is the other
if isDocs>0:
lines, graphs = docs(command_list, commandJson)
with open(os.path.join(dirout,'config.tex'),'w') as f:
for line in lines:
f.write(line+'\n')
# dot graphviz graph
with open(os.path.join(dirout,'config.gv'),'w') as f:
for line in graphs:
f.write(line+'\n')
else:
# if postconditions are all already met, we can skip this workflow...
post_cond = True
for command in command_list:
for filename in command.postcond():
if not directory_services.exists(filename):
post_cond = False
if post_cond==True:
print(filename, ' post-conditions are all satisfied, skipping processing.')
else:
for command in command_list:
command.execute()
return None, None
| 38.909091 | 143 | 0.640546 | 707 | 5,564 | 4.932108 | 0.291372 | 0.069401 | 0.031546 | 0.018354 | 0.208775 | 0.161744 | 0.154287 | 0.137081 | 0.137081 | 0.137081 | 0 | 0.009192 | 0.257009 | 5,564 | 142 | 144 | 39.183099 | 0.834301 | 0.183681 | 0 | 0.147368 | 0 | 0 | 0.086103 | 0.005309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0 | 0.094737 | 0 | 0.157895 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d3b37227fa67f63b24e1ab4a3e9bca2332dd99a | 542 | py | Python | backend.py | LRAbbade/postal-validator | 4f705fdb766eee8e16f61a82cfef1b25f78a42a6 | [
"MIT"
] | 1 | 2019-08-13T20:48:03.000Z | 2019-08-13T20:48:03.000Z | backend.py | LRAbbade/postal-validator | 4f705fdb766eee8e16f61a82cfef1b25f78a42a6 | [
"MIT"
] | null | null | null | backend.py | LRAbbade/postal-validator | 4f705fdb766eee8e16f61a82cfef1b25f78a42a6 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request, jsonify
from main import Validate
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/validate')
def validate_cep():
cep = str(request.args.get('cep'))
try:
res = Validate(cep)
error = None
except Exception as e:
res = False
error = str(e)
return jsonify({
'cep': cep,
'result': res,
'error': error
})
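# Illustrative exchange (assuming Validate accepts a plain CEP string and
# returns a JSON-serializable result):
#   GET /validate?cep=01001000
#   -> {"cep": "01001000", "result": true, "error": null}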
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 19.357143 | 58 | 0.586716 | 69 | 542 | 4.391304 | 0.492754 | 0.019802 | 0.019802 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010076 | 0.267528 | 542 | 27 | 59 | 20.074074 | 0.753149 | 0 | 0 | 0 | 0 | 0 | 0.095941 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.090909 | 0.045455 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d3e82a670bdb1776ae183cbe465aaa5f1d7f137 | 1,537 | py | Python | supports/pyload/src/pyload/plugins/downloaders/ShareplaceCom.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | 1 | 2020-04-02T17:03:39.000Z | 2020-04-02T17:03:39.000Z | supports/pyload/src/pyload/plugins/downloaders/ShareplaceCom.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | supports/pyload/src/pyload/plugins/downloaders/ShareplaceCom.py | LuckyNicky/pycrawler | 4b3fe2f6e8e51f236d95a64a89a44199e4e97743 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import re
import urllib.parse
from ..base.simple_downloader import SimpleDownloader
class ShareplaceCom(SimpleDownloader):
__name__ = "ShareplaceCom"
__type__ = "downloader"
__version__ = "0.19"
__status__ = "testing"
__pyload_version__ = "0.5"
__pattern__ = r"http://(?:www\.)?shareplace\.(com|org)/\?\w+"
__config__ = [("enabled", "bool", "Activated", True)]
__description__ = """Shareplace.com downloader plugin"""
__license__ = "GPLv3"
__authors__ = [("ACCakut", None), ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
NAME_PATTERN = r"Filename:</font></b>\s*(?P<N>.+?)<b><br>"
SIZE_PATTERN = r"Filesize:</font></b>\s*(?P<S>[\d.,]+) (?P<U>[\w^_]+)<b><br>"
TEMP_OFFLINE_PATTERN = r"^unmatchable$"
OFFLINE_PATTERN = r"Your requested file is not found"
WAIT_PATTERN = r"var zzipitime = (\d+);"
def handle_free(self, pyfile):
response = self.captcha.decrypt("http://shareplace.com/captcha.php")
self.data = self.load(pyfile.url, post={"captchacode": response})
if "Captcha number error or expired" in self.data:
self.retry_captcha()
self.captcha.correct()
self.check_errors()
m = re.search(r"var beer = '(.+?)'", self.data)
if m is not None:
self.link = urllib.parse.unquote(
urllib.parse.unquote(
m.group(1).replace("vvvvvvvvv", "").replace("lllllllll", "")
).replace("teletubbies", "")
)[13:]
| 31.367347 | 82 | 0.594665 | 177 | 1,537 | 4.870057 | 0.60452 | 0.055684 | 0.013921 | 0.016241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012563 | 0.223162 | 1,537 | 48 | 83 | 32.020833 | 0.70938 | 0.013663 | 0 | 0 | 0 | 0.030303 | 0.309115 | 0.081902 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.090909 | 0 | 0.606061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d4048ca3098d4827db9662975d9206ac1e37948 | 3,152 | py | Python | app/perm_app.py | StatML-dAI/dnn-inference | f17b6945dfff80118e25f1b39b56c3e4027b3b92 | [
"MIT"
] | 4 | 2020-12-08T18:53:31.000Z | 2021-11-23T11:29:17.000Z | app/perm_app.py | statmlben/dnn-inference | fef1cbe9382141dfe5c81e84a6bd39f7a17cd736 | [
"MIT"
] | null | null | null | app/perm_app.py | statmlben/dnn-inference | fef1cbe9382141dfe5c81e84a6bd39f7a17cd736 | [
"MIT"
] | 2 | 2022-02-17T15:57:42.000Z | 2022-02-20T11:50:45.000Z | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from tensorflow.python.keras import backend as K
import numpy as np
import time
from numpy import linalg as LA
import funs
from sklearn.model_selection import train_test_split
from keras.optimizers import Adam, SGD
import seaborn as sns
import matplotlib.pyplot as plt
import time
num_classes = 2
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x = np.vstack((x_train, x_test))
y = np.hstack((y_train, y_test))
ind = (y == 9) + (y == 7)
x, y = x[ind], y[ind]
x = x.astype('float32')
x += .01*np.random.randn(14251, 28, 28)
y[y==7], y[y==9] = 0, 1
if K.image_data_format() == 'channels_first':
x = x.reshape(x.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x = x.reshape(x.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x = x.astype('float32')
x /= 255.
print('x shape:', x.shape)
# convert class vectors to binary class matrices
y = keras.utils.to_categorical(y, num_classes)
K.clear_session()
def cnn():
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.Adam(0.005), metrics=['accuracy'])
return model
tic = time.perf_counter()
model, model_perm = cnn(), cnn()
from keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_accuracy', mode='max', verbose=1, patience=20, restore_best_weights=True)
fit_params = {'callbacks': [es],
'epochs': 100,
'batch_size': 32,
'validation_split': .2,
'verbose': 0}
# split_params = {'split': 'one-sample',
# 'perturb': None,
# 'num_perm': 1000,
# 'ratio_grid': [.3, .4, .5],
# 'perturb_grid': [.05, .1, .5, 1.],
# 'min_inf': 100,
# 'min_est': 1000,
# 'metric': 'close',
# 'verbose': 1}
inf_cov = [[np.arange(19,28), np.arange(13, 20)], [np.arange(21,28), np.arange(4, 13)],
[np.arange(7,16), np.arange(9, 16)]]
shiing = funs.PermT(inf_cov=inf_cov, model=model, model_perm=model_perm, num_perm=100, eva_metric='zero-one')
p_value_tmp, metric_tmp = shiing.testing(x, y, fit_params=fit_params)
toc = time.perf_counter()
print('testing time: %.3f' %(toc-tic))
# 0-th inference; Adaptive data splitting: n: 8551; m: 2850
# diff: -0.012(0.700); metric: 0.459(0.498); metric_mask: 0.471(0.499)
# accept H0 with p_value: 0.181
# testing time: 1492.996
# 0-th inference; Adaptive data splitting: n: 8551; m: 5700; perturb: 0.05
# diff: 0.000(0.087); metric: 0.463(0.499); metric_mask: 0.463(0.499)
# accept H0 with p_value: 0.561
# testing time: 2075.215
| 30.901961 | 115 | 0.695431 | 522 | 3,152 | 4.059387 | 0.377395 | 0.030203 | 0.023596 | 0.033034 | 0.117036 | 0.07362 | 0.058518 | 0.058518 | 0.03681 | 0 | 0 | 0.077548 | 0.140863 | 3,152 | 101 | 116 | 31.207921 | 0.704948 | 0.229061 | 0 | 0.0625 | 0 | 0 | 0.063123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.265625 | 0 | 0.296875 | 0.046875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d44201cc80a01ee7dce6e56338d1f7b7bb103f3 | 3,144 | py | Python | preprocess.py | Mblakey/MOLVAE-Tensorflow-2 | 55d37b9560450c1f3d83c3b0c852301bd7271659 | [
"MIT"
] | null | null | null | preprocess.py | Mblakey/MOLVAE-Tensorflow-2 | 55d37b9560450c1f3d83c3b0c852301bd7271659 | [
"MIT"
] | null | null | null | preprocess.py | Mblakey/MOLVAE-Tensorflow-2 | 55d37b9560450c1f3d83c3b0c852301bd7271659 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
def Smiles_Tokeniser(smiles):
toke_smiles = list(smiles)
return toke_smiles
def max_length_list(input_list):
max_length = max(len(x) for x in input_list )
max_list = max(input_list, key = len)
return(max_length)
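# For example (illustrative): max_length_list(['CCO', 'c1ccccc1']) returns 8,
# the length of the longest SMILES string in the list.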
def Preprocess(file_dir,file_name,splitting=True, smiles_filter=120):
print('Reading file...')
data = pd.read_csv(file_dir + file_name, delimiter = '\t')
print("Read Successful")
print()
print('Length Checking...')
smiles_data = data['canonical_smiles'].astype('string')
smiles_data = smiles_data[pd.notnull(smiles_data)]
smiles_data = smiles_data.reset_index(level=None, drop=True, name=None, inplace=False)
    idx = [i for i, x in enumerate(smiles_data) if len(x) <= smiles_filter]
smiles_data = smiles_data[idx]
print(f"{len(smiles_data)} molecules at max smiles length: {smiles_filter}")
    smiles_data = smiles_data[:100000] # used for local machines to keep size down; it CAN be removed
print()
print("Tokenising... this may take some time")
tokenised_elements = [Smiles_Tokeniser(i) for i in smiles_data]
vocab_set = {x for l in tokenised_elements for x in l}
char_to_int = dict((c, i) for i, c in enumerate(vocab_set))
int_to_char = dict((i, c) for i, c in enumerate(vocab_set))
vocab_size = len(vocab_set)
max_length = max_length_list(tokenised_elements)
print(f"Vocab set generated, size: {vocab_size}, Max Length: {max_length}")
print()
print('Converting Char to Int with Post Zero Padding...')
labeled_data = np.zeros((len(tokenised_elements), max_length, 1), dtype=np.int32)
for i in range(len(tokenised_elements)):
for t, char in enumerate(tokenised_elements[i]):
labeled_data[i, t, 0] = char_to_int[char]
print('One Hot Encoding Data...')
encoded_data = to_categorical(labeled_data)
print()
if splitting == True:
# --- Block for Test-Train Splitting IF retraining model ---
print('Train:Test split to 0.8:0.2')
x_train, x_test = train_test_split(encoded_data , test_size=0.2)
print('Number of training data: '+str(len(x_train)))
print('Number of testing data: '+str(len(x_test)))
print('Saving...')
np.save(file_dir + 'x_train', x_train)
np.save(file_dir + 'x_test', x_test)
np.savez(file_dir + 'char_data', char_to_int=char_to_int, int_to_char=int_to_char)
print('Saved.')
print()
print('Preproccess Successful')
if splitting == False:
print("Saving Whole File")
np.save(file_dir + 'chemical_data', labeled_data)
np.savez(file_dir + 'char_data', char_to_int=char_to_int, int_to_char=int_to_char)
print('Saved.')
print()
print('Preproccess Successful')
file_dir = 'data/'
file_name = 'chembl_29_chemreps.txt'
Preprocess(file_dir,file_name,splitting=True) | 34.933333 | 104 | 0.672074 | 462 | 3,144 | 4.337662 | 0.28355 | 0.06487 | 0.031437 | 0.0499 | 0.174651 | 0.160679 | 0.160679 | 0.098802 | 0.098802 | 0.098802 | 0 | 0.009705 | 0.213422 | 3,144 | 90 | 105 | 34.933333 | 0.800647 | 0.038168 | 0 | 0.181818 | 0 | 0 | 0.179021 | 0.00728 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.090909 | 0 | 0.151515 | 0.348485 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d4678ad04ea0836fb25388266d9121d37ee740d | 2,196 | py | Python | src/merge.py | mgarriott/PDFMerger | ae1167bf832cae947ac13b4625b7c50e1704da67 | [
"BSD-2-Clause"
] | 2 | 2019-05-31T17:04:33.000Z | 2021-05-16T17:29:04.000Z | src/merge.py | mgarriott/PDFMerger | ae1167bf832cae947ac13b4625b7c50e1704da67 | [
"BSD-2-Clause"
] | null | null | null | src/merge.py | mgarriott/PDFMerger | ae1167bf832cae947ac13b4625b7c50e1704da67 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''
Merge together a pdf document containing only front pages with a separate
document containing only back pages and save the result into a new document.
@author: Matt Garriott
'''
import argparse
import os
from pyPdf import PdfFileReader, PdfFileWriter
def merge(fppath, bppath, outputpath, no_delete, fed_backwards):
fpfile = PdfFileReader(open(fppath, 'rb'))
bpfile = PdfFileReader(open(bppath, 'rb'))
outputfile = PdfFileWriter()
outputpages = []
backpages = True
for i in range(fpfile.getNumPages()):
try:
outputpages.append(fpfile.getPage(i))
if backpages:
if fed_backwards:
outputpages.append(bpfile.getPage(bpfile.getNumPages() - i - 1))
else:
outputpages.append(bpfile.getPage(i))
except IndexError:
backpages = False
if not no_delete:
outputpages = [page for page in outputpages if page.extractText() != '']
for page in outputpages:
outputfile.addPage(page)
outputfile.write(open(os.path.expanduser(outputpath), 'wb'))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Merge front and back pages located in separate ' +
'PDF documents into one PDF document.')
parser.add_argument('-f', '--front-pages', required=True,
help='The path to the PDF containing the front pages')
parser.add_argument('-b', '--back-pages', required=True,
help='The path to the PDF containing the back pages')
parser.add_argument('-o', '--output-file', default='~/Desktop/merged.pdf',
help='The path to save the completed pdf file, default is ~/Desktop/merged.pdf')
parser.add_argument('-nd', '--no-delete', default=False, action='store_true',
help='Prevent blank pages from being deleted from the finished document.')
parser.add_argument('--fed-backwards', default=False, action='store_true',
help='If you were lazy and fed the document in backwards on the second side, use this flag.')
args = parser.parse_args()
merge(args.front_pages, args.back_pages, args.output_file, args.no_delete, args.fed_backwards)
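# Example invocation (hypothetical file names, shown for illustration only):
#   python merge.py -f fronts.pdf -b backs.pdf -o ~/Desktop/merged.pdf --fed-backwards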
| 38.526316 | 115 | 0.674408 | 277 | 2,196 | 5.256318 | 0.400722 | 0.030907 | 0.058379 | 0.026786 | 0.142857 | 0.10989 | 0.067308 | 0.067308 | 0.067308 | 0.067308 | 0 | 0.000581 | 0.215847 | 2,196 | 56 | 116 | 39.214286 | 0.844948 | 0.088798 | 0 | 0 | 0 | 0 | 0.260913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.078947 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d498e78b566a14f91a958cfce1f1978c77ba59a | 32,045 | py | Python | tests/test_ls.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 16 | 2021-02-15T13:49:29.000Z | 2022-03-29T10:34:43.000Z | tests/test_ls.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 8 | 2021-02-27T18:40:46.000Z | 2021-10-03T15:49:27.000Z | tests/test_ls.py | jain-aayush1123/here-location-services-python | 11ad5ef8273b4f243c43bc00ebd470f725b980bc | [
"Apache-2.0"
] | 11 | 2021-02-16T04:58:08.000Z | 2022-02-21T20:51:55.000Z | # Copyright (C) 2019-2021 HERE Europe B.V.
# SPDX-License-Identifier: Apache-2.0
import json
from datetime import datetime, timedelta
import pandas as pd
import pytest
import pytz
from geojson import FeatureCollection, Point
from geojson.geometry import LineString
from here_location_services import LS
from here_location_services.config.autosuggest_config import POLITICAL_VIEW, SHOW, SearchCircle
from here_location_services.config.base_config import (
ROUTING_MODE,
SHIPPED_HAZARDOUS_GOODS,
PlaceOptions,
Truck,
WayPointOptions,
)
from here_location_services.config.dest_weather_config import (
DEST_WEATHER_PRODUCT,
DEST_WEATHER_UNITS,
WEATHER_SEVERITY,
WEATHER_TYPE,
)
from here_location_services.config.isoline_routing_config import (
ISOLINE_ROUTING_AVOID_FEATURES,
ISOLINE_ROUTING_TRANSPORT_MODE,
RANGE_TYPE,
)
from here_location_services.config.matrix_routing_config import (
AVOID_FEATURES,
MATRIX_ATTRIBUTES,
PROFILE,
AutoCircleRegion,
AvoidBoundingBox,
BoundingBoxRegion,
CircleRegion,
PolygonRegion,
WorldRegion,
)
from here_location_services.config.routing_config import AVOID_FEATURES as ROUTING_AVOID_FEATURES
from here_location_services.config.routing_config import (
ROUTE_COURSE,
ROUTE_MATCH_SIDEOF_STREET,
ROUTING_RETURN,
ROUTING_SPANS,
ROUTING_TRANSPORT_MODE,
Scooter,
Via,
)
from here_location_services.config.search_config import PLACES_CATEGORIES
from here_location_services.config.tour_planning_config import (
VEHICLE_MODE,
Fleet,
Job,
JobPlaces,
Plan,
Relation,
VehicleProfile,
VehicleType,
)
from here_location_services.exceptions import ApiError
from here_location_services.responses import GeocoderResponse
from here_location_services.utils import get_apikey
LS_API_KEY = get_apikey()
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_tour_planning():
"""Test Tour Planning API."""
ls = LS(api_key=LS_API_KEY)
fleet = Fleet(
vehicle_types=[
VehicleType(
id="09c77738-1dba-42f1-b00e-eb63da7147d6",
profile_name="normal_car",
costs_fixed=22,
costs_distance=0.0001,
costs_time=0.0048,
capacity=[100, 5],
skills=["fridge"],
amount=1,
shift_start={
"time": "2020-07-04T09:00:00Z",
"location": {"lat": 52.5256, "lng": 13.4542},
},
limits={"maxDistance": 20000, "shiftTime": 21600},
shift_end={
"location": {"lat": 52.5256, "lng": 13.4542},
"time": "2020-07-04T18:00:00Z",
},
shift_breaks=[
{
"duration": 1800,
"times": [["2020-07-04T11:00:00Z", "2020-07-04T13:00:00Z"]],
}
],
)
],
vehicle_profiles=[VehicleProfile(name="normal_car", vehicle_mode=VEHICLE_MODE.car)],
)
plan = Plan(
jobs=[
Job(
id="4bbc206d-1583-4266-bac9-d1580f412ac0",
pickups=[
JobPlaces(
duration=180,
demand=[10],
location=(52.53088, 13.38471),
times=[["2020-07-04T10:00:00Z", "2020-07-04T12:00:00Z"]],
)
],
deliveries=[
JobPlaces(
duration=300,
demand=[10],
location=(52.53088, 13.38471),
times=[["2020-07-04T14:00:00Z", "2020-07-04T16:00:00Z"]],
)
],
skills=["fridge"],
priority=2,
)
],
relations=[
Relation(
type="sequence",
jobs=["departure", "4bbc206d-1583-4266-bac9-d1580f412ac0", "arrival"],
vehicle_id="09c77738-1dba-42f1-b00e-eb63da7147d6_1",
)
],
)
resp = ls.solve_tour_planning(
fleet=fleet,
plan=plan,
id="7f3423c2-784a-4983-b472-e14107d5a54a",
optimization_traffic="liveOrHistorical",
optimization_waiting_time={"reduce": True, "bufferTime": 15},
)
assert resp
assert resp.problemId
assert resp.statistic
assert resp.tours
resp2 = ls.solve_tour_planning(
fleet=fleet,
plan=plan,
id="7f3423c2-784a-4983-b472-e14107d5a54a",
optimization_traffic="liveOrHistorical",
optimization_waiting_time={"reduce": True, "bufferTime": 15},
is_async=True,
)
assert resp2
assert resp2.problemId
assert resp2.statistic
assert resp2.tours
with pytest.raises(ValueError):
fleet2 = Fleet(
vehicle_types=[
VehicleType(
id="09c77738-1dba-42f1-b00e-eb63da7147d6",
profile_name="normal_car",
costs_fixed=22,
capacity=[100, 5],
skills=["fridge"],
amount=1,
shift_start={
"time": "2020-07-04T09:00:00Z",
"location": {"lat": 52.5256, "lng": 13.4542},
},
)
],
vehicle_profiles=[
VehicleProfile(
name="normal_car",
vehicle_mode=VEHICLE_MODE.car,
allow_highway_for_scooter=True,
)
],
)
ls.solve_tour_planning(
fleet=fleet2,
plan=plan,
)
with pytest.raises(ValueError):
plan2 = Plan(
jobs=[
Job(
id="4bbc206d-1583-4266-bac9-d1580f412ac0",
)
],
relations=[
Relation(
type="sequence",
jobs=["departure", "4bbc206d-1583-4266-bac9-d1580f412ac0", "arrival"],
vehicle_id="09c77738-1dba-42f1-b00e-eb63da7147d6_1",
)
],
)
ls.solve_tour_planning(
fleet=fleet,
plan=plan2,
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.solve_tour_planning(fleet=fleet, plan=plan, id="7f3423c2-784a-4983-b472-e14107d5a54a")
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_weather_alerts():
"""Test weather alerts endpoint of destination weather api."""
ls = LS(api_key=LS_API_KEY)
resp = ls.get_weather_alerts(
geometry=LineString([(8.919, 44.4074), (8.923, 44.4075)]),
start_time=datetime.now(),
width=25000,
)
assert resp
geo_json = resp.to_geojson()
assert geo_json.type == "FeatureCollection"
resp2 = ls.get_weather_alerts(
geometry=Point(coordinates=[15.256, 23.456]),
start_time=datetime.now(),
weather_type=WEATHER_TYPE.ice,
weather_severity=WEATHER_SEVERITY.high,
country="US",
end_time=datetime.now() + timedelta(days=7),
)
assert resp2
with pytest.raises(ValueError):
ls.get_weather_alerts(
geometry=LineString([(8.919, 44.4074), (8.923, 44.4075)]),
start_time=datetime.now(),
width=50000,
)
with pytest.raises(ValueError):
ls.get_weather_alerts(
geometry=Point(coordinates=[15.256, 23.456]),
start_time=datetime.now(),
width=1000000,
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.get_weather_alerts(
geometry=Point(coordinates=[15.256, 23.456]),
start_time=datetime.now(),
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_dest_weather():
"""Test destination weather api."""
ls = LS(api_key=LS_API_KEY)
resp = ls.get_dest_weather(products=[DEST_WEATHER_PRODUCT.observation], query="Chicago")
assert resp.places
assert resp.places[0]["observations"]
resp2 = ls.get_dest_weather(
products=[DEST_WEATHER_PRODUCT.forecastHourly],
query="Chicago",
units=DEST_WEATHER_UNITS.imperial,
)
assert resp2.places
assert resp2.places[0]["hourlyforecasts"]
resp3 = ls.get_dest_weather(
products=[DEST_WEATHER_PRODUCT.forecast7days], at=["-13.163068,-72.545128"]
)
assert resp3.places
assert resp3.places[0]["extendedDailyforecasts"]
resp4 = ls.get_dest_weather(
products=[DEST_WEATHER_PRODUCT.forecast7daysSimple, DEST_WEATHER_PRODUCT.observation],
zipcode="10025",
one_observation=True,
)
assert resp4.places
assert resp4.places[0]["observations"]
resp5 = ls.get_dest_weather(
products=[DEST_WEATHER_PRODUCT.forecast7daysSimple, DEST_WEATHER_PRODUCT.observation],
zipcode="10025",
at=["-13.163068,-72.545128"],
one_observation=True,
)
assert resp5.places
assert resp5.places[0]["observations"]
with pytest.raises(ValueError):
ls.get_dest_weather(products=[DEST_WEATHER_PRODUCT.forecast7days])
with pytest.raises(ValueError):
ls.get_dest_weather(
products=[DEST_WEATHER_PRODUCT.forecast7days], query="Chicago", one_observation=True
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.get_dest_weather(products=[DEST_WEATHER_PRODUCT.observation], query="Chicago")
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_autosuggest():
"""Test autosuggest api."""
ls = LS(api_key=LS_API_KEY)
resp = ls.autosuggest(query="bar", limit=5, at=["-13.163068,-72.545128"], in_country=["USA"])
assert resp.items
assert len(resp.items) <= 5
search_in_circle1 = SearchCircle(lat=52.53, lng=13.38, radius="10000")
search_in_bbox1 = ("13.08836", "52.33812", "13.761", "52.6755")
resp3 = ls.autosuggest(query="bar", limit=5, search_in_circle=search_in_circle1, lang=["en"])
assert resp3.items
assert len(resp3.items) <= 5
resp4 = ls.autosuggest(
query="res",
limit=5,
search_in_bbox=search_in_bbox1,
terms_limit=3,
show=[SHOW.phonemes],
political_view=POLITICAL_VIEW.RUS,
)
assert resp4.items
assert len(resp4.items) <= 5
assert len(resp4.queryTerms) == 3
for item in resp4.items:
if item["resultType"] == "place":
assert item["politicalView"]
assert item["phonemes"]
with pytest.raises(ValueError):
ls.autosuggest(
query="res",
)
with pytest.raises(ValueError):
ls.autosuggest(
query="res",
search_in_bbox=search_in_bbox1,
search_in_circle=search_in_circle1,
)
with pytest.raises(ValueError):
ls.autosuggest(
query="res",
at=["-13.163068,-72.545128"],
search_in_bbox=search_in_bbox1,
search_in_circle=search_in_circle1,
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.autosuggest(
query="res",
at=["-13.163068,-72.545128"],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_geocoding():
"""Test geocoding api."""
address = "200 S Mathilda Sunnyvale CA"
ls = LS(api_key=LS_API_KEY)
resp = ls.geocode(query=address, limit=2)
assert isinstance(resp, GeocoderResponse)
assert resp.__str__()
assert resp.as_json_string()
geo_json = resp.to_geojson()
assert geo_json.type == "FeatureCollection"
assert geo_json.features
pos = resp.items[0]["position"]
assert len(resp.items) == 1
assert pos == {"lat": 37.37634, "lng": -122.03405}
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_geocoding_exception():
"""Test geocoding api exception."""
address = "Goregaon West, Mumbai 400062, India"
ls = LS(api_key="dummy")
with pytest.raises(ApiError):
ls.geocode(query=address)
with pytest.raises(ValueError):
ls.geocode(query="")
with pytest.raises(ValueError):
ls.geocode(query=" ")
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_reverse_geocoding():
"""Test reverse geocoding."""
ls = LS(api_key=LS_API_KEY)
resp = ls.reverse_geocode(lat=19.1646, lng=72.8493)
address = resp.items[0]["address"]["label"]
assert "Goregaon" in address
resp1 = ls.reverse_geocode(lat=19.1646, lng=72.8493, limit=4)
assert len(resp1.items) == 4
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_reverse_geocoding_exception():
"""Test reverse geocoding api exception."""
ls = LS(api_key=LS_API_KEY)
with pytest.raises(ValueError):
ls.reverse_geocode(lat=91, lng=90)
with pytest.raises(ValueError):
ls.reverse_geocode(lat=19, lng=190)
ls = LS(api_key="dummy")
with pytest.raises(ApiError):
ls.reverse_geocode(lat=19.1646, lng=72.8493)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_isoline_routing():
"""Test isoline routing api."""
ls = LS(api_key=LS_API_KEY)
place_options = PlaceOptions(
course=ROUTE_COURSE.west,
sideof_street_hint=[52.512149, 13.304076],
match_sideof_street=ROUTE_MATCH_SIDEOF_STREET.always,
radius=10,
min_course_distance=10,
)
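# Descriptive note: PlaceOptions serialises itself to the API's JSON payload via
# __str__, which the assertion below documents (ROUTE_COURSE.west maps to 270 degrees).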
assert json.loads(place_options.__str__()) == {
"course": 270,
"sideOfStreetHint": "52.512149,13.304076",
"matchSideOfStreet": "always",
"namehint": None,
"radius": 10,
"minCourseDistance": 10,
}
origin_waypoint_options = WayPointOptions(stop_duration=0)
result = ls.calculate_isoline(
origin=[52.5, 13.4],
range="1000,3000",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
departure_time=datetime.now(),
truck=Truck(
shipped_hazardous_goods=[SHIPPED_HAZARDOUS_GOODS.explosive],
gross_weight=100,
weight_per_axle=10,
height=10,
width=10,
length=10,
tunnel_category="B",
axle_count=4,
),
shape_max_points=100,
avoid_features=[ISOLINE_ROUTING_AVOID_FEATURES.tollRoad],
origin_place_options=place_options,
origin_waypoint_options=origin_waypoint_options,
)
assert result.isolines
assert result.departure
coordinates = result.isolines[0]["polygons"][0]["outer"]
assert coordinates
geo_json = result.to_geojson()
assert geo_json.type == "FeatureCollection"
destination_waypoint_options = WayPointOptions(stop_duration=0)
result2 = ls.calculate_isoline(
destination=[52.51578, 13.37749],
range="600",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
destination_place_options=place_options,
destination_waypoint_options=destination_waypoint_options,
)
assert result2.isolines
assert result2.arrival
with pytest.raises(ValueError):
ls.calculate_isoline(
destination=[82.8628, 135.00],
range="3000",
range_type=RANGE_TYPE.distance,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
arrival_time=datetime.now(),
)
with pytest.raises(ValueError):
ls.calculate_isoline(
origin=[52.5, 13.4],
range="900",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
destination=[52.5, 13.4],
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.calculate_isoline(
origin=[52.5, 13.4],
range="900",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_isoline_routing_exception():
"""Test isoline routing exceptions."""
ls = LS(api_key=LS_API_KEY)
with pytest.raises(ValueError):
ls.calculate_isoline(
range="900",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
)
with pytest.raises(ValueError):
ls.calculate_isoline(
range="900",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
arrival_time=datetime.now(),
origin=[52.5, 13.4],
)
with pytest.raises(ValueError):
ls.calculate_isoline(
range="900",
range_type=RANGE_TYPE.time,
transport_mode=ISOLINE_ROUTING_TRANSPORT_MODE.car,
departure_time=datetime.now(),
destination=[52.5, 13.4],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_discover():
ls = LS(api_key=LS_API_KEY)
result = ls.discover(query="starbucks", center=[19.1663, 72.8526], radius=10000, lang="en")
assert len(result.items) == 20
result2 = ls.discover(
query="starbucks",
center=[19.1663, 72.8526],
country_codes=["IND"],
limit=2,
)
assert len(result2.items) == 2
result3 = ls.discover(
query="starbucks",
bounding_box=[13.08836, 52.33812, 13.761, 52.6755],
)
assert len(result3.items) == 20
with pytest.raises(ValueError):
ls.discover(
query="starbucks",
center=[52.5, 13.4],
bounding_box=[13.08836, 52.33812, 13.761, 52.6755],
)
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.discover(query="starbucks", center=[19.1663, 72.8526], radius=10000, limit=10)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_browse():
ls = LS(api_key=LS_API_KEY)
result = ls.browse(
center=[19.1663, 72.8526],
radius=9000,
limit=5,
categories=[
PLACES_CATEGORIES.historical_monument,
PLACES_CATEGORIES.museum,
PLACES_CATEGORIES.park_recreation_area,
PLACES_CATEGORIES.leisure,
PLACES_CATEGORIES.shopping_mall,
],
lang="en",
)
assert len(result.items) == 5
result2 = ls.browse(
center=[19.1663, 72.8526],
name="starbucks",
country_codes=["IND"],
limit=10,
categories=[PLACES_CATEGORIES.restaurant],
lang="en",
)
assert len(result2.items) <= 10
result3 = ls.browse(
center=[19.1663, 72.8526],
name="starbucks",
bounding_box=[13.08836, 52.33812, 13.761, 52.6755],
categories=[PLACES_CATEGORIES.restaurant],
lang="en",
)
assert len(result3.items) <= 10
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.browse(
center=[19.1663, 72.8526],
radius=9000,
limit=5,
categories=[
PLACES_CATEGORIES.historical_monument,
PLACES_CATEGORIES.museum,
PLACES_CATEGORIES.park_recreation_area,
PLACES_CATEGORIES.leisure,
PLACES_CATEGORIES.shopping_mall,
],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_ls_lookup():
ls = LS(api_key=LS_API_KEY)
result = ls.lookup(
location_id="here:pds:place:276u0vhj-b0bace6448ae4b0fbc1d5e323998a7d2",
lang="en",
)
assert result.response["title"] == "Frankfurt-Hahn Airport"
with pytest.raises(ApiError):
ls2 = LS(api_key="dummy")
ls2.lookup(location_id="here:pds:place:276u0vhj-b0bace6448ae4b0fbc1d5e323998a7d2")
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_car_route():
"""Test routing API for car route."""
ls = LS(api_key=LS_API_KEY)
avoid_areas = [AvoidBoundingBox(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)]
avoid_features = [ROUTING_AVOID_FEATURES.tollRoad]
via1 = Via(lat=52.52426, lng=13.43000)
via2 = Via(lat=52.52624, lng=13.44012)
result = ls.car_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via1, via2],
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
avoid_areas=avoid_areas,
avoid_features=avoid_features,
exclude=["IND", "NZL", "AUS"],
)
assert result.response["routes"][0]["sections"][0]["departure"]["place"]["location"] == {
"lat": 52.5137479,
"lng": 13.4246242,
"elv": 76.0,
}
assert result.response["routes"][0]["sections"][1]["departure"]["place"]["location"] == {
"lat": 52.5242323,
"lng": 13.4301462,
"elv": 80.0,
}
assert isinstance(result.to_geojson(), FeatureCollection)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_car_route_extra_options():
"""Test routing API for car route."""
place_options = PlaceOptions(
course=ROUTE_COURSE.west,
sideof_street_hint=[52.512149, 13.304076],
match_sideof_street=ROUTE_MATCH_SIDEOF_STREET.always,
radius=10,
min_course_distance=10,
)
assert json.loads(place_options.__str__()) == {
"course": 270,
"sideOfStreetHint": "52.512149,13.304076",
"matchSideOfStreet": "always",
"namehint": None,
"radius": 10,
"minCourseDistance": 10,
}
via_waypoint_options = WayPointOptions(stop_duration=0, pass_through=True)
assert json.loads(via_waypoint_options.__str__()) == {
"stopDuration": 0,
"passThrough": True,
}
dest_waypoint_options = WayPointOptions(stop_duration=10, pass_through=False)
via1 = Via(
lat=52.52426,
lng=13.43000,
place_options=place_options,
waypoint_options=via_waypoint_options,
)
via2 = Via(lat=52.52426, lng=13.43000)
via3 = Via(
lat=52.52426,
lng=13.43000,
place_options=place_options,
waypoint_options=via_waypoint_options,
)
ls = LS(api_key=LS_API_KEY)
resp = ls.car_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via1, via2, via3],
origin_place_options=place_options,
destination_place_options=place_options,
destination_waypoint_options=dest_waypoint_options,
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
)
resp = resp.response
assert len(resp["routes"][0]["sections"]) == 2
assert list(resp["routes"][0]["sections"][0].keys()) == [
"id",
"type",
"departure",
"arrival",
"polyline",
"spans",
"notices",
"transport",
]
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_bicycle_route():
"""Test routing API for car route."""
ls = LS(api_key=LS_API_KEY)
avoid_areas = [AvoidBoundingBox(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)]
avoid_features = [ROUTING_AVOID_FEATURES.tollRoad]
via = Via(lat=52.52426, lng=13.43000)
_ = ls.bicycle_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via],
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
avoid_areas=avoid_areas,
avoid_features=avoid_features,
exclude=["IND", "NZL", "AUS"],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_truck_route():
"""Test routing API for truck route."""
ls = LS(api_key=LS_API_KEY)
truck = Truck(
shipped_hazardous_goods=[SHIPPED_HAZARDOUS_GOODS.explosive],
gross_weight=100,
weight_per_axle=10,
height=10,
width=10,
length=10,
tunnel_category="B",
axle_count=4,
)
avoid_areas = [AvoidBoundingBox(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)]
avoid_features = [ROUTING_AVOID_FEATURES.tollRoad]
via = Via(lat=52.52426, lng=13.43000)
_ = ls.truck_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via],
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
truck=truck,
avoid_areas=avoid_areas,
avoid_features=avoid_features,
exclude=["IND", "NZL", "AUS"],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_scooter_route():
"""Test routing API for scooter route."""
ls = LS(api_key=LS_API_KEY)
scooter = Scooter(allow_highway=True)
assert json.loads(scooter.__str__()) == {"allowHighway": True}
via = Via(lat=52.52426, lng=13.43000)
_ = ls.scooter_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via],
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
scooter=scooter,
exclude=["IND", "NZL", "AUS"],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_pedestrian_route():
"""Test routing API for pedestrian route."""
ls = LS(api_key=LS_API_KEY)
via = Via(lat=52.52426, lng=13.43000)
_ = ls.pedestrian_route(
origin=[52.51375, 13.42462],
destination=[52.52332, 13.42800],
via=[via],
return_results=[ROUTING_RETURN.polyline, ROUTING_RETURN.elevation],
departure_time=datetime.now(),
spans=[ROUTING_SPANS.names],
exclude=["IND", "NZL", "AUS"],
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_matrix_route_exception():
"""Test exceptions for Matrix routing."""
ls = LS(api_key=LS_API_KEY)
origins = [
{"lat": 37.76, "lng": -122.42},
{"lat": 40.63, "lng": -74.09},
{"lat": 30.26, "lng": -97.74},
{"lat": 40.63, "lng": -74.09},
]
region_definition = CircleRegion(radius=1000, center={"lat": 37.76, "lng": -122.42})
matrix_attributes = [MATRIX_ATTRIBUTES.distances, MATRIX_ATTRIBUTES.travelTimes]
profile = PROFILE.carFast
truck = Truck(
shipped_hazardous_goods=[SHIPPED_HAZARDOUS_GOODS.explosive],
gross_weight=100,
weight_per_axle=10,
height=10,
width=10,
length=10,
tunnel_category="B",
axle_count=4,
)
with pytest.raises(ValueError):
ls.matrix(
origins=origins,
region_definition=region_definition,
profile=profile,
matrix_attributes=matrix_attributes,
)
with pytest.raises(ValueError):
ls.matrix(
origins=origins,
region_definition=region_definition,
matrix_attributes=matrix_attributes,
transport_mode=ROUTING_TRANSPORT_MODE.car,
truck=truck,
)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_matrix_route():
"""Test Matrix routing."""
ls = LS(api_key=LS_API_KEY)
origins = [
{"lat": 37.76, "lng": -122.42},
{"lat": 40.63, "lng": -74.09},
{"lat": 30.26, "lng": -97.74},
]
region_definition = WorldRegion()
matrix_attributes = [MATRIX_ATTRIBUTES.distances, MATRIX_ATTRIBUTES.travelTimes]
avoid_areas = AvoidBoundingBox(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)
assert json.loads(avoid_areas.__str__()) == {
"type": "boundingBox",
"north": 68.1766451354,
"south": 7.96553477623,
"west": 97.4025614766,
"east": 35.4940095078,
}
truck = Truck(
shipped_hazardous_goods=[SHIPPED_HAZARDOUS_GOODS.explosive],
gross_weight=100,
weight_per_axle=10,
height=10,
width=10,
length=10,
tunnel_category="B",
axle_count=4,
)
result = ls.matrix(
origins=origins,
region_definition=region_definition,
destinations=origins,
routing_mode=ROUTING_MODE.fast,
departure_time=datetime.now(tz=pytz.utc),
transport_mode=ROUTING_TRANSPORT_MODE.truck,
avoid_features=[AVOID_FEATURES.tollRoad],
avoid_areas=[avoid_areas],
truck=truck,
matrix_attributes=matrix_attributes,
)
mat = result.matrix
assert mat["numOrigins"] == 3
assert mat["numDestinations"] == 3
assert len(mat["distances"]) == 9
profile = PROFILE.carShort
result2 = ls.matrix(
origins=origins,
region_definition=region_definition,
matrix_attributes=matrix_attributes,
profile=profile,
)
mat2 = result2.matrix
assert mat2["numOrigins"] == 3
assert mat2["numDestinations"] == 3
with pytest.raises(NotImplementedError):
result2.to_geojson()
assert isinstance(result2.to_distnaces_matrix(), pd.DataFrame)
assert isinstance(result2.to_travel_times_matrix(), pd.DataFrame)
@pytest.mark.skipif(not LS_API_KEY, reason="No api key found.")
def test_matrix_route_async():
"""Test Matrix routing."""
ls = LS(api_key=LS_API_KEY)
origins = [
{"lat": 37.76, "lng": -122.42},
{"lat": 40.63, "lng": -74.09},
{"lat": 30.26, "lng": -97.74},
]
region_definition = WorldRegion()
matrix_attributes = [MATRIX_ATTRIBUTES.distances, MATRIX_ATTRIBUTES.travelTimes]
avoid_areas = AvoidBoundingBox(68.1766451354, 7.96553477623, 97.4025614766, 35.4940095078)
truck = Truck(
shipped_hazardous_goods=[SHIPPED_HAZARDOUS_GOODS.explosive],
gross_weight=100,
weight_per_axle=10,
height=10,
width=10,
length=10,
tunnel_category="B",
axle_count=4,
)
result = ls.matrix(
origins=origins,
region_definition=region_definition,
async_req=True,
destinations=origins,
routing_mode=ROUTING_MODE.fast,
departure_time="any",
transport_mode=ROUTING_TRANSPORT_MODE.truck,
avoid_features=[AVOID_FEATURES.tollRoad],
avoid_areas=[avoid_areas],
truck=truck,
matrix_attributes=matrix_attributes,
)
mat = result.matrix
assert mat["numOrigins"] == 3
assert mat["numDestinations"] == 3
assert len(mat["distances"]) == 9
def test_matrix_routing_config():
"""Test Matrix routing config objects."""
circle = CircleRegion(radius=1000, center={"lat": 37.76, "lng": -122.42})
assert json.loads(circle.__str__()) == {
"type": "circle",
"center": {"lat": 37.76, "lng": -122.42},
"radius": 1000,
}
bbox = BoundingBoxRegion(0, 0, 0, 0)
assert json.loads(bbox.__str__()) == {
"type": "boundingBox",
"north": 0,
"south": 0,
"west": 0,
"east": 0,
}
poly = PolygonRegion(outer=[1, 1, 1, 1, 1, 1])
assert json.loads(poly.__str__()) == {
"type": "polygon",
"outer": [1, 1, 1, 1, 1, 1],
}
autocircle = AutoCircleRegion(margin=100)
assert json.loads(autocircle.__str__()) == {"type": "autoCircle", "margin": 100}
| 32.077077 | 98 | 0.610298 | 3,676 | 32,045 | 5.103917 | 0.132481 | 0.03102 | 0.03198 | 0.012259 | 0.726735 | 0.691184 | 0.666453 | 0.653129 | 0.601642 | 0.555325 | 0 | 0.091941 | 0.266188 | 32,045 | 998 | 99 | 32.109218 | 0.705932 | 0.021657 | 0 | 0.550905 | 0 | 0 | 0.087555 | 0.020449 | 0 | 0 | 0 | 0 | 0.085973 | 1 | 0.026018 | false | 0.003394 | 0.022624 | 0 | 0.048643 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d4ae17041f29d8b2b01fbaeb51039292f4a9347 | 6,159 | py | Python | tests/tags_tests.py | yoursantu/indiannewsplus | 252f0367b43ec2edea636157bcf2d8a92dda6f3f | [
"MIT"
] | null | null | null | tests/tags_tests.py | yoursantu/indiannewsplus | 252f0367b43ec2edea636157bcf2d8a92dda6f3f | [
"MIT"
] | null | null | null | tests/tags_tests.py | yoursantu/indiannewsplus | 252f0367b43ec2edea636157bcf2d8a92dda6f3f | [
"MIT"
] | null | null | null | """Tests for tags of the ``multilingual_news``` application."""
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils.translation import activate
from cms.api import add_plugin
from mixer.backend.django import mixer
from ..templatetags.multilingual_news_tags import (
get_newsentry_meta_description,
get_newsentry_meta_title,
get_published_entries,
get_recent_news,
)
class GetPublishedEntriesTestCase(TestCase):
"""Tests for the `get_published_entries` template tag."""
longMessage = True
def setUp(self):
self.entry1 = mixer.blend('multilingual_news.NewsEntry')
new_entry = self.entry1.translate('de')
new_entry.title = 'GerTitle'
new_entry.is_published = True
new_entry.save()
self.entry2 = mixer.blend('multilingual_news.NewsEntry')
new_entry = self.entry2.translate('de')
new_entry.title = 'GerTitle2'
new_entry.is_published = False
new_entry.save()
self.object_list = [self.entry1, self.entry2]
def test_tag(self):
activate('de')
# retrieve the one german entry
self.assertEqual(
get_published_entries(self.object_list, 'de').count(),
1, msg=('The tag should have returned only one entry.'))
# without an explicit language, only one published entry is returned
self.assertEqual(
get_published_entries(self.object_list).count(),
1, msg=('The tag should have returned one entry.'))
class GetNewsEntryMetaDescriptionTestCase(TestCase):
"""Tests for the `get_newsentry_meta_description` template tag."""
longMessage = True
def setUp(self):
self.newsentry_with_meta = mixer.blend(
'multilingual_news.NewsEntry')
en_trans = self.newsentry_with_meta.translate('en')
en_trans.meta_description = 'Meta Description'
en_trans.save()
self.newsentry_with_excerpt = mixer.blend(
'multilingual_news.NewsEntry')
self.newsentry_with_excerpt.translate('en')
add_plugin(self.newsentry_with_excerpt.excerpt, 'LinkPlugin', 'en')
add_plugin(self.newsentry_with_excerpt.excerpt, 'TextPlugin', 'en',
body='<p>Test excerpt</p>')
self.newsentry_with_content = mixer.blend(
'multilingual_news.NewsEntry')
self.newsentry_with_content.translate('en')
add_plugin(self.newsentry_with_content.content, 'LinkPlugin', 'en')
add_plugin(self.newsentry_with_content.content, 'TextPlugin', 'en',
body=(
'<p>Test content - lorem ipsum longer than 160 chars'
' to test the cropping and the appending of the dots,'
' which happens only on very long descriptions.'
' When will this become longer than 160?</p>'))
def test_tag(self):
activate('en')
self.assertEqual(
get_newsentry_meta_description(self.newsentry_with_meta),
'Meta Description',
)
self.assertEqual(
get_newsentry_meta_description(self.newsentry_with_excerpt),
'Test excerpt',
msg='Should have returned the content of the excerpt placeholder.',
)
self.assertIn(
'Test content',
get_newsentry_meta_description(self.newsentry_with_content),
msg='Should have returned the content of the content placeholder.',
)
self.assertIn(
'...',
get_newsentry_meta_description(self.newsentry_with_content),
msg='Should have appended "...".',
)
class GetNewsEntryMetaTitleTestCase(TestCase):
"""Tests for the `get_newsentry_meta_title` template tag."""
longMessage = True
def setUp(self):
self.entry = mixer.blend(
'multilingual_news.NewsEntryTranslation',
title='Title',
master__author=mixer.blend('people.PersonTranslation'),
)
self.entry_with_meta = mixer.blend(
'multilingual_news.NewsEntryTranslation',
master__author=mixer.blend('people.PersonTranslation'),
meta_title='Meta',
title='Title2'
)
def test_tag(self):
self.assertEqual(get_newsentry_meta_title(self.entry), 'Title')
self.assertEqual(get_newsentry_meta_title(self.entry_with_meta),
'Meta')
class GetRecentNewsTestCase(TestCase):
"""Tests for the ``get_recent_news`` assignment tag."""
longMessage = True
def setUp(self):
self.news_entry = mixer.blend('multilingual_news.NewsEntry')
trans = self.news_entry.translate('en')
trans.is_published = True
trans.save()
self.category = mixer.blend('multilingual_news.Category')
self.news_entry.categories.add(self.category)
self.news_entry2 = mixer.blend('multilingual_news.NewsEntry')
trans = self.news_entry2.translate('en')
trans.is_published = True
trans.save()
for x in range(0, 2):
entry = mixer.blend('multilingual_news.NewsEntry')
trans = entry.translate('en')
trans.is_published = True
trans.save()
def test_tag(self):
activate('en')
req = RequestFactory().get('/')
context = {'request': req, }
result = get_recent_news(context)
self.assertEqual(result.count(), 3, msg=(
'Should return last three recent news'))
result = get_recent_news(context, category='foo')
self.assertEqual(result.count(), 3, msg=(
'Should return last three recent news, if category is invalid.'))
result = get_recent_news(context, category=self.category.slug)
self.assertEqual(result.count(), 1, msg=(
'Should only return recent news from chosen category'))
self.news_entry2.categories.add(self.category)
result = get_recent_news(context, category=self.category.slug)
self.assertEqual(result.count(), 2, msg=(
'Should only return recent news from chosen category'))
| 38.018519 | 79 | 0.639552 | 682 | 6,159 | 5.576246 | 0.193548 | 0.047857 | 0.062582 | 0.075204 | 0.61767 | 0.548777 | 0.482514 | 0.432027 | 0.185117 | 0.105706 | 0 | 0.005466 | 0.257347 | 6,159 | 161 | 80 | 38.254658 | 0.825973 | 0.055041 | 0 | 0.328244 | 0 | 0 | 0.205248 | 0.06318 | 0 | 0 | 0 | 0 | 0.091603 | 1 | 0.061069 | false | 0 | 0.045802 | 0 | 0.167939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d4b3704523e5ede209ca40a8ab2c453344b2984 | 3,590 | py | Python | dwave_networkx/drawing/tests/test_chimera_layout.py | mstechly/dwave_networkx | c319222ba47c83d86bd9d0de045c497d37ffd8ad | [
"Apache-2.0"
] | null | null | null | dwave_networkx/drawing/tests/test_chimera_layout.py | mstechly/dwave_networkx | c319222ba47c83d86bd9d0de045c497d37ffd8ad | [
"Apache-2.0"
] | null | null | null | dwave_networkx/drawing/tests/test_chimera_layout.py | mstechly/dwave_networkx | c319222ba47c83d86bd9d0de045c497d37ffd8ad | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import unittest
import networkx as nx
import dwave_networkx as dnx
try:
import matplotlib.pyplot as plt
_plt = True
except ImportError:
_plt = False
try:
import numpy as np
_numpy = True
except ImportError:
_numpy = False
class TestDrawing(unittest.TestCase):
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_basic(self):
G = dnx.chimera_graph(1, 1, 4)
pos = dnx.chimera_layout(G)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_typical(self):
G = dnx.chimera_graph(2, 2, 4)
pos = dnx.chimera_layout(G)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_center(self):
G = dnx.chimera_graph(2, 2, 4)
pos = dnx.chimera_layout(G, center=(5, 5))
with self.assertRaises(ValueError):
pos = dnx.chimera_layout(G, center=(5, 5, 5))
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_lowdim(self):
G = dnx.chimera_graph(2, 2, 4)
with self.assertRaises(ValueError):
pos = dnx.chimera_layout(G, dim=1)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_weird_nodata(self):
G = dnx.chimera_graph(2, 2, 4)
del G.graph["family"]
with self.assertRaises(ValueError):
pos = dnx.chimera_layout(G, dim=1)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_no_chimera_indices(self):
G = nx.Graph()
G.add_edges_from([(0, 2), (1, 2), (1, 3), (0, 3)])
pos = dnx.chimera_layout(G)
pos2 = dnx.chimera_layout(dnx.chimera_graph(1, 1, 2))
for v in pos:
self.assertTrue(all(pos[v] == pos2[v]))
for v in pos2:
self.assertIn(v, pos)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_coords(self):
G = dnx.chimera_graph(2, 2, 4, coordinates=True)
pos = dnx.chimera_layout(G)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_nodata(self):
G = dnx.chimera_graph(2, 2, 4, data=False)
pos = dnx.chimera_layout(G)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_chimera_layout_edgelist_singletile(self):
G = dnx.chimera_graph(1, 1, 16, data=False)
pos = dnx.chimera_layout(G.edges())
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_draw_chimera_biases(self):
G = dnx.chimera_graph(8)
h = {v: v % 12 for v in G}
J = {(u, v) if u % 2 else (v, u): (u+v) % 24 for u, v in G.edges()}
for v in G:
J[v, v] = .1
dnx.draw_chimera(G, linear_biases=h, quadratic_biases=J)
@unittest.skipUnless(_numpy and _plt, "No numpy or matplotlib")
def test_draw_chimera_embedding(self):
C = dnx.chimera_graph(4)
G = nx.grid_graph([2, 3, 2])
emb = {(0, 0, 0): [80, 48], (0, 0, 1): [50, 52], (0, 1, 0): [85, 93],
(0, 1, 1): [84, 82], (0, 2, 0): [89], (0, 2, 1): [92],
(1, 0, 0): [49, 54], (1, 0, 1): [83, 51], (1, 1, 0): [81],
(1, 1, 1): [86, 94], (1, 2, 0): [87, 95], (1, 2, 1): [91]}
dnx.draw_chimera_embedding(C, emb)
dnx.draw_chimera_embedding(C, emb, embedded_graph=G)
dnx.draw_chimera_embedding(C, emb, interaction_edges=C.edges())
| 35.544554 | 77 | 0.614763 | 538 | 3,590 | 3.908922 | 0.200743 | 0.104612 | 0.120304 | 0.135996 | 0.668093 | 0.633381 | 0.594864 | 0.555873 | 0.526391 | 0.475036 | 0 | 0.049498 | 0.251532 | 3,590 | 100 | 78 | 35.9 | 0.73316 | 0 | 0 | 0.358025 | 0 | 0 | 0.069081 | 0 | 0 | 0 | 0 | 0 | 0.061728 | 1 | 0.135802 | false | 0 | 0.098765 | 0 | 0.246914 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d4eb77e993ae5d7ecbe885fa52cf0468a55034f | 6,634 | py | Python | priorgen/accuracy_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | 1 | 2021-12-09T10:29:20.000Z | 2021-12-09T10:29:20.000Z | priorgen/accuracy_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | null | null | null | priorgen/accuracy_utils.py | joshjchayes/PriorGen | 228be0b06dca29ad2ad33ae216f494eaead6161f | [
"MIT"
] | null | null | null | '''
accuracy_utils
Module for checking accuracy of retrieval
'''
from scipy.optimize import fsolve
from scipy.spatial import distance
import numpy as np
from ._scaler import Scaler
class RetrievalMetricCalculator:
def __init__(self, parameter_limits):
'''
The RetrievalMetricCalculator generates metrics which can be used to
quantify quality of retrieval. The two main metrics are the accuracy
metric, which is a dimensionless distance between two points
(assumed to be true values and retrieved values), and the precision
metric M2, which is defined as the number of standard deviations
away from the true value a retrieved value is. For more information
see Hayes et. al. (2019).
Parameters
----------
parameter_limits : array_like, shape (n_variables, 2)
The physical values of the limits on each parameter, provided in
(lower, upper) pairs.
'''
# set up the scaler
self.scaler = Scaler(parameter_limits)
self.n_variables = len(parameter_limits)
def calculate_accuracy_metric(self, true_parameters, retrieved_parameters):
'''
Calculates the accuracy metric, defined as the Euclidean distance
between two points in unit-normalised physical parameter space.
Parameters
----------
true_parameters : array_like, shape (n_parameters, )
The accepted 'true' values of the parameters, provided in physical
space (i.e. with units)
retrieved_parameters : array_like, shape (n_parameters, )
The retrieved values of the parameters, provided in physical space
(i.e. with units)
Returns
-------
accuracy_metric : float
The Euclidean distance between the two given points
'''
dimensionless_true = self.scaler.point_to_dimensionless(true_parameters)
dimensionless_retrieved = self.scaler.point_to_dimensionless(retrieved_parameters)
return distance.euclidean(dimensionless_true, dimensionless_retrieved)
def calculate_precision_metric(self, true_parameters, retrieved_parameters,
uncertainty):
'''
Calculates the precision metric, which is defined as the accuracy
metric scaled by the 1 sigma error in the direction of the vector
between the true and retrieved parameters
Parameters
----------
true_parameters : array_like, shape (n_parameters, )
The accepted 'true' values of the parameters, provided in physical
space (i.e. with units)
retrieved_parameters : array_like, shape (n_parameters, )
The retrieved values of the parameters, provided in physical space
(i.e. with units)
uncertainty : array_like, shape (n_parameters, ) or (n_parameters, 2)
The uncertainy associated with each retrieved parameter value.
If 1D array is provided, assumes uniform upper and lower errors.
If 2D array provided, assumes errors are provided as(lower, upper)
pairs.
Returns
-------
precision_metric : float
The precision metric associated with the retrieval results
sigma : float
The 1 sigma value in the direction of the vector between the true
and retrieved parameters.
'''
# Scale the points and errors
dimensionless_true = self.scaler.point_to_dimensionless(true_parameters)
dimensionless_retrieved = self.scaler.point_to_dimensionless(retrieved_parameters)
dimensionless_errors = self.scaler.errors_to_dimensionless(uncertainty)
# Choose which error values to use based on the direction of the true value
# relative to the retrieved one. Note that we default to the upper
# error in the event that the retrieval is exact.
delta = dimensionless_true - dimensionless_retrieved
mask = np.vstack((delta < 0, delta >= 0)).T
# get the principal semi-axes which define the error ellipse
semiaxes = dimensionless_errors[mask]
# Find the intercept between the error ellipse and the line joining
# the true and retrieved position
intercept = _find_intercept(dimensionless_true, dimensionless_retrieved, semiaxes)
# The 1 sigma distance is the distance between this intercept and the
# retrieved parameter values (note dropping the scale factor from
# intercept)
sigma = distance.euclidean(dimensionless_retrieved, intercept[:-1])
# Calculate the precision metric
# Distance between points
precision_metric = distance.euclidean(dimensionless_true, dimensionless_retrieved)/sigma
return precision_metric, sigma
def calculate_metrics(self, true_parameters, retrieved_parameters,
uncertainty):
'''
Calculates the accuracy and precision metrics
'''
accuracy = self.calculate_accuracy_metric(true_parameters, retrieved_parameters)
precision, sigma = self.calculate_precision_metric(true_parameters, retrieved_parameters, uncertainty)
return accuracy, precision, sigma
def _intercept_eqn(p, true_pos, retr_pos, errors):
'''
Function to pass to fsolve to find the intercept between the
line between the retrieved and true position and the one sigma
error ellipsoid around the retrieved position
'''
A = p[-1] # Get the scalar
p = np.array(p[:-1]) # Get the variable coordinates
true_pos = np.asarray(true_pos)
retr_pos = np.asarray(retr_pos)
errors = np.asarray(errors)
diff = retr_pos - true_pos
line_results = p - true_pos - A*diff
ellipsoid_result = sum((p - retr_pos)**2 / errors**2) - 1
return tuple(line_results) + (ellipsoid_result, )
def _find_intercept(true_position, retrieved_position, errors):
'''
Finds the intercept between the line joining the true position
and the retrieved position in parameter space and the error ellipsoid
surrounding the retrieved position
Parameters
----------
true_position : array_like, shape (n_variables, )
The set of accepted 'true' values for the variables
retrieved_position : array_like, shape (n_variables, )
The set of values for the variables found through retrieval
'''
start_pos = np.array(tuple(true_position) + (0.3,))
return fsolve(_intercept_eqn, start_pos, args=(true_position, retrieved_position, errors))
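# Minimal usage sketch (hypothetical numbers; assumes the Scaler normalises each
# parameter onto [0, 1] using the limits supplied at construction):
# calc = RetrievalMetricCalculator([(0.0, 10.0), (0.0, 1.0)])
# accuracy = calc.calculate_accuracy_metric([5.0, 0.5], [5.5, 0.45])
# precision, sigma = calc.calculate_precision_metric([5.0, 0.5], [5.5, 0.45], [0.6, 0.05])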
| 39.023529 | 110 | 0.679379 | 789 | 6,634 | 5.56654 | 0.215463 | 0.047587 | 0.025501 | 0.027322 | 0.357924 | 0.275273 | 0.238616 | 0.238616 | 0.210838 | 0.192623 | 0 | 0.004474 | 0.258818 | 6,634 | 169 | 111 | 39.254438 | 0.888753 | 0.523364 | 0 | 0.142857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.095238 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d518bb241852d7660d529d7b5b0b7f7be5c652f | 1,708 | py | Python | djangocms_responsive_image/forms.py | febsn/djangocms-responsive-image | e7ab02a8ab4119d1a60aa21e50380072b53755ff | [
"MIT"
] | 2 | 2016-05-03T15:53:56.000Z | 2017-05-19T12:05:40.000Z | djangocms_responsive_image/forms.py | febsn/djangocms-responsive-image | e7ab02a8ab4119d1a60aa21e50380072b53755ff | [
"MIT"
] | null | null | null | djangocms_responsive_image/forms.py | febsn/djangocms-responsive-image | e7ab02a8ab4119d1a60aa21e50380072b53755ff | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from .models import ImagePlugin
from .conf import settings
class GlossaryFormBase(forms.ModelForm):
glossary_fields = tuple()
def __init__(self, data=None, files=None, **kwargs):
# set values for glossary form fields
# values from initial overrule those from instance
instance = kwargs.pop('instance', None)
initial = kwargs.pop('initial', {})
if instance and data is None:
for field in self.glossary_fields:
try:
initial[field] = initial.get(field, None) or instance.glossary[field]
except KeyError:
pass
return super(GlossaryFormBase, self).__init__(data=data, files=files,
instance=instance, initial=initial, **kwargs)
def save(self, commit=True):
# Copy the glossary form fields onto the instance before delegating to the
# parent save, so they are kept for both commit=True and commit=False saves.
for field in self.glossary_fields:
self.instance.glossary[field] = self.cleaned_data[field]
return super(GlossaryFormBase, self).save(commit=commit)
class ResponsiveImageForm(GlossaryFormBase):
STYLE_CHOICES = [
(key, value['name'])
for key, value
in settings.DJANGOCMS_RESPONSIVE_IMAGE_IMAGE_STYLE_CHOICES.items()
]
style = forms.ChoiceField(
choices=STYLE_CHOICES)
caption = forms.CharField(max_length=512, required=False)
alt = forms.CharField(max_length=512, required=False)
description = forms.CharField(widget=forms.Textarea, required=False)
glossary_fields = ('style', 'caption', 'alt', 'description',)
class Meta:
model = ImagePlugin
fields = ('image', )
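# Minimal usage sketch (hypothetical form data; mirrors how a plugin admin
# would bind the form):
# form = ResponsiveImageForm(data={'style': 'default',  # hypothetical style key
#                                  'caption': 'A caption', 'alt': 'Alt text',
#                                  'description': ''})
# if form.is_valid():
#     plugin = form.save(commit=False)  # glossary_fields end up in plugin.glossary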
| 34.16 | 89 | 0.649297 | 188 | 1,708 | 5.755319 | 0.414894 | 0.051756 | 0.018484 | 0.025878 | 0.123845 | 0.123845 | 0.072089 | 0 | 0 | 0 | 0 | 0.005482 | 0.252342 | 1,708 | 49 | 90 | 34.857143 | 0.841817 | 0.062061 | 0 | 0.054054 | 0 | 0 | 0.031309 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054054 | false | 0.027027 | 0.108108 | 0 | 0.486486 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d52b010fc72c96e98bed9f723780c67116e3f6a | 2,741 | py | Python | db/db.py | kovalev-vxx/A-Q_teleBot | 2184237cc7811aa44caa31281d932a78f682aa9e | [
"MIT"
] | null | null | null | db/db.py | kovalev-vxx/A-Q_teleBot | 2184237cc7811aa44caa31281d932a78f682aa9e | [
"MIT"
] | null | null | null | db/db.py | kovalev-vxx/A-Q_teleBot | 2184237cc7811aa44caa31281d932a78f682aa9e | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, String, Boolean, create_engine
from sqlalchemy.engine.base import Engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.session import Session
from config import config
Base = declarative_base()
engine = create_engine('sqlite:///db/users.db', connect_args={"check_same_thread": False})
local_session = sessionmaker(autocommit=False, autoflush=False)
def get_session(engine: Engine) -> Session:
local_session.configure(bind=engine)
return local_session()
class Users(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
user_id = Column(Integer)
is_bot = Column(Boolean)
first_name = Column(String)
last_name = Column(String)
username = Column(String)
language_code = Column(String)
is_admin = Column(Boolean)
class Bans(Base):
__tablename__ = "bans"
id = Column(Integer, primary_key=True)
user_id = Column(Integer)
first_name = Column(String)
last_name = Column(String)
username = Column(String)
Base.metadata.create_all(bind=engine)
def duplicate_check(session, user_id, db):
if session.query(db).filter(db.user_id == user_id).all():
return False
return True
def upload_to_users(session: Session, user):
is_admin = user["id"] in config["ADMINS"]
print(user)
if duplicate_check(session=session, user_id=user["id"], db=Users):
session.add(Users(user_id=user["id"],
is_bot=True if user["is_bot"] == "true" else False,
first_name=user["first_name"],
last_name=user["last_name"],
username=user["username"],
language_code=user["language_code"],
is_admin=is_admin
))
session.commit()
session.close()
return True
else:
session.commit()
session.close()
return False
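# Usage sketch (hypothetical Telegram user payload, matching the keys read above):
# session = get_session(engine)
# upload_to_users(session, {"id": 1, "is_bot": "false", "first_name": "Ada",
#                           "last_name": "L", "username": "ada",
#                           "language_code": "en"})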
def upload_to_bans(session: Session, user):
if duplicate_check(session=session, user_id=user.user_id, db=Bans):
session.add(Bans(user_id=user.user_id,
first_name=user.first_name,
last_name=user.last_name,
username=user.username,
))
session.commit()
session.close()
return True
else:
session.commit()
session.close()
return False
def remove_from_db(session: Session, db, user_id):
for item in session.query(db).filter(db.user_id == user_id).all():
session.delete(item)
session.commit()
session.close() | 32.630952 | 90 | 0.630427 | 331 | 2,741 | 5.018127 | 0.208459 | 0.061409 | 0.036123 | 0.075256 | 0.410596 | 0.397351 | 0.397351 | 0.397351 | 0.397351 | 0.344371 | 0 | 0 | 0.264867 | 2,741 | 84 | 91 | 32.630952 | 0.824318 | 0 | 0 | 0.416667 | 0 | 0 | 0.039752 | 0.007659 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069444 | false | 0 | 0.083333 | 0 | 0.486111 | 0.013889 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d52dba3286ff40b1b601b6c256425c0907c82ca | 5,760 | py | Python | github_poster/loader/multiple_loader.py | Likenttt/GitHubPoster | cd1226de8ebcc0e7b1a9b9a9082ff4e8807d6710 | [
"MIT"
] | null | null | null | github_poster/loader/multiple_loader.py | Likenttt/GitHubPoster | cd1226de8ebcc0e7b1a9b9a9082ff4e8807d6710 | [
"MIT"
] | null | null | null | github_poster/loader/multiple_loader.py | Likenttt/GitHubPoster | cd1226de8ebcc0e7b1a9b9a9082ff4e8807d6710 | [
"MIT"
] | 1 | 2021-10-13T06:31:36.000Z | 2021-10-13T06:31:36.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
from github_poster.loader.base_loader import BaseLoader
class MutipleLoader(BaseLoader):
def __init__(self, from_year, to_year, _type, **kwargs):
super().__init__(from_year, to_year, _type)
self.types = kwargs.get("types", "")
self.type_summary_dict = {}
self.loader_list = []
def set_loader_list(self, loader):
self.loader_list.append(loader)
@classmethod
def add_loader_arguments(cls, parser):
parser.add_argument(
"--types",
dest="types",
type=str,
required=True,
help="All types you want to ",
)
parser.add_argument(
"--twitter_user_name",
dest="twitter_user_name",
type=str,
help="twitter_user_name",
)
parser.add_argument(
"--github_user_name",
dest="github_user_name",
type=str,
help="github_user_name",
)
parser.add_argument(
"--dota2_id",
dest="dota2_id",
type=str,
help="dota2 id",
)
parser.add_argument(
"--leetcode_cookie",
dest="leetcode_cookie",
type=str,
help="",
)
parser.add_argument(
"--cn",
dest="cn",
action="store_true",
help="if accout is CN",
)
parser.add_argument(
"--ns_device_id",
dest="ns_device_id",
type=str,
help="",
)
parser.add_argument(
"--ns_session_token",
dest="ns_session_token",
type=str,
help="",
)
parser.add_argument(
"--strava_client_id",
dest="strava_client_id",
type=str,
help="",
)
parser.add_argument(
"--strava_client_secret",
dest="strava_client_secret",
type=str,
help="",
)
parser.add_argument(
"--strava_refresh_token",
dest="strava_refresh_token",
type=str,
help="",
)
parser.add_argument(
"--wakatime_key",
dest="wakatime_key",
type=str,
help="your wakatime api key here, "
"more info: https://wakatime.com/settings/api-key",
)
parser.add_argument(
"--gpx_dir",
dest="gpx_dir",
metavar="DIR",
type=str,
default="GPX_FOLDER",
help="Directory containing GPX files",
)
# for gitlab
parser.add_argument(
"--gitlab_user_name",
dest="gitlab_user_name",
type=str,
help="",
)
parser.add_argument(
"--base_url",
dest="base_url",
type=str,
default="https://gitlab.com",
help="specify the base url of your self-managed gitlab",
)
parser.add_argument(
"--session",
dest="session",
type=str,
default="",
help="use gitlab_session from Cookies "
"if your gitlab instance needs to sign in",
)
# github issue
parser.add_argument(
"--issue_number",
dest="issue_number",
type=str,
help="The issue number",
)
parser.add_argument(
"--repo_name",
dest="repo_name",
type=str,
help="The repo name",
)
parser.add_argument(
"--github_token",
dest="github_token",
type=str,
default="",
help="The GitHub token, required by private repo",
)
# duolingo
parser.add_argument(
"--duolingo_user_name",
dest="duolingo_user_name",
type=str,
help="",
)
# cichang
parser.add_argument(
"--cichang_user_name",
dest="cichang_user_name",
type=str,
help="The username of CiChang",
)
parser.add_argument(
"--cichang_password",
dest="cichang_password",
type=str,
help="The password of CiChang",
)
# bilibili
parser.add_argument(
"--bilibili_cookie",
dest="bilibili_cookie",
type=str,
help="The cookie for the bilibili website(XHR)",
)
# nrc
parser.add_argument(
"--nike_refresh_token",
dest="nike_refresh_token",
type=str,
help="",
)
# garmin
parser.add_argument(
"--garmin_user_name",
dest="garmin_user_name",
type=str,
help="The username of Garmin",
)
parser.add_argument(
"--garmin_password",
dest="garmin_password",
type=str,
help="The password of Garmin",
)
def get_api_data(self):
pass
def make_track_dict(self):
pass
def get_all_track_data(self):
"""
date_summary_dict:
-> {date: {github:1, twitter:2}, date2: {github: 2}}
"""
date_summary_dict = defaultdict(dict)
for loader in self.loader_list:
data, _ = loader.get_all_track_data()
for date, value in data.items():
date_summary_dict[date][loader._type] = value
return date_summary_dict, self.year_list
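# Usage sketch (assumes concrete loader instances that share the BaseLoader
# interface; the loader names here are hypothetical):
# multi = MutipleLoader(2020, 2021, "multiple", types="github,twitter")
# multi.set_loader_list(github_loader)
# multi.set_loader_list(twitter_loader)
# date_summary_dict, year_list = multi.get_all_track_data()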
| 27.042254 | 68 | 0.483333 | 553 | 5,760 | 4.764919 | 0.22604 | 0.088805 | 0.167742 | 0.039848 | 0.273245 | 0.139658 | 0.118406 | 0.054649 | 0 | 0 | 0 | 0.002336 | 0.405556 | 5,760 | 212 | 69 | 27.169811 | 0.767231 | 0.030382 | 0 | 0.342246 | 0 | 0 | 0.233183 | 0.007935 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032086 | false | 0.042781 | 0.010695 | 0 | 0.053476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d533ac704007e352566535daa013459de87cb75 | 2,284 | py | Python | P2_image_captioning/model.py | arroqc/udacity | d531f99ac6379954ade3cb1c3b4ded1533ef6512 | [
"MIT"
] | null | null | null | P2_image_captioning/model.py | arroqc/udacity | d531f99ac6379954ade3cb1c3b4ded1533ef6512 | [
"MIT"
] | null | null | null | P2_image_captioning/model.py | arroqc/udacity | d531f99ac6379954ade3cb1c3b4ded1533ef6512 | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import torchvision.models as models
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
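# A quick shape sanity check for the encoder (a hedged sketch, not part of
# the original file):
#
#   encoder = EncoderCNN(embed_size=256)
#   images = torch.randn(4, 3, 224, 224)   # N, C, H, W
#   features = encoder(images)
#   assert features.shape == (4, 256)      # N, embed_size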
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
super().__init__()
self.lstm = nn.LSTM(input_size=embed_size,
hidden_size=hidden_size,
num_layers=num_layers,
batch_first=True)
self.embed = nn.Embedding(num_embeddings=vocab_size,
embedding_dim=embed_size)
self.lin_out = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
captions_embeded = self.embed(captions[:, :-1])
x = torch.cat([features.unsqueeze(1), captions_embeded], dim=1)
outputs, _ = self.lstm(x) # N, L, Hid
logits = self.lin_out(outputs) # N, L, Voc
return logits
def sample(self, inputs, states=None, max_len=20):
" accepts pre-processed image tensor (inputs) and returns predicted sentence (list of tensor ids of length max_len) "
# Assumes inputs is shape N, L, C
predicts = []
for i in range(max_len):
if i == 0:
outputs, (h, c) = self.lstm(inputs) # Keep the states, use 0 default at first step
else:
outputs, (h, c) = self.lstm(inputs, (h, c)) # Pass the states to the LSTM
# Predict
logits = self.lin_out(outputs)
new_word = logits.argmax(2).long()
predicts.append(new_word.item())
# Update new inputs
inputs = self.embed(new_word)
return predicts | 39.37931 | 125 | 0.580998 | 276 | 2,284 | 4.626812 | 0.398551 | 0.049334 | 0.03289 | 0.023493 | 0.115897 | 0.079875 | 0.043853 | 0 | 0 | 0 | 0 | 0.008344 | 0.317863 | 2,284 | 58 | 126 | 39.37931 | 0.811297 | 0.116462 | 0 | 0.043478 | 0 | 0.021739 | 0.054067 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108696 | false | 0 | 0.065217 | 0 | 0.282609 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d55ec0c123c6896d300cdeb5803b622b9e097e9 | 3,070 | py | Python | Labs/LinearRegression/plots.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 10 | 2016-10-18T19:54:25.000Z | 2021-10-09T20:12:38.000Z | Labs/LinearRegression/plots.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | null | null | null | Labs/LinearRegression/plots.py | jessicaleete/numerical_computing | cc71f51f35ca74d00e617af3d1a0223e19fb9a68 | [
"CC-BY-3.0"
] | 2 | 2017-05-14T16:07:59.000Z | 2020-06-20T09:05:06.000Z | import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
housing = np.load('housingprices.npy')
challenger = np.load('challenger.npy')
def raw():
plt.plot(housing[:,1], housing[:,0], 'o')
plt.savefig("california.pdf")
plt.clf()
def linear():
X=np.ones((42,2))
X[:,1]=housing[:,1]
Y = housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq)
plt.savefig("cali-linear.pdf")
plt.clf()
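# Note: the explicit normal-equation solve above (inv(X'X) X'Y) can be
# numerically unstable; a hedged alternative sketch using least squares:
#
#   betahat, *_ = np.linalg.lstsq(X, Y, rcond=None)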
def cubic():
X=np.ones((42,4))
X[:,1]=housing[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
Y = housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.savefig("cali-quadratic.pdf")
plt.clf()
def quartic():
X=np.ones((42,5))
X[:,1]=housing[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
X[:,4]=X[:,1]**4
Y=housing[:,0]
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
plt.plot(X[:,1],Y,'o')
xseq=np.arange(0,12,.1)
plt.plot(xseq,betahat[0]+betahat[1]*xseq
+betahat[2]*xseq**2+betahat[3]*xseq**3+betahat[4]*xseq**4)
plt.ylim([0,600000])
plt.savefig("cali-quartic.pdf")
def challenger_cubic():
plt.plot(challenger[:,0], challenger[:,1], 'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Cubic Approximation')
X=np.ones((challenger.shape[0],4))
X[:,1] = challenger[:,0]
Y=challenger[:,1]
X[:,2]=X[:,1]**2
X[:,3]=X[:,1]**3
betahat = np.linalg.inv(X.T.dot(X)).dot(X.T.dot(Y))
xseq=np.arange(30,100,.5)
plt.plot(xseq,betahat[0]+betahat[1]*xseq+betahat[2]*xseq**2+betahat[3]*xseq**3)
plt.savefig('cubicthrulogitpoints.pdf')
plt.clf()
def challenger_logistic():
###Logreg plot #2
plt.plot(challenger[:,0], challenger[:,1],'o')
plt.xlim(30,100)
plt.xlabel('Ambient Temperature (F)')
plt.ylim(-0.5,1.5)
plt.ylabel('O-ring Damage Present')
plt.title('Potential for Shuttle Damage - With Logistic Regression Prediction')
#X=np.ones((dat.shape[0],2))
#X[:,1]=dat[:,0]
X=challenger[:,0].reshape((23,1))
Y=challenger[:,1]
logreg = linear_model.LogisticRegression(C=1000000,penalty="l2")
logreg.fit(X,Y)
coef=logreg.coef_[0]
xseq=np.arange(30,100,.5)[:,np.newaxis]
#xseqmat=np.ones((len(xseq),2))
#xseqmat[:,1]=xseq
xB=logreg.intercept_[0]+logreg.coef_[0][0]*xseq
#plt.plot(xseq,1/(np.exp(-xB)+1))
plt.plot(xseq,logreg.predict_proba(xseq)[:,1])
plt.savefig("logreg.pdf")
plt.clf()
if __name__ == "__main__":
raw()
linear()
cubic()
quartic()
challenger_cubic()
challenger_logistic()
| 27.909091 | 83 | 0.594137 | 503 | 3,070 | 3.584493 | 0.192843 | 0.016639 | 0.022185 | 0.026622 | 0.465336 | 0.465336 | 0.445369 | 0.445369 | 0.445369 | 0.445369 | 0 | 0.059379 | 0.171661 | 3,070 | 110 | 84 | 27.909091 | 0.649626 | 0.043648 | 0 | 0.465909 | 0 | 0 | 0.126708 | 0.008197 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.045455 | 0 | 0.113636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d5c64559255b57af22709eadd89e57c13a09bc1 | 3,210 | py | Python | funcs.py | iw4p/NimiPA | 3833b38416c6603955b972a8d9524330f1a739b0 | [
"MIT"
] | null | null | null | funcs.py | iw4p/NimiPA | 3833b38416c6603955b972a8d9524330f1a739b0 | [
"MIT"
] | null | null | null | funcs.py | iw4p/NimiPA | 3833b38416c6603955b972a8d9524330f1a739b0 | [
"MIT"
] | null | null | null | import zipfile
import os
import shutil
import plistlib
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
RootDir1 = ROOT_DIR + '/Extract'
# RootIcon = ROOT_DIR + '/Data/PNGs/AppIcon60x60@3x.png'
appName = ""
bundleID = ""
version = ""
buildNumber = ""
# ROOT_DIR = ""
# RootDir1 = ROOT_DIR + '/Extract'
# def main():
# ipaName = raw_input("Enter the ipa name without .ipa extension: ")
# with zipfile.ZipFile( ROOT_DIR + '/' + ipaName + '.ipa', 'r') as zip_ref:
# zip_ref.extractall( ROOT_DIR + '/Extract')
# makeFolder('/Data/PNGs')
# makeFolder('/Data/Plists')
def getIPA(ipaFile, path):
# with zipfile.ZipFile( ROOT_DIR + '/' + ipaName + '.ipa', 'r') as zip_ref:
# zip_ref.extractall( ROOT_DIR + '/Extract')
with zipfile.ZipFile(ipaFile, 'r') as zip_ref:
zip_ref.extractall( path + '/Extract')
makeFolder('/Data/PNGs')
makeFolder('/Data/Plists')
# shutil.rmtree(RootDir1 + '/Data')
def makeFolder(directoryName):
if not os.path.exists(ROOT_DIR + directoryName):
os.makedirs(ROOT_DIR + directoryName)
def execFileFinder(name):
    # plistlib.readPlist() was removed in Python 3.9; plistlib.load() replaces it
    with open(ROOT_DIR + "/Data/Plists/Info.plist", "rb") as fp:
        pl = plistlib.load(fp)
    return pl[name]
def findFiles(root, targetPath, extension):
for root, dirs, files in os.walk((os.path.normpath(RootDir1)), topdown=False):
for name in files:
if name.endswith(extension):
SourceFolder = os.path.join(root,name)
shutil.copy2(SourceFolder, targetPath)
def showData():
findFiles(RootDir1, ROOT_DIR + '/Data/Plists/', extension = '.plist')
findFiles(RootDir1, ROOT_DIR + '/Data/PNGs/', extension = '.png')
findFiles(RootDir1, ROOT_DIR + '/Data', extension = execFileFinder("CFBundleName"))
print("App Name (CFBundleName): " + execFileFinder("CFBundleName"))
print("Bundle ID (CFBundleIdentifier): " + execFileFinder("CFBundleIdentifier"))
print("Version (CFBundleShortVersionString): " + execFileFinder("CFBundleShortVersionString"))
print("Build Number (CFBundleVersion): " + execFileFinder("CFBundleVersion"))
# appName = execFileFinder("CFBundleName")
# bundleID = execFileFinder("CFBundleIdentifier")
# version = execFileFinder("CFBundleShortVersionString")
# buildNumber = execFileFinder("CFBundleVersion")
# print("Build Number (aefsdg): " + appName)
appName = execFileFinder("CFBundleName")
bundleID = execFileFinder("CFBundleIdentifier")
version = execFileFinder("CFBundleShortVersionString")
buildNumber = execFileFinder("CFBundleVersion")
shutil.rmtree(RootDir1)
def CFBundleName():
appName = execFileFinder("CFBundleName")
return appName
def CFBundleIdentifier():
appName = execFileFinder("CFBundleIdentifier")
return appName
def CFBundleShortVersionString():
appName = execFileFinder("CFBundleShortVersionString")
return appName
def CFBundleVersion():
appName = execFileFinder("CFBundleVersion")
return appName
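# A minimal usage sketch (the .ipa path below is hypothetical):
#
#   getIPA("/path/to/app.ipa", ROOT_DIR)   # unpack the archive and stage Data/ folders
#   showData()                             # print and extract the Info.plist fields
#   print(CFBundleName(), CFBundleShortVersionString())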
# appName = execFileFinder("CFBundleName")
# bundleID = execFileFinder("CFBundleIdentifier")
# version = execFileFinder("CFBundleShortVersionString")
# buildNumber = execFileFinder("CFBundleVersion") | 30.865385 | 98 | 0.693769 | 308 | 3,210 | 7.146104 | 0.269481 | 0.047706 | 0.034075 | 0.012267 | 0.378919 | 0.340754 | 0.340754 | 0.291686 | 0.291686 | 0.291686 | 0 | 0.005273 | 0.172897 | 3,210 | 104 | 99 | 30.865385 | 0.823729 | 0.295016 | 0 | 0.113208 | 0 | 0 | 0.202052 | 0.067351 | 0 | 0 | 0 | 0 | 0 | 1 | 0.169811 | false | 0 | 0.075472 | 0 | 0.339623 | 0.075472 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d5cac5d4783cdfd5fbf54c092cbac91dd82e976 | 2,565 | py | Python | task_mail.py | Noel-git/OSS_final | 1199c5c7ce054da01c9dfb33c2035540a7693550 | [
"BSD-3-Clause"
] | null | null | null | task_mail.py | Noel-git/OSS_final | 1199c5c7ce054da01c9dfb33c2035540a7693550 | [
"BSD-3-Clause"
] | null | null | null | task_mail.py | Noel-git/OSS_final | 1199c5c7ce054da01c9dfb33c2035540a7693550 | [
"BSD-3-Clause"
] | 1 | 2022-02-13T00:46:27.000Z | 2022-02-13T00:46:27.000Z | import urllib.request
import re
import numpy as np
import os, sys, time
import datetime
from bs4 import BeautifulSoup
import smtplib
from email.mime.text import MIMEText
from time import sleep
import random
from urllib.request import HTTPError
from urllib.request import URLError
def mailfunc(year,semester,code,c_num,mail):
HTTPError_num = 0
URLError_num = 0
current_people = 0
total_people = 0
url = "https://hisnet.handong.edu/for_student/sugang/PLES230M.php?hak_year="+year+"&hak_term="+semester+"&hakbu=%C0%FC%C3%BC&isugbn=%C0%FC%C3%BC&injung=%C0%FC%C3%BC&eng=%C0%FC%C3%BC&prof_name=&gwamok=&gwamok_code="+code+"&ksearch=search"
executable = sys.executable
args = sys.argv[:]
args.insert(0, sys.executable)
req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
while(True):
rand = random.randrange(2,8)
sleep(rand)
try:
global res
res = urllib.request.urlopen(req).read()
except HTTPError:
HTTPError_num = HTTPError_num + 1
pass
except URLError:
URLError_num = URLError_num + 1
pass
soup = BeautifulSoup(res,'html.parser')
keywords = str(soup.find_all('td', {'align': 'center'}))
keywords = re.sub('<.+?>', '', keywords, 0).strip()
keywords = keywords.replace(" ","")
code2 = code+',(.*?)%,'
keywords = re.findall(code2,keywords)
subject = []
for i in keywords:
part = i.split(',')
subject.append(part)
row_col = list(np.shape(subject))
for i in range(0,len(keywords)):
if c_num == subject[i][0]:
subject_name = subject[i][1]
current_people = subject[i][row_col[1]-2]
total_people = subject[i][row_col[1]-3]
print(current_people + total_people)
if int(current_people) < int(total_people):
s = smtplib.SMTP('smtp.gmail.com', 587)
s.ehlo()
s.starttls()
s.login('gmail ID', 'App PW')
# message text (Korean): "<subject_name> course registration is now available"
msg = MIMEText(str(subject_name)+'과목 수강신청 가능합니다')
msg['Subject'] = (str(subject_name) +'과목 수강신청 가능합니다')
msg['From'] = 'sugang@gmail.com'
msg['To'] = mail
s.sendmail('sugang@gmail.com', mail , msg.as_string())
s.quit()
return "%s 과목 수강신청가능합니다." %subject_name
| 27.580645 | 242 | 0.557115 | 313 | 2,565 | 4.469649 | 0.434505 | 0.046462 | 0.017155 | 0.022873 | 0.07005 | 0.07005 | 0.040029 | 0 | 0 | 0 | 0 | 0.02035 | 0.310331 | 2,565 | 92 | 243 | 27.880435 | 0.770492 | 0 | 0 | 0.031746 | 0 | 0.015873 | 0.152597 | 0.043831 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0.031746 | 0.190476 | 0 | 0.222222 | 0.015873 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d5ed91a6c42e6d6c4605027a657c48bce3dbbe1 | 10,954 | py | Python | ariadne/point_net/processor.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 6 | 2020-08-28T22:44:07.000Z | 2022-01-24T20:53:00.000Z | ariadne/point_net/processor.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 1 | 2021-02-20T09:38:46.000Z | 2021-02-20T09:38:46.000Z | ariadne/point_net/processor.py | t3hseus/ariadne | b4471a37741000e22281c4d6ff647d65ab9e1914 | [
"MIT"
] | 2 | 2021-10-04T09:25:06.000Z | 2022-02-09T09:09:09.000Z | import logging
import os
from typing import Tuple, Iterable, Optional, List
import gin
import pandas as pd
import numpy as np
from ariadne.point_net.point.points import Points, save_points_new
from ariadne.preprocessing import (
BaseTransformer,
DataProcessor,
DataChunk,
ProcessedDataChunk,
ProcessedData
)
from ariadne.transformations import Compose
LOGGER = logging.getLogger('ariadne.prepare')
@gin.configurable(denylist=['df_chunk_data'])
class PointsDataChunk(DataChunk):
def __init__(self, df_chunk_data: pd.DataFrame):
super().__init__(df_chunk_data)
class TransformedPointsDataChunk(ProcessedDataChunk):
def __init__(self,
processed_object: Optional[Points],
output_name: str):
super().__init__(processed_object)
self.processed_object = processed_object
self.output_name = output_name
class ProcessedPointsData(ProcessedData):
def __init__(self, processed_data: List[TransformedPointsDataChunk]):
super().__init__(processed_data)
self.processed_data = processed_data
@gin.configurable(denylist=['data_df'])
class PointNet_Processor(DataProcessor):
def __init__(self,
output_dir: str,
data_df: pd.DataFrame,
transforms: List[BaseTransformer] = None):
super().__init__(
processor_name='PointNet_Processor',
output_dir=output_dir,
data_df=data_df,
transforms=transforms)
def generate_chunks_iterable(self) -> Iterable[PointsDataChunk]:
return self.data_df.groupby('event')
def construct_chunk(self,
chunk_df: pd.DataFrame) -> PointsDataChunk:
processed = self.transformer(chunk_df)
return PointsDataChunk(processed)
def preprocess_chunk(self,
chunk: PointsDataChunk,
idx: str) -> ProcessedDataChunk:
chunk_df = chunk.df_chunk_data
chunk_id = int(chunk_df.event.values[0])
output_name = os.path.join(self.output_dir, f'points_{idx}_{chunk_id}')
out = (chunk_df[['r', 'phi', 'z']].values / [1., np.pi, 1.]).T
out = Points(
X=out.astype(np.float32),
track=(chunk_df[['track']] >= 0).values.squeeze(-1).astype(np.float32)
)
return TransformedPointsDataChunk(out, output_name)
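    # The (r, phi, z) columns above are divided by (1, pi, 1) so that phi is
    # rescaled to [-1, 1]; a standalone sketch of the same step (toy values):
    #
    #   hits = np.array([[1.0, np.pi / 2, -0.5]])   # one hit: r, phi, z
    #   X = (hits / [1., np.pi, 1.]).T              # -> shape (3, n_hits)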
def postprocess_chunks(self,
chunks: List[TransformedPointsDataChunk]) -> ProcessedPointsData:
return ProcessedPointsData(chunks)
def save_on_disk(self,
processed_data: ProcessedPointsData):
broken = 0
total = len(processed_data.processed_data)
for obj in processed_data.processed_data:
if obj.processed_object is None:
broken += 1
LOGGER.info(f'\n==Collected {broken} broken events out of {total} events.==\n')
save_points_new(processed_data.processed_data)
@gin.configurable(denylist=['data_df'])
class PointNet_ProcessorBMN7(PointNet_Processor):
def __init__(self,
output_dir: str,
data_df: pd.DataFrame,
stats_cols,
transforms: List[BaseTransformer] = None):
super().__init__(
output_dir=output_dir,
data_df=data_df,
transforms=transforms
)
self.stats_cols = stats_cols
self.maxis = {col: -1e7 for col in stats_cols}
self.minis = {col: 1e7 for col in stats_cols}
self.mean_hits = []
def preprocess_chunk(self,
chunk: PointsDataChunk,
idx: str) -> ProcessedDataChunk:
chunk_df = chunk.df_chunk_data
if chunk_df.empty:
return TransformedPointsDataChunk(None, '')
chunk_id = int(chunk_df.event.values[0])
output_name = os.path.join(self.output_dir, f'points_{idx}_{chunk_id}')
chunk_df = chunk_df[chunk_df.det == 1]
if not chunk_df[chunk_df.track < -1].empty:
LOGGER.info(f'\nPointNet_Processor got hit with id < -1!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
if chunk_df[chunk_df.track == -1].empty:
LOGGER.info(f'\nPointNet_Processor got event with no fakes!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
if chunk_df[chunk_df.track != -1].empty:
LOGGER.info(f'\nPointNet_Processor got event with no real hits!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
for col in self.stats_cols:
self.maxis[col] = max(chunk_df[col].max(), self.maxis[col])
self.minis[col] = min(chunk_df[col].min(), self.minis[col])
out = chunk_df[['x', 'y', 'z']].values.T
out = Points(
X=out.astype(np.float32),
track=(chunk_df[['track']] >= 0).values.squeeze(-1).astype(np.float32)
)
self.mean_hits.append(len(chunk_df))
return TransformedPointsDataChunk(out, output_name)
def save_on_disk(self,
processed_data: ProcessedPointsData):
LOGGER.info("\n\n=================\nSome stats:")
for col in self.stats_cols:
LOGGER.info(f'MAXIS {col}: {self.maxis[col]}')
LOGGER.info(f'MINIS {col}: {self.minis[col]}')
LOGGER.info(f'Mean hits per event: {np.mean(self.mean_hits)}')
LOGGER.info('End\n===============\n')
super(PointNet_ProcessorBMN7, self).save_on_disk(processed_data)
@gin.configurable(denylist=['data_df'])
class PointNet_ProcessorBMN7_dist(PointNet_ProcessorBMN7):
def __init__(self,
output_dir: str,
data_df: pd.DataFrame,
transforms: List[BaseTransformer] = None):
super().__init__(
output_dir=output_dir,
data_df=data_df,
transforms=transforms
)
raise NotImplementedError('stats_cols must be defined for this processor')
def preprocess_chunk(self,
chunk: PointsDataChunk,
idx: str) -> ProcessedDataChunk:
chunk_df = chunk.df_chunk_data
if chunk_df.empty:
return TransformedPointsDataChunk(None, '')
chunk_id = int(chunk_df.event.values[0])
output_name = os.path.join(self.output_dir, f'points_{idx}_{chunk_id}')
for col in ['x', 'y', 'z']:
self.maxis[col] = max(chunk_df[col].max(), self.maxis[col])
self.minis[col] = min(chunk_df[col].min(), self.minis[col])
chunk_df = chunk_df[chunk_df.det == 1]
if chunk_df.empty:
LOGGER.warning(f'SKIPPED broken {chunk_id} event')
return TransformedPointsDataChunk(None, output_name)
track_pnts = chunk_df[chunk_df.track >= 0]
xs = track_pnts[['x']].values
ys = track_pnts[['y']].values
zs = track_pnts[['z']].values
a = chunk_df.apply(lambda pnt: np.min(
np.linalg.norm(
np.hstack((
(xs - pnt.x, ys - pnt.y, zs - pnt.z)
)), axis=-1)), axis=1)
chunk_df['dist'] = a
out = chunk_df[['x', 'y', 'z']].values.T
out = Points(
X=out.astype(np.float32),
track=chunk_df[['dist']].values.squeeze(-1).astype(np.float32)
)
self.mean_hits.append(len(chunk_df))
return TransformedPointsDataChunk(out, output_name)
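    # The per-hit `apply` above scales as O(n_hits * n_track_hits); a hedged
    # sketch of an equivalent vectorized version using scipy (scipy is not
    # imported by this module):
    #
    #   from scipy.spatial import cKDTree
    #   track_xyz = track_pnts[['x', 'y', 'z']].values
    #   dists, _ = cKDTree(track_xyz).query(chunk_df[['x', 'y', 'z']].values)
    #   chunk_df['dist'] = dists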
@gin.configurable(denylist=['data_df'])
class PointNet_ProcessorBMN7_impulse(PointNet_ProcessorBMN7):
def __init__(self,
output_dir: str,
data_df: pd.DataFrame,
stats_cols,
impulses,
transforms: List[BaseTransformer] = None):
super().__init__(
output_dir=output_dir,
data_df=data_df,
stats_cols=stats_cols,
transforms=transforms
)
self.impulse_minmax = impulses
def preprocess_chunk(self,
chunk: PointsDataChunk,
idx: str) -> ProcessedDataChunk:
chunk_df = chunk.df_chunk_data
if chunk_df.empty:
return TransformedPointsDataChunk(None, '')
chunk_id = int(chunk_df.event.values[0])
output_name = os.path.join(self.output_dir, f'points_{idx}_{chunk_id}')
chunk_df = chunk_df[chunk_df.det == 1]
if not chunk_df[chunk_df.track < -1].empty:
LOGGER.info(f'\nPointNet_Processor got hit with id < -1!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
if chunk_df[chunk_df.track == -1].empty:
LOGGER.info(f'\nPointNet_Processor got event with no fakes!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
if chunk_df[chunk_df.track != -1].empty:
LOGGER.info(f'\nPointNet_Processor got event with no real hits!\n for event {chunk_id}')
return TransformedPointsDataChunk(None, '')
for key, val in self.impulse_minmax.items():
vals = chunk_df[[key]].values
normed = vals / val[1]
chunk_df[key] = normed
true = chunk_df[chunk_df.track >= 0][key].values
false = chunk_df[chunk_df.track < 0][key].values
self.maxis[key + "_true"] = max(self.maxis[key + "_true"], true.max())
self.minis[key + "_true"] = min(self.minis[key + "_true"], true.min())
self.maxis[key + "_false"] = max(self.maxis[key + "_false"], false.max())
self.minis[key + "_false"] = min(self.minis[key + "_false"], false.min())
chunk_df['station'] = (chunk_df['station'].values / 5.0)
for col in self.stats_cols:
if '_' in col:
continue
self.maxis[col] = max(chunk_df[col].max(), self.maxis[col])
self.minis[col] = min(chunk_df[col].min(), self.minis[col])
# track_pnts = chunk_df[chunk_df.track >= 0]
# xs = track_pnts[['x']].values
# ys = track_pnts[['y']].values
# zs = track_pnts[['z']].values
# stats = track_pnts[['station']].values / 7.0
out = chunk_df[['x', 'y', 'z', 'station']].values.T
# if not chunk_df[(chunk_df.track != -1) & (chunk_df.track < 0) ].empty:
# LOGGER.info("\nPointNet_Processor got hit with id < -1!\n for event %d" % chunk_id)
# return TransformedPointsDataChunk(None, "")
tgt = chunk_df[['px', 'py', 'pz']].values
tgt_hit = (chunk_df['track'].values != -1).astype(np.float32).reshape(-1, 1)
tgt = np.hstack((tgt_hit, tgt))
out = Points(
X=out.astype(np.float32),
track=tgt
)
self.mean_hits.append(len(chunk_df))
return TransformedPointsDataChunk(out, output_name)
| 37.006757 | 100 | 0.595855 | 1,294 | 10,954 | 4.809892 | 0.127512 | 0.083226 | 0.048201 | 0.047237 | 0.623715 | 0.613914 | 0.594473 | 0.594473 | 0.542577 | 0.538239 | 0 | 0.008495 | 0.279989 | 10,954 | 295 | 101 | 37.132203 | 0.780652 | 0.034873 | 0 | 0.535088 | 0 | 0 | 0.092871 | 0.015715 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.039474 | 0.008772 | 0.214912 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d630971cba9f6e65624ec37bc4a69988cc209f0 | 2,972 | py | Python | setup.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | 1 | 2022-03-14T07:35:13.000Z | 2022-03-14T07:35:13.000Z | setup.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | 3 | 2022-03-24T02:30:18.000Z | 2022-03-31T07:46:04.000Z | setup.py | zydmayday/pamda | 6740d0294f3bedbeeef3bbc3042a43dceb3239b2 | [
"MIT"
] | null | null | null | from os.path import abspath, dirname, join
from setuptools import find_packages, setup
# Fetches the content from README.md
# This will be used for the "long_description" field.
with open(join(dirname(abspath(__file__)), "README.md"), encoding='utf-8') as f:
README_MD = f.read()
setup(
# The name of your project that we discussed earlier.
# This name will decide what users will type when they install your package.
# In my case it will be:
# pip install pydash-arnu515
# This field is REQUIRED
name="python_ramda",
# The version of your project.
# Usually, it would be in the form of:
# major.minor.patch
# eg: 1.0.0, 1.0.1, 3.0.2, 5.0-beta, etc.
# You CANNOT upload two versions of your package with the same version number
# This field is REQUIRED
version="0.2.2",
# The packages that constitute your project.
# For my project, I have only one - "pydash".
# Either you could write the name of the package, or
# alternatively use setuptools.findpackages()
#
# If you only have one file, instead of a package,
# you can instead use the py_modules field instead.
# EITHER py_modules OR packages should be present.
packages=find_packages(exclude="tests"),
# The description that will be shown on PyPI.
# Keep it short and concise
# This field is OPTIONAL
description="A small clone of ramda",
# The content that will be shown on your project page.
# In this case, we're displaying whatever is there in our README.md file
# This field is OPTIONAL
long_description=README_MD,
# Now, we'll tell PyPI what language our README file is in.
# In my case it is in Markdown, so I'll write "text/markdown"
# Some people use reStructuredText instead, so you should write "text/x-rst"
# If your README is just a text file, you have to write "text/plain"
# This field is OPTIONAL
long_description_content_type="text/markdown",
# The url field should contain a link to a git repository, the project's website
# or the project's documentation. I'll leave a link to this project's Github repository.
# This field is OPTIONAL
url="https://github.com/zydmayday/python_ramda",
# The author name and email fields are self explanatory.
# These fields are OPTIONAL
author="zydmayday",  # the setuptools keyword is "author", not "author_name"
author_email="zydmayday@gmail.com",
# Classifiers help categorize your project.
# For a complete list of classifiers, visit:
# https://pypi.org/classifiers
# This is OPTIONAL
classifiers=[
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3.9"
],
# Keywords are tags that identify your project and help searching for it
# This field is OPTIONAL
keywords="functional programming, ramda",
# For additional fields, check:
# https://github.com/pypa/sampleproject/blob/master/setup.py
)
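# A minimal sketch of how this file is typically consumed (assuming the
# "build" and "twine" packages are installed):
#
#   python -m build        # creates an sdist and a wheel under dist/
#   twine upload dist/*    # publishes the artifacts to PyPI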
| 37.15 | 92 | 0.693136 | 441 | 2,972 | 4.628118 | 0.414966 | 0.030867 | 0.037727 | 0.046546 | 0.049976 | 0.033317 | 0 | 0 | 0 | 0 | 0 | 0.008696 | 0.22611 | 2,972 | 79 | 93 | 37.620253 | 0.878696 | 0.654778 | 0 | 0 | 0 | 0 | 0.280612 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.095238 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6899acaef28c3d3178f6885bc65bff3f550f05 | 26,957 | py | Python | sarcastic_comments.py | nagi1995/sarcastic-news-headlines-classification | 2fbc2c936cec946896161552fcffbe0cb71116c0 | [
"Apache-2.0"
] | null | null | null | sarcastic_comments.py | nagi1995/sarcastic-news-headlines-classification | 2fbc2c936cec946896161552fcffbe0cb71116c0 | [
"Apache-2.0"
] | null | null | null | sarcastic_comments.py | nagi1995/sarcastic-news-headlines-classification | 2fbc2c936cec946896161552fcffbe0cb71116c0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# <a href="https://colab.research.google.com/github/nagi1995/sarcastic-comment-detection/blob/main/Sarcastic_Comments.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# In[1]:
from google.colab import drive
drive.mount('/content/drive')
# In[2]:
get_ipython().system('ln -s /content/drive/MyDrive /mygdrive')
# In[3]:
get_ipython().system('ls /mygdrive')
# In[4]:
get_ipython().system('cp /mygdrive/Sarcasm_Headlines_Dataset_v2.json ./')
get_ipython().system('cp /mygdrive/Sarcasm_Headlines_Dataset.json ./')
# # Import Libraries
# In[38]:
import pandas as pd
import numpy as np
get_ipython().run_line_magic('matplotlib', 'inline')
from matplotlib import pyplot as plt
import seaborn as sns
import re
from collections import Counter
from wordcloud import WordCloud, STOPWORDS
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, confusion_matrix, auc, accuracy_score
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
import pickle
import cv2
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.callbacks import *
from tensorflow.keras import Model, Input, Sequential
from datetime import datetime
from tensorflow.keras.layers import *
from tensorflow.keras.utils import plot_model
from google.colab.patches import cv2_imshow
from tqdm import tqdm
# In[1]:
from prettytable import PrettyTable
# In[6]:
tf.__version__, xgb.__version__, cv2.__version__, hub.__version__
# # Load data
# In[7]:
test = pd.read_json("Sarcasm_Headlines_Dataset.json", lines=True)
test.head()
# In[8]:
test.info()
# In[9]:
train = pd.read_json("Sarcasm_Headlines_Dataset_v2.json", lines=True)
train.head()
# In[10]:
train.info()
# In[11]:
plt.figure()
sns.countplot(data = train, x = "is_sarcastic")
plt.title("Class distribution")
plt.show()
# In[12]:
def length(phrase):
return len(phrase.split())
# In[13]:
train["length"] = train["headline"].apply(length)
train.head()
# In[14]:
plt.figure()
sns.displot(data = train, x = "length", kde = True)
plt.title("distribution of number of words in headlines")
plt.show()
# In[15]:
for i in [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
for i in [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
for i in [99, 99.10, 99.20, 99.30, 99.40, 99.50, 99.60, 99.70, 99.80, 99.90]:
print("{0}th percentile is {1}".format(i, np.percentile(train["length"], i)))
print()
# In[16]:
# Reference: https://stackoverflow.com/a/47091490/6645883
def decontracted(phrase):
# specific
phrase = re.sub(r"won\'t", "will not", phrase)
phrase = re.sub(r"can\'t", "can not", phrase)
# general
phrase = re.sub(r"n\'t", " not", phrase)
phrase = re.sub(r"\'re", " are", phrase)
phrase = re.sub(r"\'s", " is", phrase)
phrase = re.sub(r"\'d", " would", phrase)
phrase = re.sub(r"\'ll", " will", phrase)
phrase = re.sub(r"\'t", " not", phrase)
phrase = re.sub(r"\'ve", " have", phrase)
phrase = re.sub(r"\'m", " am", phrase)
return phrase.lower()
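# In[ ]:


# A quick sanity check of decontracted() (a hedged addition, not an
# original cell):
print(decontracted("He won't say he's sorry, but we're sure he'd agree"))
# -> "he will not say he is sorry, but we are sure he would agree"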
# In[17]:
train["headline"] = train["headline"].apply(decontracted)
test["headline"] = test["headline"].apply(decontracted)
# In[18]:
# Reference: https://www.geeksforgeeks.org/generating-word-cloud-python/
def wordcloud_plot(df):
comment_words = ""
stopwords = set(STOPWORDS)
# iterate through the csv file
for val in df.headline:
# typecast each val to a string
val = str(val)
# split the value
tokens = val.split()
# Converts each token into lowercase
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
comment_words += " ".join(tokens)+" "
wordcloud = WordCloud(width = 800, height = 800,
background_color = "white",
stopwords = stopwords,
min_font_size = 10).generate(comment_words)
# plot the WordCloud image
plt.figure(figsize = (8, 8), facecolor = None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad = 0)
plt.show()
# In[56]:
wordcloud_plot(train)
# In[57]:
wordcloud_plot(test)
# # BoW
# In[19]:
vectorizer = CountVectorizer(min_df = 10, max_df = 5000, ngram_range = (1, 3))
vectorizer.fit(train["headline"])
x_train = vectorizer.transform(train["headline"])
x_test = vectorizer.transform(test["headline"])
y_train = train["is_sarcastic"]
y_test = test["is_sarcastic"]
x_train.shape, x_test.shape
# ### Logistic Regression
# In[17]:
model = LogisticRegression(n_jobs = -1)
params = {"C" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[18]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_C'])
results.head()
# In[19]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_C']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[73]:
model = LogisticRegression(C = 1, max_iter = 200)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Naive Bayes
# In[83]:
model = MultinomialNB(class_prior = [.5, .5])
params = {"alpha" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[84]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_alpha'])
results.head()
# In[85]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_alpha']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("alpha: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[21]:
model = MultinomialNB(alpha = .033)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Random Forest
# In[94]:
# (originally a %%time cell; the timing output is omitted here)
model = RandomForestClassifier()
params = {"n_estimators" : [10, 50, 100, 150]}

gridsearch = GridSearchCV(model, params,
                          cv = 5, scoring = "accuracy",
                          return_train_score = True,
                          verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[96]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_n_estimators'])
results.head()
# In[98]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_n_estimators']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.legend()
plt.xlabel("number of trees: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[23]:
model = RandomForestClassifier(n_estimators = 50)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### GBDT
# In[131]:
model = xgb.XGBClassifier(verbosity = 1, use_label_encoder = False)
params = {"n_estimators" : [10, 50, 100, 150],
"max_depth" : [4, 8, 16, 32]}
gridsearch = GridSearchCV(model, params,
cv = 5, scoring = "accuracy",
return_train_score = True,
verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[132]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results.head()
# In[133]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_train_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("train accuracy in heatmap")
plt.show()
# In[134]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_test_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("cv accuracy in heatmap")
plt.show()
# In[24]:
model = xgb.XGBClassifier(n_estimators = 150, max_depth = 32, verbosity = 1, use_label_encoder = False)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # TFIDF
# In[25]:
vectorizer = TfidfVectorizer(min_df = 10, max_df = 5000, ngram_range = (1, 3))
vectorizer.fit(train["headline"])
x_train = vectorizer.transform(train["headline"])
x_test = vectorizer.transform(test["headline"])
y_train = train["is_sarcastic"]
y_test = test["is_sarcastic"]
x_train.shape, x_test.shape
# ### Logistic Regression
# In[109]:
model = LogisticRegression(n_jobs = -1)
params = {"C" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[110]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_C'])
results.head()
# In[111]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_C']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("C: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[26]:
model = LogisticRegression(C = 3.3, max_iter = 200)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Naive Bayes
# In[115]:
model = MultinomialNB(class_prior = [.5, .5])
params = {"alpha" : [0.0001, .00033, .001, .0033, .01, .033, .1, .33, 1, 3.3, 10, 33, 100]}
gridsearch = GridSearchCV(model, params, cv = 5, scoring = "accuracy", return_train_score = True)
gridsearch.fit(x_train, y_train)
# In[116]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_alpha'])
results.head()
# In[117]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_alpha']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.xscale("log")
plt.legend()
plt.xlabel("alpha: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[27]:
model = MultinomialNB(alpha = .01)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### Random Forest
# In[121]:
# (originally a %%time cell; the timing output is omitted here)
model = RandomForestClassifier()
params = {"n_estimators" : [10, 50, 100, 150]}

gridsearch = GridSearchCV(model, params,
                          cv = 5, scoring = "accuracy",
                          return_train_score = True,
                          verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[122]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results = results.sort_values(['param_n_estimators'])
results.head()
# In[123]:
train_auc= results['mean_train_score']
train_auc_std= results['std_train_score']
cv_auc = results['mean_test_score']
cv_auc_std= results['std_test_score']
K = results['param_n_estimators']
plt.plot(K, train_auc, "bo-", label='Train accuracy')
plt.plot(K, cv_auc, "ro-", label='CV accuracy')
plt.legend()
plt.xlabel("number of trees: hyperparameter")
plt.ylabel("accuracy")
plt.title("Hyper parameter Vs accuracy plot")
plt.grid()
plt.show()
# In[28]:
model = RandomForestClassifier(n_estimators = 50)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# ### GBDT
# In[125]:
model = xgb.XGBClassifier(verbosity = 1, use_label_encoder = False)
params = {"n_estimators" : [10, 50, 100, 150],
"max_depth" : [4, 8, 16, 32]}
gridsearch = GridSearchCV(model, params,
cv = 5, scoring = "accuracy",
return_train_score = True,
verbose = 1, n_jobs = -1)
gridsearch.fit(x_train, y_train)
# In[126]:
results = pd.DataFrame.from_dict(gridsearch.cv_results_)
results.head()
# In[127]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_train_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("train accuracy in heatmap")
plt.show()
# In[128]:
hmap = results.pivot("param_max_depth", "param_n_estimators", "mean_test_score")
sns.heatmap(hmap, linewidth = 1, annot = True)
plt.ylabel("max_depth")
plt.xlabel("n_estimators")
plt.title("test accuracy in heatmap")
plt.show()
# In[29]:
model = xgb.XGBClassifier(n_estimators = 150, max_depth = 32, verbosity = 1, use_label_encoder = False)
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # Deep learning
# In[50]:
label_encoder = OneHotEncoder()
label_encoder.fit(np.array(train["is_sarcastic"]).reshape(-1, 1))
y_train_ohe = label_encoder.transform(np.array(train["is_sarcastic"]).reshape(-1, 1))
y_test_ohe = label_encoder.transform(np.array(test["is_sarcastic"]).reshape(-1, 1))
y_train_ohe.shape, y_test_ohe.shape
# In[51]:
with open("/mygdrive/glove_vectors", "rb") as fi:
glove_model = pickle.load(fi)
glove_words = set(glove_model.keys())
# In[52]:
t = Tokenizer()
t.fit_on_texts(train["headline"])
encoded_train = t.texts_to_sequences(train["headline"])
encoded_test = t.texts_to_sequences(test["headline"])
max_length = 25
padded_train = pad_sequences(encoded_train,
maxlen = max_length,
padding = "post",
truncating = "post")
padded_test = pad_sequences(encoded_test,
maxlen = max_length,
padding = "post",
truncating = "post")
print(padded_train.shape, padded_test.shape, type(padded_train))
vocab_size = len(t.word_index) + 1
vocab_size
# In[53]:
embedding_matrix = np.zeros((vocab_size, 300)) # vector len of each word is 300
for word, i in t.word_index.items():
if word in glove_words:
vec = glove_model[word]
embedding_matrix[i] = vec
embedding_matrix.shape
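# In[ ]:


# A quick coverage check (a hedged addition, not an original cell): what
# fraction of the tokenizer vocabulary has a GloVe vector?
covered = sum(1 for word in t.word_index if word in glove_words)
print(f"GloVe coverage: {covered / len(t.word_index):.2%}")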
# ### callbacks
# In[26]:
get_ipython().run_line_magic('load_ext', 'tensorboard')
# In[54]:
def checkpoint_path():
return "./model/weights.{epoch:02d}-{val_accuracy:.4f}.hdf5"
def log_dir():
return "./logs/fit/" + datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
earlystop = EarlyStopping(monitor = "val_accuracy",
patience = 7,
verbose = 1,
restore_best_weights = True,
mode = 'max')
reduce_lr = ReduceLROnPlateau(monitor = "val_accuracy",
factor = .4642,
patience = 3,
verbose = 1,
min_delta = 0.001,
mode = 'max')
# ### model building
# In[55]:
tf.keras.backend.clear_session()
input = Input(shape = (max_length, ), name = "input")
embedding = Embedding(input_dim = vocab_size,
output_dim = 300, # glove vector size
weights = [embedding_matrix],
trainable = False)(input)
lstm = LSTM(32)(embedding)
flatten = Flatten()(lstm)
dense = Dense(16, activation = None,
kernel_initializer = "he_uniform")(flatten)
dropout = Dropout(.25)(dense)
activation = Activation("relu")(dropout)
output = Dense(2, activation = "softmax", name = "output")(activation)
model = Model(inputs = input, outputs = output)
model.compile(optimizer = "adam", loss = "sparse_categorical_crossentropy", metrics = ["accuracy"])
plot_model(model, to_file = "./model.png", show_shapes = True)
model.summary()
# In[56]:
cv2_imshow(cv2.imread("./model.png"))
# In[57]:
get_ipython().system('rm -rf ./logs/')
get_ipython().run_line_magic('tensorboard', '--logdir logs/fit')
# ### training model
# In[58]:
tensorboard_callback = TensorBoard(log_dir = log_dir(),
histogram_freq = 1,
write_images = True)
checkpoint = ModelCheckpoint(filepath = checkpoint_path(),
monitor='val_accuracy',
verbose = 1,
save_best_only = True,
mode = "max")
callbacks_list = [checkpoint, tensorboard_callback, earlystop, reduce_lr]
history = model.fit(padded_train, y_train,
validation_data = (padded_test, y_test),
epochs = 30,
batch_size = 32,
callbacks = callbacks_list)
# In[59]:
plt.figure()
L = len(history.history["loss"]) + 1
plt.plot(range(1, L), history.history["loss"], "bo-", label = "loss")
plt.plot(range(1, L), history.history["accuracy"], "g*-", label = "accuracy")
plt.plot(range(1, L), history.history["val_loss"], "y^-", label = "val_loss")
plt.plot(range(1, L), history.history["val_accuracy"], "ro-", label = "val_accuracy")
plt.legend()
plt.xlabel("epoch")
plt.grid()
plt.show()
# ### testing model
# In[60]:
y_pred_softmax = model.predict(padded_test)
# pick the class with the larger softmax probability
# (equivalent to thresholding y_pred_softmax[:, 0] at 0.5)
y_pred = np.argmax(y_pred_softmax, axis=1)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# # BERT encodings
# ### creating BERT model
# In[20]:
max_length = 27
# In[21]:
tf.keras.backend.clear_session()
input_word_ids = Input(shape = (max_length,), dtype = tf.int32, name = "input_word_ids")
input_mask = Input(shape = (max_length,), dtype = tf.int32, name = "input_mask")
segment_ids = Input(shape = (max_length,), dtype = tf.int32, name = "segment_ids")
bert_layer = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/1", trainable = False)
pooled_output, sequence_output = bert_layer([input_word_ids, input_mask, segment_ids])
bert_model = Model(inputs = [input_word_ids, input_mask, segment_ids], outputs = pooled_output)
# In[22]:
bert_model.summary()
# In[23]:
bert_model.output
# ### tokenization
# In[24]:
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
# In[25]:
get_ipython().system('pip install sentencepiece')
from tokenization import FullTokenizer
# In[26]:
tokenizer = FullTokenizer(vocab_file, do_lower_case)
# In[27]:
def my_tokens_util(series, max_length):
    # token ids, attention mask and segment ids for each headline
    x_tokens = np.zeros((series.shape[0], max_length))
    x_mask = np.ones((series.shape[0], max_length))
    x_segment = np.zeros((series.shape[0], max_length))
    for i in range(series.shape[0]):
        # bug fix: tokenize the i-th headline (the original tokenized values[0] every time)
        tokens = tokenizer.tokenize(series.values[i])
        if len(tokens) >= max_length - 2:
            tokens = tokens[: (max_length - 2)]
        tokens = ["[CLS]", *tokens, "[SEP]"]
        pe_tokens = np.array(tokenizer.convert_tokens_to_ids(tokens))
        length = len(tokens)
        if length >= max_length:
            x_tokens[i] = pe_tokens[:max_length]
        else:
            x_tokens[i, :length] = pe_tokens
            x_mask[i, length:] = 0  # mask out the padded positions
    return np.array(series), x_tokens, x_mask, x_segment
# In[28]:
X_train, X_train_tokens, X_train_mask, X_train_segment = my_tokens_util(train["headline"], max_length)
X_test, X_test_tokens, X_test_mask, X_test_segment = my_tokens_util(test["headline"], max_length)
# In[29]:
pickle.dump((X_train, X_train_tokens, X_train_mask, X_train_segment, y_train),open('/mygdrive/train_data.pkl','wb'))
pickle.dump((X_test, X_test_tokens, X_test_mask, X_test_segment, y_test),open('/mygdrive/test_data.pkl','wb'))
# In[30]:
X_train, X_train_tokens, X_train_mask, X_train_segment, y_train = pickle.load(open("/mygdrive/train_data.pkl", 'rb'))
X_test, X_test_tokens, X_test_mask, X_test_segment, y_test = pickle.load(open("/mygdrive/test_data.pkl", 'rb'))
# ### embeddings from BERT model
# In[31]:
X_train_pooled_output = bert_model.predict([X_train_tokens, X_train_mask, X_train_segment])
X_test_pooled_output = bert_model.predict([X_test_tokens, X_test_mask, X_test_segment])
# In[33]:
pickle.dump((X_train_pooled_output, X_test_pooled_output),open('/mygdrive/final_output.pkl','wb'))
# In[20]:
X_train_pooled_output, X_test_pooled_output = pickle.load(open('/mygdrive/final_output.pkl', 'rb'))
# In[21]:
X_train_pooled_output.shape, X_test_pooled_output.shape, y_train.shape, y_test.shape
# In[39]:
scaler = StandardScaler()
scaler.fit(X_train_pooled_output)
x_train = scaler.transform(X_train_pooled_output)
x_test = scaler.transform(X_test_pooled_output)
x_train.shape, x_test.shape
# ### training a NN with 768 features
# In[45]:
tf.keras.backend.clear_session()
model = Sequential()
model.add(Dense(128, activation = "relu", kernel_initializer = "he_uniform", input_shape = (768, )))
model.add(Dropout(.5))
model.add(Dense(32, activation = "relu", kernel_initializer = "he_uniform"))
model.add(Dropout(.5))
model.add(Dense(2, activation = "softmax"))
model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
plot_model(model, to_file = "./model.png", show_shapes = True)
model.summary()
# In[46]:
cv2_imshow(cv2.imread("./model.png"))
# In[ ]:
get_ipython().run_line_magic('tensorboard', '--logdir logs/fit')
# In[47]:
tensorboard_callback = TensorBoard(log_dir = log_dir(),
histogram_freq = 1,
write_images = True)
checkpoint = ModelCheckpoint(filepath = checkpoint_path(),
monitor='val_accuracy',
verbose = 1,
save_best_only = True,
mode = "max")
callbacks_list = [checkpoint, tensorboard_callback, earlystop, reduce_lr]
history = model.fit(x_train, y_train,
validation_data = (x_test, y_test),
epochs = 30,
batch_size = 32,
callbacks = callbacks_list)
# In[48]:
plt.figure()
L = len(history.history["loss"]) + 1
plt.plot(range(1, L), history.history["loss"], "bo-", label = "loss")
plt.plot(range(1, L), history.history["accuracy"], "g*-", label = "accuracy")
plt.plot(range(1, L), history.history["val_loss"], "y^-", label = "val_loss")
plt.plot(range(1, L), history.history["val_accuracy"], "ro-", label = "val_accuracy")
plt.legend()
plt.xlabel("epoch")
plt.grid()
plt.show()
# ### testing model
# In[49]:
y_pred_softmax = model.predict(x_test)
# pick the class with the larger softmax probability
# (equivalent to thresholding y_pred_softmax[:, 0] at 0.5)
y_pred = np.argmax(y_pred_softmax, axis=1)
print("Accuracy:", 100*accuracy_score(y_test, y_pred))
cm = confusion_matrix(y_test, y_pred)
sns.heatmap(cm, annot = True, fmt = "d")
plt.xlabel("predicted label")
plt.ylabel("actual label")
plt.title("test confusion matrix")
plt.show()
# In[3]:
p = PrettyTable(["Model", "Test Accuracy"])
p.add_row(["BoW with Logistic Regression", "91.3287%"])
p.add_row(["BoW with Naive Bayes", "86.9557%"])
p.add_row(["BoW with Random Forest", "99.8951%"])
p.add_row(["BoW with XGBoost", "87.9815%"])
p.add_row(["TF-IDF with Logistic Regression", "90.7671%"])
p.add_row(["TF-IDF with Naive Bayes", "87.0979%"])
p.add_row(["TF-IDF with Random Forest", "99.9063%"])
p.add_row(["TF-IDF with XGBoost", "91.1752%"])
p.add_row(["Neural network with Glove Embeddings", "99.9925%"])
print(p)
# In[ ]:
| 23.198795 | 371 | 0.669659 | 3,848 | 26,957 | 4.490385 | 0.14657 | 0.015279 | 0.011459 | 0.011575 | 0.656404 | 0.621217 | 0.590428 | 0.569825 | 0.557961 | 0.537936 | 0 | 0.034872 | 0.172386 | 26,957 | 1,161 | 372 | 23.218777 | 0.739623 | 0.0631 | 0 | 0.626761 | 0 | 0.003521 | 0.189754 | 0.022742 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010563 | false | 0 | 0.056338 | 0.005282 | 0.075704 | 0.03169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d699bede1bd0de3d82711a3efc8b955f53e785d | 14,098 | py | Python | motifscan/motif/__init__.py | shao-lab/Motifscan | eb5efbc9e450ba8e9c0e5b64a8c9c4340b765d02 | [
"BSD-3-Clause"
] | 14 | 2017-12-27T06:45:56.000Z | 2021-11-05T13:13:34.000Z | motifscan/motif/__init__.py | shao-lab/Motifscan | eb5efbc9e450ba8e9c0e5b64a8c9c4340b765d02 | [
"BSD-3-Clause"
] | 3 | 2020-05-22T05:53:35.000Z | 2021-01-22T07:58:27.000Z | motifscan/motif/__init__.py | shao-lab/Motifscan | eb5efbc9e450ba8e9c0e5b64a8c9c4340b765d02 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T21:55:59.000Z | 2020-09-09T21:55:59.000Z | """
motifscan.motif
---------------
Module for motif related classes and functions.
"""
import logging
import os
import re
from motifscan.config import Config
from motifscan.exceptions import PfmsFileNotFoundError, \
PwmsFileNotFoundError, PfmsJasparFormatError, PwmsMotifScanFormatError
from motifscan.genome import bases
from motifscan.motif.matrix import PositionFrequencyMatrix, \
PositionWeightMatrix
logger = logging.getLogger(__name__)
pfms_path_fmt = os.path.join("{0}", "{1}_pfms.jaspar")
pwms_path_fmt = os.path.join("{0}", "{1}_{2}_pwms.motifscan")
class MotifMatrices:
"""Generic class for motif matrices."""
def __init__(self):
self._matrices = []
def __iter__(self):
yield from self._matrices
def __len__(self):
return len(self._matrices)
def append(self, item):
self._matrices.append(item)
def extend(self, items):
self._matrices.extend(items)
class MotifPfms(MotifMatrices):
"""Class for a set of motif PFMs (Position Frequency Matrices).
Parameters
----------
pfms : list of `PositionFrequencyMatrix`, optional
PFMs used to construct the class.
name : str, optional
The name of the motif PFMs set.
Attributes
----------
name : str or None
The name of the motif PFMs set, or None if not specified.
"""
def __init__(self, pfms=None, name=None):
super().__init__()
self.name = name
if pfms is not None:
for pfm in list(pfms):
if isinstance(pfm, PositionFrequencyMatrix):
self.append(pfm)
else:
raise ValueError(f"invalid PFM item: {pfm!r}")
@staticmethod
def _parse_jaspar_pfms(path):
"""Parse motif PFMs in JASPAR format.
The regular expression pattern of the header/matrix line is inspired by
`Biopython`.
JASPAR PFM Example:
>MA0006.1 Ahr::Arnt
A [ 3 0 0 0 0 0 ]
C [ 8 0 23 0 0 0 ]
G [ 2 23 0 23 0 24 ]
T [ 11 1 1 1 24 0 ]
Raises
------
PfmsJasparFormatError
If the file does not strictly follow the JASPAR PFMs format.
"""
header_pattern = re.compile(r"^>\s*(\S+)(\s+(\S+))?")
matrix_new_pattern = re.compile(r"\s*([ACGT])\s*\[\s*(.+)\s*\]")
matrix_old_pattern = re.compile(r"\s*(.+)\s*")
pfms = []
line_num = 0
expect_header = True # expect header (True) or matrix line (False)
with open(path, 'r') as fin:
for line in fin:
line_num += 1
line = line.strip()
if not line: # skip blank lines
continue
m_header = header_pattern.match(line)
m_matrix_new = matrix_new_pattern.match(line)
m_matrix_old = matrix_old_pattern.match(line)
if bool(m_header) != expect_header: # format check
raise PfmsJasparFormatError(line_num, line)
if m_header:
matrix_id = m_header.group(1)
name = m_header.group(3)
n_matrix = 0
values = []
expect_header = False
else:
if m_matrix_new:
base = m_matrix_new.group(1)
if base != bases[n_matrix]:
raise PfmsJasparFormatError(line_num, line)
tmp_values = m_matrix_new.group(2).split()
elif m_matrix_old:
tmp_values = m_matrix_old.group(1).split()
else: # neither a header nor a matrix line
raise PfmsJasparFormatError(line_num, line)
try:
values.append(list(map(int, tmp_values)))
except (ValueError, TypeError):
raise PfmsJasparFormatError(line_num, line)
n_matrix += 1
if n_matrix == 4:
pfm = PositionFrequencyMatrix(values=values, name=name,
matrix_id=matrix_id)
pfms.append(pfm)
expect_header = True
if not expect_header: # check whether the last matrix is complete
raise PfmsJasparFormatError(line_num + 1, '')
return pfms
def read_pfms(self, path, format='jaspar'):
"""Read motif PFMs.
Parameters
----------
path : str
Path to load the PFMs.
format : {'jaspar'}, optional
PFMs file format, default='jaspar'.
"""
if format not in ['jaspar']:
raise ValueError(f"invalid motif PFMs file format: {format!r}")
logger.debug(f"Reading motif PFMs from {path} [{format}]")
pfms = self._parse_jaspar_pfms(path)
self.extend(pfms)
logger.debug(f"Found {len(pfms)} motif PFMs")
class MotifPwms(MotifMatrices):
"""Class for a set of motif PWMs (Position Weight Matrices).
Parameters
----------
pwms : list of `PositionWeightMatrix`, optional
PWMs used to construct the class.
name : str, optional
The name of the motif PWMs set.
genome : str, optional
The name of the genome assembly under which these PWMs are built.
Attributes
----------
name : str or None
The name of the motif PWMs set, or None if not specified.
genome : str or None
The name of the genome assembly under which these PWMs are built, or
None if not specified.
"""
def __init__(self, pwms=None, name=None, genome=None):
super().__init__()
self.name = name
self.genome = genome
if pwms is not None:
for pwm in list(pwms):
if isinstance(pwm, PositionWeightMatrix):
self.append(pwm)
else:
raise ValueError(f"invalid PWM item: {pwm!r}")
def save_built_pwms(self):
"""Save built motif PWMs."""
logger.info(
f"Saving motif PWMs {self.name!r} under assembly {self.genome!r}")
motif_dir = Config().get_motif_path(self.name)
pwms_path = pwms_path_fmt.format(motif_dir, self.name, self.genome)
self.write_motifscan_pwms(pwms_path)
def write_motifscan_pwms(self, path):
"""Write motif PWMs in MotifScan format.
Parameters
----------
path : str
The file path to write the MotifScan PWMs.
"""
logger.debug(f"Writing MotifScan PWMs to {path}")
with open(path, 'w') as f_out:
for pwm in self:
f_out.write(f">{pwm.matrix_id}\t{pwm.name}\tPWM\n")
for idx, base in enumerate(bases):
values_str = '\t'.join(
map(lambda x: f'{x:8.5f}', pwm.matrix[idx]))
f_out.write(f"{base} [{values_str}]\n")
for p, cutoff in pwm.cutoffs.items():
f_out.write(f"Cutoff_p{p}\t{cutoff}\n")
def read_motifscan_pwms(self, path):
"""Read PWMs in MotifScan format.
MotifScan PWM Example:
>MA0006.1 Ahr::Arnt PWM
A [-0.85815 -5.68647 -5.68647 -5.68647 -5.68647 -5.68647]
C [ 0.48657 -5.32257 1.53966 -5.32257 -5.32257 -5.32257]
G [-0.90016 1.53922 -5.32301 1.53922 -5.32301 1.58174]
T [ 0.43981 -1.93828 -1.93828 -1.93828 1.21696 -5.68779]
Cutoff_p1e-3 0.55403
Cutoff_p1e-4 0.82985
Cutoff_p1e-5 1.0
Parameters
----------
path : str
The file path to read the MotifScan PWMs.
Raises
------
PwmsMotifScanFormatError
If the file does not strictly follow the MotifScan PWMs format.
"""
logger.debug(f"Reading MotifScan PWMs from {path}")
header_pattern = re.compile(r"^>(\S+)\t(\S+)\tPWM$")
matrix_pattern = re.compile(r"^([ACGT]) \[(.+)\]$")
cutoff_pattern = re.compile(r"^Cutoff_p(\S+)\t(\S+)")
pwms = []
line_num = 0
# expect_flag: 0=header, 1=matrix, 2=cutoff, 3=cutoff or header
        # 1 header line + 4 matrix lines + at least 1 cutoff line
expect_flag = 0
with open(path, 'r') as fin:
for line in fin:
line_num += 1
line = line.strip()
if not line: # skip blank lines
continue
m_header = header_pattern.match(line)
m_matrix = matrix_pattern.match(line)
m_cutoff = cutoff_pattern.match(line)
# format checker
if m_header:
if expect_flag != 0 and expect_flag != 3:
raise PwmsMotifScanFormatError(line_num, line)
elif m_matrix:
if expect_flag != 1:
raise PwmsMotifScanFormatError(line_num, line)
elif m_cutoff:
if expect_flag != 2 and expect_flag != 3:
raise PwmsMotifScanFormatError(line_num, line)
else: # does not match any pattern
raise PwmsMotifScanFormatError(line_num, line)
if m_header:
if expect_flag == 3: # already got a pwm and save it
pwm = PositionWeightMatrix(values=values, name=name,
matrix_id=matrix_id,
cutoffs=cutoffs)
pwms.append(pwm)
matrix_id = m_header.group(1)
name = m_header.group(2)
n_matrix = 0
values = []
cutoffs = {}
expect_flag = 1 # found header, expect matrix line next
elif m_matrix:
base = m_matrix.group(1)
if base != bases[n_matrix]:
raise PwmsMotifScanFormatError(line_num, line)
tmp_values = m_matrix.group(2).split()
try:
values.append(list(map(float, tmp_values)))
except (ValueError, TypeError):
raise PwmsMotifScanFormatError(line_num, line)
n_matrix += 1
if n_matrix == 4:
                        # got 4 matrix lines, expect cutoff line next
expect_flag = 2
elif m_cutoff:
p = m_cutoff.group(1)
cutoff = m_cutoff.group(2)
cutoffs[p] = float(cutoff)
# got first cutoff, expect cutoff or header next
if expect_flag == 2:
expect_flag = 3
# check whether the last matrix is complete
if expect_flag == 1 or expect_flag == 2:
raise PwmsMotifScanFormatError(line_num + 1, '')
if expect_flag == 3:
pwm = PositionWeightMatrix(values=values, name=name,
matrix_id=matrix_id,
cutoffs=cutoffs)
pwms.append(pwm)
self.extend(pwms)
logger.debug(f"Found {len(pwms)} MotifScan PWMs")
def load_installed_pfms(name):
"""Load a pre-installed motif PFMs set.
Parameters
----------
name : str
Name of the pre-installed motif PFMs set to be loaded.
    Returns
    -------
pfms : `MotifPfms`
Loaded PFMs of the motif set.
Raises
------
PfmsFileNotFoundError
        If the motif PFMs file does not exist.
"""
logger.info(f"Loading motif PFMs set {name!r}")
motif_dir = Config().get_motif_path(name)
pfms_path = pfms_path_fmt.format(motif_dir, name)
if os.path.isfile(pfms_path):
pfms = MotifPfms(name=name)
pfms.read_pfms(path=pfms_path, format='jaspar')
else:
raise PfmsFileNotFoundError(name)
return pfms
def load_built_pwms(name, genome):
"""Load built motif PWMs.
Parameters
----------
name : str
Name of the built motif PWMs set to be loaded.
genome : str
Genome assembly name under which these PWMs are built.
Raises
------
PwmsFileNotFoundError
        If the motif PWMs file does not exist.
"""
logger.info(
f"Loading motif PWMs set {name!r} under genome {genome!r}")
motif_dir = Config().get_motif_path(name)
pwms_path = pwms_path_fmt.format(motif_dir, name, genome)
pwms = MotifPwms(name=name, genome=genome)
if os.path.isfile(pwms_path):
pwms.read_motifscan_pwms(pwms_path)
else:
raise PwmsFileNotFoundError(name, genome)
return pwms
def get_score_cutoffs(sampling_scores):
"""Get motif score cutoffs given the motif score background distributions.
Parameters
----------
sampling_scores : array_like
        The sampling scores of the motifs, with shape (n_motifs, n_sampling).
    Returns
    -------
    pwms_cutoffs : list of dict
        Score cutoffs for each motif, keyed by p-value string (e.g. ``'1e-3'``).
    """
pwms_cutoffs = []
n_pwms = len(sampling_scores)
for i, scores in enumerate(sampling_scores):
if len(scores) < 100:
raise ValueError(
"each motif must have at least 100 sampling scores")
logger.debug(f"Getting cutoff: {i + 1}/{n_pwms}")
pwm_cutoffs = {}
n_scores = len(scores)
        n_bits = min(len(str(n_scores)), 7)  # digits in n_scores bound the largest exponent, capped at 7
scores.sort(reverse=True)
for exponent in range(2, n_bits):
cutoff = scores[int(n_scores * 0.1 ** exponent) - 1]
pwm_cutoffs[f'1e-{exponent}'] = cutoff
pwms_cutoffs.append(pwm_cutoffs)
return pwms_cutoffs
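# Minimal sanity-check sketch for `get_score_cutoffs` (illustrative only,
# not part of the module API): with 1000 sorted sampling scores, the p=1e-2
# cutoff is the 10th-largest value, since int(1000 * 0.01) - 1 == 9.
def _example_score_cutoffs():
    sampling_scores = [[float(i) for i in range(1000)]]
    cutoffs = get_score_cutoffs(sampling_scores)
    assert cutoffs[0]['1e-2'] == 990.0
    return cutoffs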
| 35.069652 | 79 | 0.53348 | 1,627 | 14,098 | 4.47941 | 0.149355 | 0.015368 | 0.015093 | 0.034577 | 0.373628 | 0.305708 | 0.249726 | 0.198546 | 0.150659 | 0.114709 | 0 | 0.030327 | 0.370833 | 14,098 | 401 | 80 | 35.157107 | 0.791319 | 0.258122 | 0 | 0.352941 | 0 | 0 | 0.078856 | 0.015263 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067873 | false | 0 | 0.031674 | 0.004525 | 0.135747 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6b77d7ffda23d35dcf570e0f37a02bba8cb003 | 6,742 | py | Python | tests/custom.py | vsoch/django-river-ml | 67bea67770809e75323a68cac660f528e673f5d2 | [
"Apache-2.0"
] | 6 | 2022-02-24T10:26:06.000Z | 2022-03-07T02:44:37.000Z | tests/custom.py | vsoch/django-river-ml | 67bea67770809e75323a68cac660f528e673f5d2 | [
"Apache-2.0"
] | null | null | null | tests/custom.py | vsoch/django-river-ml | 67bea67770809e75323a68cac660f528e673f5d2 | [
"Apache-2.0"
] | null | null | null | import collections
import functools
import random
from typing import Dict, List
from river import base, utils
def iter_counts(X: List[List[str]]):
"""
Given lists of words, return vocabularies with counts. This is useful
for the VariableVocabKMeans model that expects this input.
Parameters
----------
X
A list of lists of words (str)
Example
-------
>>> X = [
... ["one", "two"],
... ["one", "four"],
... ["one", "zero"],
... ["four", "two"],
... ["four", "four"],
... ["four", "zero"]
... ]
    >>> for vocab in iter_counts(X):
    ...     print(vocab)
    {'one': 1, 'two': 1}
    {'one': 1, 'four': 1}
    {'one': 1, 'zero': 1}
    {'four': 1, 'two': 1}
    {'four': 2}
    {'four': 1, 'zero': 1}
"""
# Convert to counts (vocabulary)
counts = []
for words in X:
vocab = {}
for word in words:
if word not in vocab:
vocab[word] = 0
vocab[word] += 1
counts.append(vocab)
return counts
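# Equivalent one-liner sketch (illustrative): the counting loop above can
# also be written with `collections.Counter`, which is already imported:
#
#     counts = [dict(collections.Counter(words)) for words in X]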
class VariableVocabKMeans(base.Clusterer):
"""Variable Vocabulary KMeans
Instead of requiring a fixed set of features that are numerical, this version
of Kmeans:
1. Allows for providing a vocabulary (string variables) that are stored with the model
2. Allows adding new words to the vocabulary.
    When we encounter a new word that is not yet in the vocabulary, we know it
    has never been seen before, so every cluster center can safely be given a
    value of 0 for that dimension. This is an example of a custom model for
    the django-river-ml server.
Parameters
----------
n_clusters
Maximum number of clusters to assign.
halflife
        Amount by which to move the cluster centers; a reasonable value is between 0 and 1.
mu
Mean of the normal distribution used to instantiate cluster positions.
sigma
Standard deviation of the normal distribution used to instantiate cluster positions.
p
Power parameter for the Minkowski metric. When `p=1`, this corresponds to the Manhattan
distance, while `p=2` corresponds to the Euclidean distance.
seed
Random seed used for generating initial centroid positions.
Attributes
----------
vocab: dict
Vocabulary that matches str tokens to their index in each center vector
centers : dict
Central positions of each cluster.
Examples
--------
    Instead of numbers, we provide vectors of tokens (or strings):

    >>> X = [
    ... ["one", "two"],
    ... ["one", "four"],
    ... ["one", "zero"],
    ... ["five", "six"],
    ... ["seven", "eight"],
    ... ["nine", "nine"]
    ... ]
    >>> model = VariableVocabKMeans(n_clusters=4, halflife=0.4, sigma=3, seed=0)
    >>> for i, vocab in enumerate(iter_counts(X)):
... model = model.learn_one(vocab)
... center = model.predict_one(vocab)
... print(f'{vocab} is assigned to cluster {center}')
    ... # Get coord/value for each word
    ... model.get_center_vocab(center)

    Illustrative output (the exact assignments depend on the randomly
    initialised cluster centers)::

        {'one': 1, 'two': 1} is assigned to cluster 0
        {'one': 1, 'four': 1} is assigned to cluster 0
        {'one': 1, 'zero': 1} is assigned to cluster 0
        {'five': 1, 'six': 1} is assigned to cluster 1
        {'seven': 1, 'eight': 1} is assigned to cluster 2
        {'nine': 2} is assigned to cluster 3
    """
def __init__(
self, n_clusters=5, halflife=0.5, mu=0, sigma=1, p=2, seed: int = None
):
self.n_clusters = n_clusters
self.halflife = halflife
self.mu = mu
self.sigma = sigma
self.p = p
self.seed = seed
self._rng = random.Random(seed)
rand_gauss = functools.partial(self._rng.gauss, self.mu, self.sigma)
# Vocab is a lookup between vocab items and vector indices
self.vocab = {}
# Current index into vocab array
self.index = 0
self.centers = {
i: collections.defaultdict(rand_gauss) for i in range(n_clusters)
} # type: ignore
def get_center_vocab(self, center: int):
"""
Given the id of a centroid, get the vocab and weights / counts for it.
"""
# We need to be able to look up words based on index
lookup = {idx: word for word, idx in self.vocab.items()}
return {lookup[x]: count for x, count in self.centers[center].items()}
def learn_predict_one(self, x: Dict[str, int]):
"""Equivalent to `k_means.learn_one(x).predict_one(x)`, but faster."""
# Find the cluster with the closest center
# Don't update vocab yet because it doesn't matter if we haven't
# seen a token - it will return a count of 0.
closest = self.predict_one(x)
# Ensure centers have all features for future learning
self.update_vocab(x)
# Move the cluster's center (ONLY the one closest to!)
# By this point all words are added to the vocabulary
for word, count in x.items():
xx = {self.vocab[word]: count}
for i, xi in xx.items():
self.centers[closest][i] += self.halflife * (
xi - self.centers[closest][i]
)
return closest
def learn_one(self, x, y=None):
self.learn_predict_one(x)
return self
def update_vocab(self, x: Dict[str, int]):
"""
Given a vector of features, ensure we have each in our vocab
"""
# We can do one dict update per new word
updates = {}
for word, count in x.items():
if word not in self.vocab:
self.vocab[word] = self.index
# The word has never been seen by any previous centroid
updates[self.index] = 0
self.index += 1
# This is akin to appending a new dimension to each vector
for _, center in self.centers.items():
center.update(updates)
    def predict_one(self, x: Dict[str, int]):
# Ensure we provide a lookup between features (vocab indices)
# This should only include words we have seen before
xx = {
self.vocab.get(word): count
for word, count in x.items()
if word in self.vocab
}
def get_distance(c):
return utils.math.minkowski_distance(a=self.centers[c], b=xx, p=self.p)
return min(self.centers, key=get_distance)
@classmethod
def _unit_test_params(cls):
yield {"n_clusters": 5}
| 31.504673 | 95 | 0.577573 | 900 | 6,742 | 4.278889 | 0.268889 | 0.016359 | 0.021813 | 0.034536 | 0.114516 | 0.096339 | 0.085692 | 0.074786 | 0.048299 | 0 | 0 | 0.011301 | 0.304361 | 6,742 | 213 | 96 | 31.652582 | 0.809808 | 0.553842 | 0 | 0.028986 | 0 | 0 | 0.00384 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.072464 | 0.014493 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6b85556904e23be368445d1a45aa9c9e365db6 | 12,956 | py | Python | doc/examples/multi_taper_spectral_estimation.py | slnovak/nitime | 8182d032caf5ad9ca2f46fc428c87cb8d5dde133 | [
"BSD-3-Clause"
] | 1 | 2017-02-02T23:52:23.000Z | 2017-02-02T23:52:23.000Z | doc/examples/multi_taper_spectral_estimation.py | slnovak/nitime | 8182d032caf5ad9ca2f46fc428c87cb8d5dde133 | [
"BSD-3-Clause"
] | null | null | null | doc/examples/multi_taper_spectral_estimation.py | slnovak/nitime | 8182d032caf5ad9ca2f46fc428c87cb8d5dde133 | [
"BSD-3-Clause"
] | null | null | null | """
.. _multi-taper-psd:
===============================
Multi-taper spectral estimation
===============================
The distribution of power in a signal, as a function of frequency, known as the
power spectrum (or PSD, for power spectral density) can be estimated using
variants of the discrete Fourier transform (DFT). The naive estimate of the
power spectrum, based on the values of the DFT estimated directly from the
signal, using the fast Fourier transform algorithm (FFT) is referred to as a
periodogram (see :func:`algorithms.periodogram`). This estimate suffers from
several problems [NR2007]_:
- Inefficiency: In most estimation problems, additional samples, or a denser
sampling grid would usually lead to a better estimate (smaller variance of
the estimate, given a constant level of noise). However, this is not the case
for the periodogram. Even as we add more samples to our signal, or increase
our sampling rate, our estimate at frequency $f_k$ does not improve. This is
because of the effects these kinds of changes have on spectral
estimates. Adding additional samples will improve the frequency domain
resolution of our estimate and sampling at a finer rate will change the
Nyquist frequency, the highest frequency for which the spectrum can be
estimated. Thus, these changes do not improve the estimate at frequency
$f_k$.
The inefficiency problem can be solved by treating different parts of the
signal as different samples from the same distribution, while assuming
stationarity of the signal. In this method, a sliding window is applied to
different parts of the signal and the windowed spectrum is averaged from these
different samples. This is sometimes referred to as Welch's periodogram
[Welch1967]_ and it is the default method used in
:func:`algorithms.get_spectra` (with the hanning window as the window function
used and no overlap between the windows). However, it may lead to the
following problem:
- Spectral leakage and bias: Spectral leakage refers to the fact that the
estimate of the spectrum at any given frequency bin is contaminated with the
power from other frequency bands. This is a consequence of the fact that we
  always look at a time-limited signal. In the naive periodogram estimate all
the samples within the time-limited signal are taken as they are (implicitly
multiplied by 1) and all the samples outside of this time-limited signal are
not taken at all (implicitly multiplied by 0). This is akin to what would
happen if the signal were multiplied sample-by-sample with a 'boxcar' window,
so called because the shape of this window is square, going from 0 to 1 over
one sampling window. Multiplying the signal with a boxcar window in the
time-domain is equivalent (due to the convolution theorem) to convolving it
in the frequency domain with the spectrum of the boxcar window. The spectral
leakage induced by this operation is demonstrated in the following example.
We start by importing the modules/functions we will need in this example
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
import scipy.stats.distributions as dist
import nitime.algorithms as tsa
import nitime.utils as utils
from nitime.viz import winspect
from nitime.viz import plot_spectral_estimate
"""
For demonstration, we will use a window of 128 points:
"""
npts = 128
fig01 = plt.figure()
# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts*zfrac)
b[:zi] = b[-zi:] = 0
name = 'Boxcar - zero fraction=%.2f' % zfrac
winspect(b, fig01, name)
"""
.. image:: fig/multi_taper_spectral_estimation_01.png
The figure on the left shows a boxcar window and the figure on the right
shows the spectrum of the boxcar function (in dB units, relative to the
frequency band of interest).
These two problems can together be mitigated through the use of other
windows. Other windows have been designed in order to optimize the amount of
spectral leakage and limit it to certain parts of the spectrum. The following
example demonstrates the spectral leakage for several different windows
(including the boxcar):
"""
fig02 = plt.figure()
# Boxcar with zeroed out fraction
b = sig.boxcar(npts)
zfrac = 0.15
zi = int(npts*zfrac)
b[:zi] = b[-zi:] = 0
name = 'Boxcar - zero fraction=%.2f' % zfrac
winspect(b, fig02, name)
winspect(sig.hanning(npts), fig02, 'Hanning')
winspect(sig.bartlett(npts), fig02, 'Bartlett')
winspect(sig.barthann(npts), fig02, 'Modified Bartlett-Hann')
"""
.. image:: fig/multi_taper_spectral_estimation_02.png
As before, the figure on the left displays the windowing function in the temporal
domain and the figure on the right displays the attenuation of spectral leakage
in the other frequency bands in the spectrum. Notice that though different
windowing functions have different spectral attenuation profiles, trading off
attenuation of leakage from frequency bands near the frequency of interest
(narrow-band leakage) with leakage from faraway frequency bands (broad-band
leakage) they are all superior in both of these respects to the boxcar window
used in the naive periodogram.
Another approach which deals with both the inefficiency problem and with the
spectral leakage problem is the use of taper functions. In this approach, the
entire signal is multiplied by a time-varying function. Several of these
functions may be used in order to emphasize and de-emphasize different parts of
the signal and these can be constructed to be orthogonal to each other,
constructing maximally independent samples at the length of the signal. As we
will see below, this allows for statistical estimation of the distribution of
the spectrum.
Discrete prolate spheroidal sequences (DPSS, also known as Slepian sequences)
[Slepian1978]_ are a class of taper functions which are constructed as a
solution to the problem of concentrating the spectrum to within a pre-specified
bandwidth. These tapers can be constructed using
:func:`algorithms.DPSS_windows`, but for the purpose of spectral estimation, it
is sufficient to specify the bandwidth (which defines the boundary between
narrow-band and broad-band leakage) as an input to
:func:`algorithms.mutli_taper_psd` and this function will then construct the
appropriate windows, calculate the tapered spectra and average them.
We will demonstrate the use of DPSS in spectral estimation on a time-series
with known spectral properties generated from an auto-regressive process.
We start by defining a function which will be used throughout this example:
"""
def dB(x, out=None):
if out is None:
return 10 * np.log10(x)
else:
np.log10(x, out)
np.multiply(out, 10, out)
"""
And the conversion factor from ln to dB:
"""
ln2db = dB(np.e)
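# Why a single factor suffices (illustrative): 10*log10(x) = 10*ln(x)/ln(10)
# = ln(x) * dB(e), so multiplying a natural-log quantity by `ln2db` converts
# it to decibels, e.g. ln2db * np.log(100) == dB(100) == 20.0 (up to
# floating-point rounding).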
"""
Next, we generate a sequence with known spectral properties:
"""
N = 512
ar_seq, nz, alpha = utils.ar_generator(N=N, drop_transients=10)
ar_seq -= ar_seq.mean()
"""
This is the true PSD for this sequence:
"""
fgrid, hz = tsa.freq_response(1.0, a=np.r_[1, -alpha], Nfreqs=N)
psd = (hz*hz.conj()).real
"""
This is a one-sided spectrum, so we double the power:
"""
psd *= 2
dB(psd, psd)
"""
We begin by using the naive periodogram function (:func:`tsa.periodogram` in
order to calculate the PSD and compare that to the true PSD calculated above.
"""
freqs, d_psd = tsa.periodogram(ar_seq)
dB(d_psd, d_psd)
fig03 = plot_spectral_estimate(freqs, psd, (d_psd,), elabels=("Periodogram",))
"""
.. image:: fig/multi_taper_spectral_estimation_03.png
Next, we use Welch's periodogram, by applying :func:`tsa.get_spectra`. Note
that we explicitly provide the function with a 'method' dict, which specifies
the method used in order to calculate the PSD, but the default method is 'welch'.
"""
welch_freqs, welch_psd = tsa.get_spectra(ar_seq,
method=dict(this_method='welch',NFFT=N))
welch_freqs *= (np.pi/welch_freqs.max())
welch_psd = welch_psd.squeeze()
dB(welch_psd, welch_psd)
fig04 = plot_spectral_estimate(freqs, psd, (welch_psd,), elabels=("Welch",))
"""
.. image:: fig/multi_taper_spectral_estimation_04.png
Next, we use the multi-taper estimation method. We estimate the spectrum:
"""
f, psd_mt, nu = tsa.multi_taper_psd(
ar_seq, adaptive=False, jackknife=False
)
dB(psd_mt, psd_mt)
"""
And get the number of tapers from here:
"""
Kmax = nu[0]/2
"""
We calculate a hypothetical 5% confidence interval from a chi-square distribution
with 2*Kmax degrees of freedom (see [Percival1993]_ eq 258)
"""
p975 = dist.chi2.ppf(.975, 2*Kmax)
p025 = dist.chi2.ppf(.025, 2*Kmax)
l1 = ln2db * np.log(2*Kmax/p975)
l2 = ln2db * np.log(2*Kmax/p025)
hyp_limits = (psd_mt + l1, psd_mt + l2 )
fig05 = plot_spectral_estimate(freqs, psd, (psd_mt,), hyp_limits,
elabels=('MT with hypothetical 5% interval',))
"""
.. image:: fig/multi_taper_spectral_estimation_05.png
An iterative method ([Thomson2007]_) can be used in order to adaptively set the
weighting of the different tapers, according to the actual spectral
concentration in the given signal (and not only the theoretical spectral
concentration calculated per default).
"""
f, adaptive_psd_mt, nu = tsa.multi_taper_psd(
ar_seq, adaptive=True, jackknife=False
)
dB(adaptive_psd_mt, adaptive_psd_mt)
fig06 = plot_spectral_estimate(freqs, psd, (adaptive_psd_mt,),
                               elabels=('MT with adaptive weighting',))
"""
.. image:: fig/multi_taper_spectral_estimation_06.png
As mentioned above, in addition to estimating the spectrum itself, an estimate
of the confidence interval of the spectrum can be generated using a
jack-knifing procedure [Thomson2007]_.
Let us define the following:
| **simple sample estimate**
| :math:`\hat{\theta} = \dfrac{1}{n}\sum_i Y_i`
This is the parameter estimate averaged from all the samples in the
distribution (all the tapered spectra).
| **leave-one-out measurement**
| :math:`\hat{\theta}_{-i} = \dfrac{1}{n-1}\sum_{k \neq i}Y_k`
This defines a group of estimates, where each sample is based on leaving one
measurement (one tapered spectrum) out.
| **pseudovalues**
| :math:`\hat{\theta}_i = n\hat{\theta} - (n-1)\hat{\theta}_{-i}`
The jackknifed esimator is computed as:
:math:`\tilde{\theta} = \dfrac{1}{n}\sum_i \hat{\theta}_i = n\hat{\theta} - \dfrac{n-1}{n}\sum_i \hat{\theta}_{-i}`
This estimator is known [Thomson2007]_ to be distributed about the true parameter :math:`\theta` approximately as a Student's t distribution, with standard error defined as:
:math:`s^{2} = \dfrac{n-1}{n}\sum_i \left(\hat{\theta}_i - \tilde{\theta}\right)^{2}`
And degrees of freedom which depend on the number of tapers used (Kmax-1):
"""
_, _, jk_var = tsa.multi_taper_psd(ar_seq, adaptive=False, jackknife=True)
jk_p = (dist.t.ppf(.975, Kmax-1) * np.sqrt(jk_var)) * ln2db
jk_limits = ( psd_mt - jk_p, psd_mt + jk_p )
fig07 = plot_spectral_estimate(freqs, psd, (psd_mt,),
jk_limits,
elabels=('MT with JK 5% interval',))
"""
.. image:: fig/multi_taper_spectral_estimation_07.png
In addition, if the 'adaptive' flag is set to True, an iterative adaptive
method is used in order to correct bias in the spectrum.
Finally, we combine the adaptive estimation of the weights with the
jack-knifing procedure.
"""
_, _, adaptive_jk_var = tsa.multi_taper_psd(
ar_seq, adaptive=True, jackknife=True
)
# find 95% confidence limits from inverse of t-dist CDF
jk_p = (dist.t.ppf(.975, Kmax-1)*np.sqrt(adaptive_jk_var)) * ln2db
adaptive_jk_limits = ( adaptive_psd_mt - jk_p, adaptive_psd_mt + jk_p )
fig08 = plot_spectral_estimate(freqs, psd,(adaptive_psd_mt, ),
adaptive_jk_limits,
elabels=('adaptive-MT with JK 5% interval',))
"""
.. image:: fig/multi_taper_spectral_estimation_08.png
We call plt.show() in order to show all the figures:
"""
plt.show()
"""
References
----------
.. [NR2007] W.H. Press, S.A. Teukolsky, W.T Vetterling and B.P. Flannery (2007)
Numerical Recipes: The Art of Scientific Computing. Cambridge:
Cambridge University Press. 3rd Ed.
.. [Thomson2007] D.J. Thomson, Jackknifing Multitaper Spectrum Estimates, IEEE
Signal Processing Magazine, 2007, pp. 20-30.
.. [Welch1967] P.D. Welch (1967), The use of the fast fourier transform for the
estimation of power spectra: a method based on time averaging
over short modified periodograms. IEEE Transcations on Audio and
Electroacoustics.
.. [Slepian1978] Slepian, D. Prolate spheroidal wave functions, Fourier
analysis, and uncertainty V: The discrete case. Bell System
                 Technical Journal, Volume 57 (1978), 1371-1430.
.. [Percival1993] Percival D.B. and Walden A.T. (1993) Spectral Analysis for
Physical Applications: Multitaper and Conventional Univariate
Techniques. Cambridge University Press
"""
| 31.754902 | 165 | 0.737651 | 2,002 | 12,956 | 4.695804 | 0.273227 | 0.011701 | 0.017232 | 0.026806 | 0.154877 | 0.118392 | 0.08063 | 0.066908 | 0.062121 | 0.049144 | 0 | 0.020972 | 0.175594 | 12,956 | 407 | 166 | 31.832924 | 0.859189 | 0.244288 | 0 | 0.121951 | 0 | 0 | 0.072591 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012195 | false | 0 | 0.097561 | 0 | 0.121951 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6ba565cf85ab39807e15a90e796ac2aba9b48f | 16,253 | py | Python | python/fixrgraph/musedev/test/test_api.py | LesleyLai/biggroum | e26de363e4bf4645dd5d90121742d3f3533f5a00 | [
"Apache-2.0"
] | 7 | 2019-02-14T17:28:29.000Z | 2021-01-11T07:12:34.000Z | python/fixrgraph/musedev/test/test_api.py | LesleyLai/biggroum | e26de363e4bf4645dd5d90121742d3f3533f5a00 | [
"Apache-2.0"
] | 23 | 2018-08-19T23:06:54.000Z | 2020-04-14T08:21:05.000Z | python/fixrgraph/musedev/test/test_api.py | LesleyLai/biggroum | e26de363e4bf4645dd5d90121742d3f3533f5a00 | [
"Apache-2.0"
] | 4 | 2018-06-28T18:22:55.000Z | 2019-03-21T06:36:56.000Z | """ Test the creation of the index
"""
import sys
import logging
import os
import json
import copy
import subprocess
import tempfile
import shutil
from flask import Flask, Response
from multiprocessing import Process
try:
    from cStringIO import StringIO  # Python 2
except ImportError:
    from io import StringIO  # Python 3 fallback, mirroring the unittest2 shim below
try:
import unittest2 as unittest
except ImportError:
import unittest
from fixrgraph.wireprotocol.search_service_wire_protocol import decompress
from fixrgraph.musedev.biggroumscript import main
from fixrgraph.musedev.api import (
biggroum_api_map,
GRAPH_EXTRACTOR_PATH, FIXR_SEARCH_ENDPOINT
)
from fixrgraph.musedev.residue import Residue
import fixrgraph.musedev.test
def compare_json_obj(obj1, obj2):
if (json.dumps(obj1, sort_keys=True) == json.dumps(obj2, sort_keys=True)):
return True
else:
print(json.dumps(obj1, indent=2, sort_keys=True))
print(json.dumps(obj2, indent=2, sort_keys=True))
return False
def get_extractor_path():
# TODO: refactor with TestPipeline (move all tests together)
    repo_path = os.path.abspath(os.path.dirname(fixrgraph.musedev.test.__file__))
    # Walk four levels up from the test package to the repository root.
    repo_path = os.path.abspath(os.path.join(repo_path, *([os.pardir] * 4)))
extractor_path = os.path.join(repo_path,
"FixrGraphExtractor/target/scala-2.12/" \
"fixrgraphextractor_2.12-0.1.0-one-jar.jar")
return extractor_path
class TestScript(unittest.TestCase):
FILEPATH = os.path.join(os.path.dirname(__file__), "data")
JAVAFILE = "AwesomeApp/app/src/main/java/fixr/plv/colorado/edu/awesomeapp/MainActivity.java"
COMMIT = "04f68b69a6f9fa254661b481a757fa1c834b52e1"
ANOMALY1 = {
"className": "fixr.plv.colorado.edu.awesomeapp.MainActivity",
"methodName": "showDialog",
"error": "missing method calls",
"pattern": "android.app.AlertDialog$Builder.<init>($r11, $r12);\n$r13 = android.app.AlertDialog$Builder.setTitle(builder, \"\\u027e\\ufffd\\ufffd\");\n",
"packageName": "fixr.plv.colorado.edu.awesomeapp",
"patch": "public void showDialog(android.content.Context context) {\n android.app.AlertDialog.Builder dialogBuilder = new android.app.AlertDialog.Builder(context);\n java.lang.String title = \"Empty Field(s)\";\n java.lang.String message = \"Please ensure all fields are contain data\";\n dialogBuilder.setMessage(message);\n dialogBuilder.setNegativeButton(\"OK\", new android.content.DialogInterface.OnClickListener() {\n @java.lang.Override\n public void onClick(android.content.DialogInterface dialog, int which) {\n }\n });\n dialogBuilder.setPositiveButton(\"Cancel\", new android.content.DialogInterface.OnClickListener() {\n public void onClick(android.content.DialogInterface dialog, int which) {\n // continue with delete\n }\n });\n dialogBuilder.create();\n dialogBuilder.show();\n // [0] The change should end here (before calling the method exit)\n}",
"line": 47,
"id": 1,
"fileName": "[MainActivity.java](https://github.com/cuplv/AwesomeApp/blob/04f68b69a6f9fa254661b481a757fa1c834b52e1/app/src/main/java/fixr/plv/colorado/edu/awesomeapp/MainActivity.java)"
}
ANOMALY2 = {
"className": "fixr.plv.colorado.edu.awesomeapp.MainActivity",
"methodName": "showDialog",
"error": "missing method calls",
"pattern": "android.app.AlertDialog$Builder.<init>($r0, this);\n$r1 = android.app.AlertDialog$Builder.setTitle($r0, \"Exit\");\n",
"packageName": "fixr.plv.colorado.edu.awesomeapp",
"patch": "public void showDialog(android.content.Context context) {\n android.app.AlertDialog.Builder dialogBuilder = new android.app.AlertDialog.Builder(context);\n java.lang.String title = \"Empty Field(s)\";\n java.lang.String message = \"Please ensure all fields are contain data\";\n dialogBuilder.setMessage(message);\n dialogBuilder.setNegativeButton(\"OK\", new android.content.DialogInterface.OnClickListener() {\n @java.lang.Override\n public void onClick(android.content.DialogInterface dialog, int which) {\n }\n });\n dialogBuilder.setPositiveButton(\"Cancel\", new android.content.DialogInterface.OnClickListener() {\n public void onClick(android.content.DialogInterface dialog, int which) {\n // continue with delete\n }\n });\n dialogBuilder.create();\n dialogBuilder.show();\n // [0] The change should end here (before calling the method exit)\n}",
"line": 47,
"id": 2,
"fileName": "[MainActivity.java](https://github.com/cuplv/AwesomeApp/blob/04f68b69a6f9fa254661b481a757fa1c834b52e1/app/src/main/java/fixr/plv/colorado/edu/awesomeapp/MainActivity.java)"
}
class TestSearchService:
@staticmethod
def process():
expected_output = [TestScript.ANOMALY1, TestScript.ANOMALY2]
return Response(json.dumps(expected_output),
status=200,
mimetype='application/json')
@staticmethod
def run_service(app):
app.run(
host = "localhost",
port = 8081
)
def __init__(self):
self.app = Flask(__name__)
self.app.route('/process_muse_data', methods=['POST'])(
TestScript.TestSearchService.process)
self.server = Process(target=TestScript.TestSearchService.run_service,
args=[(self.app)])
self.server.start()
def stop(self):
self.server.terminate()
self.server.join()
@staticmethod
def get_args(cmd):
return ["biggroumscript.py",
TestScript.FILEPATH,
TestScript.COMMIT,
cmd,
get_extractor_path(),
"http://localhost:8081/process_muse_data"
]
def test_applicable(self):
myinput = StringIO()
outstream = StringIO()
self.assertTrue(main(TestScript.get_args("applicable"),
myinput,
outstream,
biggroum_api_map) == 0)
self.assertTrue(outstream.getvalue() == "true")
def test_version(self):
myinput = StringIO()
outstream = StringIO()
self.assertTrue(main(TestScript.get_args("version"),
myinput,
outstream,
biggroum_api_map) == 0)
self.assertTrue(outstream.getvalue() == "3")
def test_run(self):
# Mock for calling run multiple times
runs = []
for file_name in [TestScript.JAVAFILE, TestScript.JAVAFILE]:
runs.append({"cwd" : "", "cmd" : "", "args" : "",
"classpath" : [],
"files": [file_name]})
residue = {"residue" : {}}
for run in runs:
outstream = StringIO()
myinput = StringIO()
run["residue"] = residue["residue"]
myinput.write(json.dumps(run))
myinput.reset()
self.assertTrue(main(TestScript.get_args("run"),
myinput, outstream, biggroum_api_map) == 0)
try:
residue_json = outstream.getvalue()
residue = json.loads(residue_json)
except:
raise Exception("Malformed JSON")
expected_res = {
"residue": {
"compilation_infos" : [{"cwd" : "", "cmd" : "", "args" : "",
"classpath" : [],
"files": [file_name]},
{"cwd" : "", "cmd" : "", "args" : "",
"classpath" : [],
"files": [file_name]}
]},
"toolNotes": []
}
self.assertTrue(compare_json_obj(residue, expected_res))
def test_finalize(self):
myinput, outstream = StringIO(), StringIO()
# Extract the app data
tmpdir = tempfile.mkdtemp("tmp_test_finalize")
try:
app_zip = os.path.join(os.path.dirname(__file__), "data", "AwesomeApp.zip")
decompress(app_zip, tmpdir)
# Create a mock residue from run
main_act_path = os.path.join(tmpdir,TestScript.JAVAFILE)
input_res = {
"residue": {
"compilation_infos" : [{"cwd" : "", "cmd" : "", "args" : "",
"classpath" : [],
"files": [main_act_path]}
]},
"toolNotes": []
}
myinput.write(json.dumps(input_res))
myinput.reset()
# Start a mock service
service = TestScript.TestSearchService()
try:
args = TestScript.get_args("finalize")
args[1] = tmpdir # set the working directory
api_res = main(args, myinput, outstream, biggroum_api_map)
self.assertTrue(api_res == 0)
out_json = json.loads(outstream.getvalue())
res_path = os.path.abspath(os.path.dirname(fixrgraph.musedev.test.__file__))
res_path = os.path.join(res_path, "data", "finalize_result.json")
with open(res_path, 'r') as f:
expected_res = json.load(f)
self.assertTrue(compare_json_obj(out_json["toolNotes"],
expected_res["toolNotes"]))
self.assertTrue(compare_json_obj(out_json["residue"]["anomalies"],
expected_res["residue"]["anomalies"]))
finally:
service.stop()
finally:
shutil.rmtree(tmpdir)
def test_talk(self):
residue_empty = {
"anomalies" : {}
}
inputs_errors = [
{"residue" : {}, "messageText" : "biggroum", "user" : "", "noteID" : ""},
{"residue" : {}, "messageText" : "biggroum wrongrequest", "user" : "", "noteID" : ""},
{"residue" : {}, "messageText" : "biggroum inspect", "user" : ""},
{"residue" : {}, "messageText" : "biggroum pattern", "user" : ""},
{"residue" : residue_empty, "messageText" : "biggroum inspect", "user" : "", "noteID" : "1"},
{"residue" : residue_empty, "messageText" : "biggroum pattern", "user" : "", "noteID" : "1"},
]
for single_input in inputs_errors:
myinput, outstream = StringIO(), StringIO()
myinput.write(json.dumps(single_input))
myinput.reset()
self.assertTrue(main(TestScript.get_args("talk"),
myinput, outstream, biggroum_api_map) != 0)
residue = {
"anomalies" : {
"1" : TestScript.ANOMALY1,
"2" : TestScript.ANOMALY2
}
}
myinput, outstream = StringIO(), StringIO()
myinput.write(json.dumps({"residue" : residue,
"messageText" : "biggroum inspect",
"user" : "", "noteID" : u'1'},))
myinput.reset()
self.assertTrue(main(TestScript.get_args("talk"),
myinput, outstream, biggroum_api_map) == 0)
# TODO: test output
myinput, outstream = StringIO(), StringIO()
myinput.write(json.dumps({"residue" : residue,
"messageText" : "biggroum pattern",
"user" : "", "noteID" : "1"},))
myinput.reset()
self.assertTrue(main(TestScript.get_args("talk"),
myinput, outstream, biggroum_api_map) == 0)
# TODO: test output
def test_reaction(self):
myinput, outstream = StringIO(), StringIO()
myinput.write(json.dumps({}))
myinput.reset()
self.assertTrue(main(TestScript.get_args("reaction"), myinput, outstream,
biggroum_api_map) == 0)
class TestResidue(unittest.TestCase):
def test_compilation_info(self):
def test_res(residue, expected_residue, ci, fi):
self.assertTrue(compare_json_obj(residue, expected_residue))
self.assertTrue(compare_json_obj(Residue.get_compilation_infos(residue), ci))
res_files = []
for ci in Residue.get_compilation_infos(residue):
res_files = res_files + Residue.get_files(ci)
self.assertTrue(set(fi) == set(res_files))
f1 = ["file1", "file2"]
f2 = ["file3", "file4"]
ci1 = {
"cwd" : "cwd",
"cmd" : "cmd",
"args" : "args",
"classpath" : "classpath",
"files" : f1,
}
ci2 = copy.deepcopy(ci1)
ci2["files"] = f2
expected_residue = {"compilation_infos" : [ci1]}
residue = Residue.append_compilation_info(None, ci1)
test_res(residue, expected_residue, [ci1], f1)
expected_residue = {"compilation_infos" : [ci1,ci2]}
residue = Residue.append_compilation_info(residue, ci2)
test_res(residue, expected_residue, [ci1,ci2], f1+f2)
def test_anomaly(self):
residue = Residue.store_anomaly(None, TestScript.ANOMALY1, "1")
self.assertTrue(compare_json_obj(residue, {"anomalies" : {"1" : TestScript.ANOMALY1}}))
residue = Residue.store_anomaly(residue, TestScript.ANOMALY2, "2")
self.assertTrue(compare_json_obj(TestScript.ANOMALY1, Residue.retrieve_anomaly(residue, "1")))
self.assertTrue(compare_json_obj(TestScript.ANOMALY2, Residue.retrieve_anomaly(residue, "2")))
class TestBash(unittest.TestCase):
SCRIPTPATH = "biggroumcheck.sh"
@unittest.skip("Skip, this must be fixed according to issue #64")
def test_bash(self):
previous = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
exec_file = os.path.join(previous, TestBash.SCRIPTPATH)
my_env = os.environ.copy()
my_env[GRAPH_EXTRACTOR_PATH] = get_extractor_path()
my_env[FIXR_SEARCH_ENDPOINT] = "http://localhost:8081/process_muse_data"
my_env["ENV_SETUP"] = "1"
# Must fail, wrong command
args = [exec_file, TestScript.FILEPATH, TestScript.COMMIT, "nothing"]
proc = subprocess.Popen(args, cwd = previous,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = my_env)
stdout, stderr = proc.communicate()
self.assertTrue(proc.returncode == 1)
# Must succeed on the run command
script_input = {
"residue" : {},
"cwd" : "",
"cmd" : "",
"args" : "",
"classpath" : [],
"files" : ["file1.java", "file2.java"]
}
args = [exec_file, TestScript.FILEPATH, TestScript.COMMIT, "run"]
proc = subprocess.Popen(args, cwd = previous,
stdin = subprocess.PIPE,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = my_env)
proc.stdin.write(json.dumps(script_input))
stdout, stderr = proc.communicate()
proc.stdin.close()
self.assertTrue(proc.returncode == 0)
compare_json_obj(json.loads(stdout),
{
"toolNotes" : [],
"residue" : {
"compilation_infos" : script_input
}
}
)
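# Standard unittest entry point (an assumed convenience addition) so the
# module can be executed directly as well as through a test runner.
if __name__ == '__main__':
    unittest.main()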
| 42.883905 | 952 | 0.559035 | 1,576 | 16,253 | 5.630076 | 0.199873 | 0.033134 | 0.015778 | 0.025245 | 0.574214 | 0.490026 | 0.441339 | 0.405274 | 0.375747 | 0.364477 | 0 | 0.01764 | 0.316372 | 16,253 | 378 | 953 | 42.997355 | 0.780938 | 0.019566 | 0 | 0.280528 | 0 | 0.036304 | 0.248979 | 0.090595 | 0 | 0 | 0 | 0.002646 | 0.069307 | 1 | 0.056106 | false | 0 | 0.062706 | 0.0033 | 0.168317 | 0.006601 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6c97ab775b3924a519a3875bbacd8525d4d56d | 4,074 | py | Python | camkes/parser/tests/teststage6.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | [
"BSD-2-Clause"
] | null | null | null | camkes/parser/tests/teststage6.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | [
"BSD-2-Clause"
] | null | null | null | camkes/parser/tests/teststage6.py | aisamanra/camkes-tool | 4bcf3f22ef7e73f8755ca1b5e7165dd6a23e89f3 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os, six, sys, unittest
ME = os.path.abspath(__file__)
# Make CAmkES importable
sys.path.append(os.path.join(os.path.dirname(ME), '../../..'))
from camkes.ast import Assembly, Component, Instance
from camkes.internal.tests.utils import CAmkESTest
from camkes.parser.stage0 import CPP, Reader
from camkes.parser.stage1 import Parse1
from camkes.parser.stage2 import Parse2
from camkes.parser.stage3 import Parse3
from camkes.parser.stage4 import Parse4
from camkes.parser.stage5 import Parse5
from camkes.parser.stage6 import Parse6
class TestStage6(CAmkESTest):
def setUp(self):
super(TestStage6, self).setUp()
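        # Chain the parser stages: stage 0 reads raw input and each later
        # stage refines the previous one's output; stage 3 runs with its
        # debug checks enabled.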
r = Reader()
s1 = Parse1(r)
s2 = Parse2(s1)
s3 = Parse3(s2, debug=True)
s4 = Parse4(s3)
s5 = Parse5(s4)
self.parser = Parse6(s5)
def test_basic(self):
ast, _ = self.parser.parse_string('''
component A {}
assembly {
composition {
component A a;
component A b;
}
}
''')
self.assertLen(ast.items, 2)
A, assembly = ast.items
self.assertIsInstance(A, Component)
self.assertIsInstance(assembly, Assembly)
def test_assembly_combining_basic(self):
ast, _ = self.parser.parse_string('''
component A {}
assembly ass {
composition {
component A a;
}
}
assembly ass2 {
composition {
component A b;
}
}
''')
self.assertLen(ast.items, 2)
A, ass = ast.items
self.assertIsInstance(A, Component)
self.assertIsInstance(ass, Assembly)
self.assertEqual(ass.name, 'ass')
self.assertLen(ass.composition.instances, 2)
a, b = ass.composition.instances
self.assertIsInstance(a, Instance)
self.assertEqual(a.name, 'a')
self.assertIsInstance(b, Instance)
self.assertEqual(b.name, 'b')
def test_assembly_combining_with_groups(self):
ast, _ = self.parser.parse_string('''
component A {}
assembly ass {
composition {
group {
component A a;
}
}
}
assembly ass2 {
composition {
group foo {
component A b;
}
}
}
''')
self.assertLen(ast.items, 2)
A, ass = ast.items
self.assertIsInstance(A, Component)
self.assertIsInstance(ass, Assembly)
self.assertEqual(ass.name, 'ass')
self.assertLen(ass.composition.instances, 2)
a, b = ass.composition.instances
self.assertIsInstance(a, Instance)
self.assertEqual(a.name, 'a')
six.assertRegex(self, a.address_space, r'^unamed_group_.*$')
self.assertIsInstance(b, Instance)
self.assertEqual(b.name, 'b')
self.assertEqual(b.address_space, 'foo')
def test_assembly_line_numbering(self):
ast, _ = self.parser.parse_string('''
assembly A {
composition {}
}
assembly B {
composition {}
}
''')
self.assertLen(ast.items, 1)
A = ast.items[0]
self.assertIsInstance(A, Assembly)
self.assertEqual(A.lineno, 3)
if __name__ == '__main__':
unittest.main()
| 26.802632 | 73 | 0.556946 | 424 | 4,074 | 5.247642 | 0.334906 | 0.098876 | 0.050337 | 0.030562 | 0.426966 | 0.426966 | 0.38382 | 0.38382 | 0.357753 | 0.297528 | 0 | 0.023543 | 0.343152 | 4,074 | 151 | 74 | 26.980132 | 0.807922 | 0.089102 | 0 | 0.477064 | 0 | 0 | 0.293752 | 0 | 0 | 0 | 0 | 0 | 0.238532 | 1 | 0.045872 | false | 0 | 0.100917 | 0 | 0.155963 | 0.009174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6cb2fb072b36fb24704252a550de48cd21f375 | 551 | py | Python | buttercup/main.py | GrafeasGroup/buttercup | 77eef4cb7f34723b246b6b2f11af92a8d7c06cae | [
"MIT"
] | 6 | 2020-12-03T20:29:30.000Z | 2022-02-08T06:07:44.000Z | buttercup/main.py | GrafeasGroup/buttercup | 77eef4cb7f34723b246b6b2f11af92a8d7c06cae | [
"MIT"
] | 110 | 2020-07-29T07:58:28.000Z | 2022-02-08T14:54:23.000Z | buttercup/main.py | GrafeasGroup/buttercup | 77eef4cb7f34723b246b6b2f11af92a8d7c06cae | [
"MIT"
] | 2 | 2021-12-07T16:22:19.000Z | 2021-12-07T16:40:33.000Z | import sys
from buttercup import logger
from buttercup.bot import ButtercupBot
EXTENSIONS = [
# The config cog has to be first!
"config",
"admin",
"handlers",
"welcome",
"name_validator",
"find",
"search",
"stats",
"heatmap",
"history",
"ping",
"rules",
"leaderboard",
]
logger.configure_logging()
config_path = sys.argv[1] if len(sys.argv) > 1 else "config.toml"
bot = ButtercupBot(command_prefix="!", config_path=config_path, extensions=EXTENSIONS)
bot.run(bot.config["Discord"]["token"])
| 19.678571 | 86 | 0.649728 | 65 | 551 | 5.415385 | 0.646154 | 0.085227 | 0.045455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004545 | 0.201452 | 551 | 27 | 87 | 20.407407 | 0.795455 | 0.056261 | 0 | 0 | 0 | 0 | 0.218147 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.136364 | 0 | 0.136364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6e1945cc59f4a6cbe5c9f8035ff97b4f932f73 | 6,245 | py | Python | posts_apis/tests/tests.py | islam-kamel/CODERUSH_APIS | ac9b21ab81da65b3e2a84549a9b6ea6665afe047 | [
"MIT"
] | 2 | 2021-12-13T05:11:22.000Z | 2021-12-15T04:39:34.000Z | posts_apis/tests/tests.py | islam-kamel/CODERUSH_APIS | ac9b21ab81da65b3e2a84549a9b6ea6665afe047 | [
"MIT"
] | 1 | 2021-12-15T03:04:11.000Z | 2021-12-16T04:08:57.000Z | posts_apis/tests/tests.py | islam-kamel/CODERUSH_APIS | ac9b21ab81da65b3e2a84549a9b6ea6665afe047 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2021 islam kamel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APITestCase, APIClient
from posts_apis.models import Posts
class TestPostsAPIView(APITestCase):
def setUp(self):
db = get_user_model()
self.user = db.objects.create_user(
username='user', nickname='user', email='user@super.com',
password='user', phone='01066373279')
self.post = Posts.objects.create(
title='test post', content='test content',
image='0_m4ilBBUlxq00O-rI.png', create_by_id=self.user.pk)
self.post_details_url = reverse('post_details', kwargs={
'slug': 'test-post', 'pk': self.post.pk})
self.client = APIClient()
self.posts_list_url = reverse('posts-list')
self.token_url = reverse('token-obtain-pair')
def test_post_model(self):
self.assertEqual(self.post.get_absolute_url(), self.post_details_url)
self.assertEqual(str(self.post), 'test post')
with self.assertRaises(Exception):
self.post = Posts.objects.create(
title='test post', content='test content',
image='0_mI.png', create_by_id=self.user.pk).save()
def test_create_post(self):
self.assertEqual(self.post.title, 'test post')
self.assertEqual(self.post.content, 'test content')
self.assertEqual(self.post.create_by_id, self.user.pk)
self.assertEqual(self.post.slug, 'test-post')
self.assertTrue(self.post.published)
def test_posts_list_view(self):
response = self.client.get(self.posts_list_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()[0]['title'], 'test post')
self.assertEqual(response.json()[0]['content'], 'test content')
self.assertEqual(response.json()[0]['create_by'], self.user.pk)
self.assertEqual(response.json()[0]['slug'], 'test-post')
def test_post_details(self):
response = self.client.get(self.post_details_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()['title'], 'test post')
self.assertEqual(response.json()['content'], 'test content')
self.assertEqual(response.json()['create_by'], self.user.pk)
self.assertEqual(response.json()['slug'], 'test-post')
def test_update_post(self):
response = self.client.get(self.post_details_url)
credentials = {
"username": 'user',
"password": 'user'
}
x = self.client.post(self.token_url, credentials)
t = 'Bearer ' + x.json()['access']
self.client.credentials(HTTP_AUTHORIZATION=t)
update_data = {
'title': 'Update',
'content': 'content',
'slug': '',
'create_by': response.json()['create_by']
}
update = self.client.put(
self.post_details_url, data=update_data)
self.post_details_url = reverse('post_details', kwargs={
'slug': 'update', 'pk': update.json()['id']})
response = self.client.get(self.post_details_url)
self.assertEqual(update.status_code, 200)
self.assertEqual(response.json()['title'], 'Update')
self.assertEqual(response.json()['content'], 'content')
self.assertEqual(response.json()['create_by'], self.user.pk)
self.assertEqual(response.json()['slug'], 'update')
def test_delete_post(self):
self.post = Posts.objects.create(
title='test Delete', content='test content',
create_by_id=self.user.pk)
self.post_details_url = reverse('post_details', kwargs={
'slug': 'test-delete', 'pk': self.post.pk})
credentials = {
"username": 'user',
"password": 'user'
}
x = self.client.post(self.token_url, credentials)
t = 'Bearer ' + x.json()['access']
self.client.credentials(HTTP_AUTHORIZATION=t)
response = self.client.delete(self.post_details_url)
self.assertEqual(response.status_code, 200)
def test_create_post_api_view(self):
credentials = {
"username": 'user',
"password": 'user'
}
x = self.client.post(self.token_url, credentials)
t = 'Bearer ' + x.json()['access']
self.client.credentials(HTTP_AUTHORIZATION=t)
data = {
'title': 'Test',
'content': 'Test',
'slug': '',
'create_by': self.user.pk
}
response = self.client.post(self.posts_list_url, data=data)
self.assertEqual(response.status_code, 201)
self.assertEqual(response.json()['title'], 'Test')
self.assertEqual(response.json()['content'], 'Test')
self.assertEqual(response.json()['create_by'], self.user.pk)
self.assertEqual(response.json()['slug'], 'test')
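# These tests assume the Django test runner (e.g. `python manage.py test`),
# which provides the database and URL configuration they rely on.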
| 38.549383 | 80 | 0.63731 | 762 | 6,245 | 5.112861 | 0.233596 | 0.103953 | 0.11807 | 0.110883 | 0.527977 | 0.465092 | 0.430185 | 0.374743 | 0.36191 | 0.326489 | 0 | 0.008105 | 0.229464 | 6,245 | 161 | 81 | 38.78882 | 0.801538 | 0.194876 | 0 | 0.352381 | 0 | 0 | 0.132347 | 0.004398 | 0 | 0 | 0 | 0 | 0.27619 | 1 | 0.07619 | false | 0.038095 | 0.038095 | 0 | 0.12381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d6e5306af7ec10787ad04ff1c709c25e136dd97 | 1,750 | py | Python | bench2csv/bench2csv_sat.py | jansemat/verifire | 5e178cc87d634e6b495ad77d34d05592379f624e | [
"MIT"
] | null | null | null | bench2csv/bench2csv_sat.py | jansemat/verifire | 5e178cc87d634e6b495ad77d34d05592379f624e | [
"MIT"
] | null | null | null | bench2csv/bench2csv_sat.py | jansemat/verifire | 5e178cc87d634e6b495ad77d34d05592379f624e | [
"MIT"
] | null | null | null | import sys
import math
import random
# Function: main()
# --- Arguments: none (the discard slice CSV path is taken from the command line)
# --- Return: none
# --- Purpose: takes slice.csv discard slice as argument, prints out SAT firewall policy (default-deny format)
#
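# Example (hypothetical): given a discard slice whose last row is the
# default-deny rule, the output keeps the earlier rows (with roughly 10%
# flipped to "deny"), inserts four "allow" rules covering the /2 subnets
# 0.0.0.0/2 through 192.0.0.0/2, and ends with the original default-deny row.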
def main():
# ensure proper arguments
if len(sys.argv) != 2:
print("Usage: python3 " + sys.argv[0] + " discard_slice.csv >> sat_fw.csv")
sys.exit(1)
# ensure slice.csv can be read
try:
with open(sys.argv[1], "r") as fd:
ds_rules = fd.read().splitlines()
except:
sys.exit("Couldn't open " + sys.argv[1])
    # split off the trailing default-deny rule, then flip ~10% of the accepting rules to "deny"
acc_rules, deny_rule = ds_rules[:-1], ds_rules[-1]
if len(acc_rules) == 0:
sys.exit("Can't create SAT firewall policy for discard slice with only a default-deny rule. Will always be UNSAT.")
ten_percent = math.ceil(0.1 * len(ds_rules))
idx = random.sample([i for i in range(len(acc_rules))], ten_percent)
for i in idx:
acc_rules[i] = ','.join(acc_rules[i].split(",")[:-1]) + ",deny"
if len(acc_rules) >= 4:
# add accepting rules to original slice rules, to create SAT firewall
acc_all_idx = random.sample([i for i in range(len(acc_rules))], 4)
acc_all_strs = []
for i in range(4):
acc_all_str = deny_rule.split(",")[:-1]
acc_all_str[0] = str(64*i) + ".0.0.0/2"
acc_all_str = ','.join(acc_all_str) + ",allow"
acc_rules.insert(random.randint(0,len(acc_rules)), acc_all_str)
else:
for i in range(4):
acc_all_str = deny_rule.split(",")[:-1]
acc_all_str[0] = str(64*i) + ".0.0.0/2"
acc_all_str = ','.join(acc_all_str) + ",allow"
acc_rules.append(acc_all_str)
# print SAT firewall policy
for rule in acc_rules: print(rule)
print(deny_rule)
if __name__ == "__main__":
main() | 29.661017 | 117 | 0.670286 | 301 | 1,750 | 3.714286 | 0.305648 | 0.064401 | 0.080501 | 0.039356 | 0.22898 | 0.22898 | 0.22898 | 0.22898 | 0.22898 | 0.22898 | 0 | 0.022712 | 0.169714 | 1,750 | 59 | 118 | 29.661017 | 0.746731 | 0.228 | 0 | 0.216216 | 0 | 0.027027 | 0.158091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.081081 | 0 | 0.108108 | 0.081081 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d70a1af99e208123df9b6b1c5b50f5689c85198 | 1,079 | py | Python | Python/python-tutorials/algo/hailstone.py | zhongyangynag/code-study | 5410929554107a384a09d899c6fa3d16ed383d2b | [
"MIT"
] | null | null | null | Python/python-tutorials/algo/hailstone.py | zhongyangynag/code-study | 5410929554107a384a09d899c6fa3d16ed383d2b | [
"MIT"
] | null | null | null | Python/python-tutorials/algo/hailstone.py | zhongyangynag/code-study | 5410929554107a384a09d899c6fa3d16ed383d2b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# inst.eecs.berkeley.edu/~cs61a/sp12/hw/hw1.html
# Q4. Douglas Hofstadter’s Pulitzer-prize-winning book, Gödel, Escher, Bach, poses the following mathematical puzzle.
#
# Pick a positive number n
# If n is even, divide it by 2.
# If n is odd, multiply it by 3 and add 1.
# Continue this process until n is 1.
# The number n will travel up and down but eventually end at 1 (at least for all numbers that have ever been tried -- nobody has ever proved that the sequence will always terminate).
#
# The sequence of values of n is often called a Hailstone sequence, because hailstones also travel up and down in the atmosphere before falling to earth. Write a function that takes a single argument with formal parameter name n, prints out the hailstone sequence starting at n, and returns the number of steps in the sequence:
l=[]
def f(n):
l.append(n)
if n==1:
return
if n%2==0:
n=n/2
else:
n=n*3+1
f(n)
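# Spec-conforming variant (a minimal sketch; `hailstone` is a hypothetical
# name, not part of the original exercise code): prints the sequence and
# returns the number of steps, as the exercise statement above asks.
def hailstone(n):
    steps = 1
    while n != 1:
        print(n)
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        steps += 1
    print(n)
    return steps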
if __name__=='__main__':
import sys
if len(sys.argv)!=2:
print('Usage: %s num'% sys.argv[0])
sys.exit(1)
f(int(sys.argv[1]))
print(l)
| 34.806452 | 327 | 0.721038 | 201 | 1,079 | 3.830846 | 0.59204 | 0.015584 | 0.01039 | 0.038961 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023783 | 0.18165 | 1,079 | 30 | 328 | 35.966667 | 0.848245 | 0.762743 | 0 | 0 | 0 | 0 | 0.085714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.176471 | 0.117647 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d7c7f7bfd4d9dc9e55cdc2091cf0b436100b69f | 43,323 | py | Python | train.py | xlDownxl/SfmLearner-Pytorch | e04becc8f16725d15cb603f4cf438a6059aa8adf | [
"MIT"
] | null | null | null | train.py | xlDownxl/SfmLearner-Pytorch | e04becc8f16725d15cb603f4cf438a6059aa8adf | [
"MIT"
] | null | null | null | train.py | xlDownxl/SfmLearner-Pytorch | e04becc8f16725d15cb603f4cf438a6059aa8adf | [
"MIT"
] | null | null | null | import argparse
import time
import csv
from collections import OrderedDict
import os
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import custom_transforms
import models
from utils import tensor2array, save_checkpoint, save_path_formatter, log_output_tensorboard
from path import Path
from loss_functions import smooth_loss, compute_smooth_loss, photometric_reconstruction_loss
from loss_functions import compute_depth_errors, compute_pose_errors
from inverse_warp import pose_vec2mat, inverse_rotate, pose_vec2mat_new
from models import PoseResNet
from logger import TermLogger, AverageMeter
from tensorboardX import SummaryWriter
import datetime
from imageio import imread, imsave
from skimage.transform import resize
import glob
from matplotlib import pyplot as plt
import matplotlib as mpl
mpl.use('Agg')
from datasets.pair_folders import PairFolder
parser = argparse.ArgumentParser(description='Structure from Motion Learner training on KITTI and CityScapes Dataset',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--dataset-format', default='sequential', metavar='STR',
help='dataset format, stacked: stacked frames (from original TensorFlow code) '
                    'sequential: sequential folders (easier to convert to with a non KITTI/Cityscapes dataset)')
parser.add_argument('--sequence-length', type=int, metavar='N', help='sequence length for training', default=3)
parser.add_argument('--rotation-mode', type=str, choices=['euler', 'quat'], default='euler',
help='rotation mode for PoseExpnet : euler (yaw,pitch,roll) or quaternion (last 3 coefficients)')
parser.add_argument('--padding-mode', type=str, choices=['zeros', 'border'], default='zeros',
                    help='padding mode for image warping : this is important for photometric differentiation when going outside target image.'
' zeros will null gradients outside target image.'
' border will only null gradients of the coordinate outside (x or y)')
parser.add_argument('--with-gt', action='store_true', help='use depth ground truth for validation. '
                    'You need to store it in npy 2D arrays; see data/kitti_raw_loader.py for an example')
parser.add_argument('--with-pose', action='store_true', help='use pose ground truth for validation. '
'You need to store it in text files of 12 columns see data/kitti_raw_loader.py for an example '
                    'Note that for kitti, it is recommended to use the odometry train set to test pose')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--epoch-size', default=0, type=int, metavar='N',
help='manual epoch size (will match dataset size if not set)')
parser.add_argument('-b', '--batch-size', default=4, type=int,
metavar='N', help='mini-batch size')
parser.add_argument('--lr', '--learning-rate', default=2e-4, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum for sgd, alpha parameter for adam')
parser.add_argument('--beta', default=0.999, type=float, metavar='M',
help='beta parameters for adam')
parser.add_argument('--weight-decay', '--wd', default=0, type=float,
metavar='W', help='weight decay')
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained-disp', dest='pretrained_disp', default=None, metavar='PATH',
help='path to pre-trained dispnet model')
parser.add_argument('--pretrained-exppose', dest='pretrained_exp_pose', default=None, metavar='PATH',
help='path to pre-trained Exp Pose net model')
parser.add_argument('--seed', default=0, type=int, help='seed for random functions, and network initialization')
parser.add_argument('--log-summary', default='progress_log_summary.csv', metavar='PATH',
help='csv where to save per-epoch train and valid stats')
parser.add_argument('--log-full', default='progress_log_full.csv', metavar='PATH',
help='csv where to save per-gradient descent train stats')
parser.add_argument('-p', '--photo-loss-weight', type=float, help='weight for photometric loss', metavar='W', default=1)
parser.add_argument('-s', '--smooth-loss-weight', type=float, help='weight for disparity smoothness loss', metavar='W', default=0.1)
parser.add_argument('-c', '--geometry-consistency-weight', type=float, help='weight for depth consistency loss', metavar='W', default=0.1)
parser.add_argument('--log-output', action='store_true', help='will log dispnet outputs and warped imgs at validation step')
parser.add_argument('--val-left-imgs',type=int,
                    help='use only the reference images to the left of the target image during validation (enabled by default)',
metavar='N',default=1)
parser.add_argument('-f', '--training-output-freq', type=int,
                    help='frequency for outputting dispnet outputs and warped imgs at training for all scales. '
'if 0, will not output',
metavar='N', default=0)
parser.add_argument('--use-edge-smooth', type=int,
                    help='set to 1 to use the edge-aware smoothness loss instead of the basic smoothness loss',
metavar='N', default=0)
parser.add_argument('--height', type=int,
                    help='input image height for the depth network',
metavar='N',)
parser.add_argument('--width', type=int,
                    help='input image width for the depth network',
metavar='N',)
parser.add_argument('--minimum-reprojection-error', type=int,
                    help='set to 1 to use the minimum reprojection error',
metavar='N',default=0)
parser.add_argument('--use-add-aug', type=int,
                    help='if set to 1, applies random Gaussian noise, vertical flip and color jitter',
metavar='N',default=0)
parser.add_argument('--disp-resnet-layers', type=int, help='resnet layer, set to 18 or 50', metavar='N',default=18)
parser.add_argument('--pretrained-dispnet', type=int, help='set to 1 to use imagenet pretrained dispnet', metavar='N',default=0)
parser.add_argument('--with-object-mask', type=int, help='make the posenet predict an object mask and object motion', metavar='N',default=0)
parser.add_argument('--compensate-rotation', type=int, help='if set, use the rotation prediction to unrotate the image before feeding it into the depth net. Should be on for KITTI and off for crane', metavar='N', default=2)
parser.add_argument('--log-compensated', action='store_true',
help='log rotation compensated images to tensorboard during validation')
parser.add_argument('--ssim-weight', type=float, default=0.0,
                    help='the weight given to SSIM in combination with the pixelwise photometric loss')
parser.add_argument('--basic-mode', type=int, default=0,
                    help='set to 1 to run the basic single-image depth mode instead of the pairwise mode')
parser.add_argument('--with-mask', type=int, default=0,
help='geometric inconsistency mask')
parser.add_argument('--with-automask', type=int, default=0,
help='mask against static pixels')
best_error = -1
n_iter = 0
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
compensate_rotation= True
def main():
def make_param_file(args,save_path):
args_dict = vars(args)
data_folder_name = str(Path(args_dict['data']).normpath().name)
folder_string = []
folder_string.append('{} epochs'.format(args_dict['epochs']))
keys_with_prefix = OrderedDict()
keys_with_prefix['epoch_size'] = 'epoch_size '
keys_with_prefix['sequence_length'] = 'sequence_length '
keys_with_prefix['rotation_mode'] = 'rot'
keys_with_prefix['padding_mode'] = 'padding '
keys_with_prefix['batch_size'] = 'batch_size '
keys_with_prefix['lr'] = 'lr '
keys_with_prefix['photo_loss_weight'] = 'photo_loss_weight '
keys_with_prefix['smooth_loss_weight'] = 'smooth_loss_weight '
keys_with_prefix['geometry_consistency_weight'] = 'geometry-consistency-weight '
keys_with_prefix['use_edge_smooth'] = 'use_edge_smooth '
keys_with_prefix['width'] = 'width '
keys_with_prefix['height'] = 'height '
keys_with_prefix['with_gt'] = 'with_gt '
keys_with_prefix['ssim_weight'] = 'ssim weight '
keys_with_prefix['minimum_reprojection_error'] = 'minimum reprojection error '
keys_with_prefix['use_add_aug'] = 'use additional augmentation '
keys_with_prefix['disp_resnet_layers'] = 'number of dispnet layers '
keys_with_prefix['pretrained_dispnet'] = 'using pretrained dispnet '
keys_with_prefix['log_compensated'] = 'log compensated source images in tensorboard '
keys_with_prefix['with_object_mask'] = 'use sfm net object mask and object pose prediction '
keys_with_prefix['with_mask'] = 'use geometric consistency mask '
keys_with_prefix['with_automask'] = 'use auto mask against static pixels '
for key, prefix in keys_with_prefix.items():
value = args_dict[key]
folder_string.append('{}{}'.format(prefix, value))
folder_string.append('{}{}'.format("compensate rotation through prediction ", compensate_rotation))
timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
folder_string.append('timestamp '+timestamp)
commit_id = Path(os.popen("git log --pretty=format:'%h' -n 1").read())
folder_string.append('Git Commit ID '+commit_id)
commit_message = Path(os.popen("git log -1").read())
folder_string.append('Git Message '+commit_message)
params = '\n'.join(folder_string)
with open(save_path/'params.txt', 'w') as f:
f.write(params)
global best_error, n_iter, device, compensate_rotation
args = parser.parse_args()
if args.dataset_format == 'stacked':
from datasets.stacked_sequence_folders import SequenceFolder
elif args.dataset_format == 'sequential':
from datasets.sequence_folders import SequenceFolder
save_path = save_path_formatter(args, parser)
args.save_path = 'checkpoints'/save_path
if os.path.isdir(args.save_path):
print("dir already exist (probably bash mode), want to override?")
input1 = input("press enter to override, any other key to cancel")
if str(input1)!="":
exit()
print('=> will save everything to {}'.format(args.save_path))
if args.compensate_rotation==2:
if "crane" in args.data:
compensate_rotation=False
else:
compensate_rotation=True
else:
compensate_rotation=args.compensate_rotation
args.save_path.makedirs_p()
make_param_file(args,args.save_path)
torch.manual_seed(args.seed)
if args.evaluate:
args.epochs = 0
tb_writer = SummaryWriter(args.save_path)
# Data loading code
normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
if args.use_add_aug:
train_transform = custom_transforms.Compose([
custom_transforms.RandomGauss(),
custom_transforms.ColorJitter(),
custom_transforms.RandomVerticalFlip(),
custom_transforms.RandomHorizontalFlip(),
custom_transforms.RandomScaleCrop(),
custom_transforms.ArrayToTensor(),
normalize
])
else:
train_transform = custom_transforms.Compose([
custom_transforms.RandomHorizontalFlip(),
custom_transforms.RandomScaleCrop(),
custom_transforms.ArrayToTensor(),
normalize
])
valid_transform = custom_transforms.Compose([custom_transforms.ArrayToTensor(), normalize])
print("=> fetching scenes in '{}'".format(args.data))
if True:
train_set = PairFolder(
args.data,
seed=args.seed,
train=True,
transform=train_transform
)
else:
train_set = SequenceFolder(
args.data,
height=args.height,
width=args.width,
transform=train_transform,
seed=args.seed,
train=True,
sequence_length=args.sequence_length
)
    # if no ground truth is available, the validation set is the same type as the training set to measure photometric loss from warping
if args.with_gt:
if args.with_pose:
from datasets.validation_folders import ValidationSetWithPose
val_set = ValidationSetWithPose(
args.data,
height=args.height,
width=args.width,
seed=args.seed,
sequence_length=args.sequence_length,
transform=valid_transform)
else:
from datasets.validation_folders import ValidationSet
val_set = ValidationSet(
args.data,
transform=valid_transform
)
else:
val_set = SequenceFolder(
args.data,
transform=valid_transform,
seed=args.seed,
train=False,
sequence_length=args.sequence_length,
)
print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
val_set, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.epoch_size == 0:
args.epoch_size = len(train_loader)
print("=> creating model with {} layers and pretrained {}".format(args.disp_resnet_layers,args.pretrained_dispnet))
if args.basic_mode:
disp_net = models.DispResNet(input_images=1, output_channels=1,num_layers = args.disp_resnet_layers, pretrained=args.pretrained_dispnet).to(device)
else:
disp_net = models.DispResNet(input_images=2, output_channels=2,num_layers = args.disp_resnet_layers, pretrained=args.pretrained_dispnet).to(device)
pose_exp_net = models.PoseResNet(args.with_object_mask,num_frames_to_predict_for=1).to(device)
if args.pretrained_exp_pose:
print("=> using pre-trained weights for explainabilty and pose net")
weights = torch.load("/home/niclas/SfmLearner-Pytorch/ckpts/weights_3f/pose.pth")
weights2 = torch.load("/home/niclas/SfmLearner-Pytorch/ckpts/weights_3f/pose_encoder.pth")
weights.update(weights2)
pose_exp_net.load_state_dict(weights, strict=False)
else:
pose_exp_net.init_weights()
if args.pretrained_disp:
print("=> using pre-trained weights for Dispnet")
weights = torch.load(args.pretrained_disp)
disp_net.load_state_dict(weights['state_dict'])
else:
disp_net.init_weights()
cudnn.benchmark = True
disp_net = torch.nn.DataParallel(disp_net)
pose_exp_net = torch.nn.DataParallel(pose_exp_net)
print('=> setting adam solver')
if args.use_edge_smooth:
print('=> using edge aware smooth loss')
else:
print('=> using basic smooth loss')
optim_params = [
{'params': disp_net.parameters(), 'lr': args.lr},
{'params': pose_exp_net.parameters(), 'lr': args.lr}
]
optimizer = torch.optim.Adam(optim_params,
betas=(args.momentum, args.beta),
weight_decay=args.weight_decay)
with open(args.save_path/args.log_summary, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
writer.writerow(['train_loss', 'validation_loss'])
with open(args.save_path/args.log_full, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
writer.writerow(['train_loss', 'photo_loss', 'explainability_loss', 'smooth_loss'])
    logger = None  # TermLogger(n_epochs=args.epochs, train_size=min(len(train_loader), args.epoch_size), valid_size=len(val_loader))
if args.pretrained_disp or args.evaluate:
#logger.reset_valid_bar()
if args.with_gt and args.with_pose:
errors, error_names = validate_with_gt_pose(args, val_loader, disp_net, pose_exp_net, 0, logger, tb_writer)
elif args.with_gt:
errors, error_names = validate_with_gt(args, val_loader, disp_net, pose_exp_net, 0, logger, tb_writer)
else:
errors, error_names = validate_without_gt(args, val_loader, disp_net, pose_exp_net, 0, logger, tb_writer)
for error, name in zip(errors, error_names):
tb_writer.add_scalar(name, error, 0)
error_string = ', '.join('{} : {:.3f}'.format(name, error) for name, error in zip(error_names[2:9], errors[2:9]))
for epoch in range(args.epochs):
print(epoch)
start = time.time()
train_loss = train(args, train_loader, disp_net, pose_exp_net, optimizer, args.epoch_size, logger, tb_writer, epoch)
if args.with_gt and args.with_pose:
errors, error_names = validate_with_gt_pose(args, val_loader, disp_net, pose_exp_net, epoch, logger, tb_writer)
elif args.with_gt:
errors, error_names = validate_with_gt(args, val_loader, disp_net,pose_exp_net, epoch, logger, tb_writer)
else:
errors, error_names = validate_without_gt(args, val_loader, disp_net, pose_exp_net, epoch, logger, tb_writer)
error_string = ', '.join('{} : {:.3f}'.format(name, error) for name, error in zip(error_names, errors))
#logger.valid_writer.write(' * Avg {}'.format(error_string))
for error, name in zip(errors, error_names):
tb_writer.add_scalar(name, error, epoch)
if "crane" in args.data and not args.basic_mode:
if epoch %3==0:
validate_vslam(args, disp_net, epoch, tb_writer)
        # Up to you to choose the most relevant error to measure your model's performance; careful, some measures are to be maximized (such as a1, a2, a3)
decisive_error = errors[1]
if best_error < 0:
best_error = decisive_error
# remember lowest error and save checkpoint
is_best = decisive_error < best_error
best_error = min(best_error, decisive_error)
save_checkpoint(
args.save_path, {
'epoch': epoch + 1,
'state_dict': disp_net.module.state_dict()
}, {
'epoch': epoch + 1,
'state_dict': pose_exp_net.module.state_dict()
},
is_best)
with open(args.save_path/args.log_summary, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
writer.writerow([train_loss, decisive_error])
end = time.time()
print("epoch took {} seconds".format(end-start))
def train(args, train_loader, disp_net, pose_exp_net, optimizer, epoch_size, logger, tb_writer, epoch):
global n_iter, device
losses = AverageMeter(precision=4)
w1, w3, w4 = args.photo_loss_weight, args.smooth_loss_weight, args.geometry_consistency_weight
batches_to_log = list(np.linspace(0, len(train_loader), 20).astype(int))
# switch to train mode
disp_net.train()
pose_exp_net.train()
for i, (tgt_img, ref_imgs, intrinsics, intrinsics_inv) in enumerate(train_loader):
log_losses = i > 0 and n_iter % args.print_freq == 0
log_output = args.training_output_freq > 0 and n_iter % args.training_output_freq == 0
tgt_img = tgt_img.to(device)
ref_imgs = [img.to(device) for img in ref_imgs]
intrinsics = intrinsics.to(device)
poses, _ = pose_exp_net(tgt_img, ref_imgs)
pose_matrices = pose_vec2mat_new(poses, args.rotation_mode)
if args.basic_mode:
tgt_depth, ref_depths = compute_depth(disp_net, tgt_img, ref_imgs)
else:
refs_compensated =[]
if compensate_rotation:
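                # Undo the predicted camera rotation on each reference frame so
                # the depth net sees rotation-aligned pairs; pose[:, :, :-1]
                # appears to be the 3x3 rotation block of the 3x4 pose matrix.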
for k in range(len(ref_imgs)):
ref = ref_imgs[k]
pose = pose_matrices[:,k]
inv_pose= pose[:,:,:-1].inverse()
ref_compensated = inverse_rotate(ref, inv_pose, intrinsics)
refs_compensated.append(ref_compensated)
else:
refs_compensated=ref_imgs
tgt_disparities=[]
ref_disparities=[]
for ref in refs_compensated:
depth_input = torch.cat((ref,tgt_img),1)
depth_output =disp_net(depth_input)
tgt_disparities.append(depth_output[1])
ref_disparities.append(depth_output[0])
avg_disparities=[]
for size in range(len(tgt_disparities[0])):
sized_images = []
                for l in range(1):  # sequence length here (currently fixed to 1)
sized_images.append(tgt_disparities[l][size])
avg = torch.mean(torch.stack(sized_images),dim=0)
avg_disparities.append(avg)
tgt_disparities=avg_disparities
tgt_depth = [1/disp for disp in tgt_disparities]
ref_depths=[]
for o in range(len(ref_disparities)):
ref_depths.append([1/disp for disp in ref_disparities[o]])
loss_1, loss_4, warped, diff =photometric_reconstruction_loss(tgt_img, ref_imgs, intrinsics, tgt_depth, ref_depths, poses,args.ssim_weight,args.padding_mode, args.with_mask, args.with_automask)
loss_3 = compute_smooth_loss(tgt_depth,tgt_img,args.use_edge_smooth)
loss = w1*loss_1 + w3*loss_3 + w4*loss_4
if log_losses:
tb_writer.add_scalar('photometric_error', loss_1.item(), n_iter)
tb_writer.add_scalar('disparity_smoothness_loss', loss_3.item(), n_iter)
tb_writer.add_scalar('geometric_consistency_loss', loss_4.item(), n_iter)
tb_writer.add_scalar('total_loss', loss.item(), n_iter)
if args.log_output and i in batches_to_log and not args.basic_mode: # log first output of wanted batches
index = batches_to_log.index(i)
if epoch == 0:
tb_writer.add_image('train Target Image/{}'.format(index), tensor2array(tgt_img[0]), 0)
for j, ref in enumerate(ref_imgs):
tb_writer.add_image('train Source Image {}/{}'.format(j,index), tensor2array(ref[0]), 1)
if not args.log_compensated:
refs_compensated=None
log_output_tensorboard(tb_writer, 'train', index, epoch, tgt_depth[0], tgt_disparities[0], [warped[0]], tgt_disparities, None, refs_compensated)
# record loss and EPE
losses.update(loss.item(), args.batch_size)
# compute gradient and do Adam step
optimizer.zero_grad()
loss.backward()
optimizer.step()
with open(args.save_path/args.log_full, 'a') as csvfile:
writer = csv.writer(csvfile, delimiter='\t')
writer.writerow([loss.item(), loss_1.item(), loss_3.item(), loss_4.item()])
if i >= epoch_size - 1:
break
n_iter += 1
return losses.avg[0]
@torch.no_grad()
def validate_without_gt(args, val_loader, disp_net, pose_exp_net, epoch, logger, tb_writer, sample_nb_to_log=40):
global device, compensate_rotation
losses = AverageMeter(i=4, precision=4)
log_outputs = sample_nb_to_log > 0
# Output the logs throughout the whole dataset
batches_to_log = list(np.linspace(0, len(val_loader), sample_nb_to_log).astype(int))
w1, w3, w4 = args.photo_loss_weight, args.smooth_loss_weight, args.geometry_consistency_weight
#log_poses = np.zeros(((len(val_loader)-1) * args.batch_size * (args.sequence_length-1), 6))
disp_values = np.zeros(((len(val_loader)-1) * args.batch_size * 3))
# switch to evaluate mode
disp_net.eval()
pose_exp_net.eval()
for i, (tgt_img, ref_imgs, intrinsics, intrinsics_inv) in enumerate(val_loader):
if i in batches_to_log:
tgt_img = tgt_img.to(device)
ref_imgs = [img.to(device) for img in ref_imgs]
intrinsics = intrinsics.to(device)
intrinsics_inv = intrinsics_inv.to(device)
poses, object_masks = pose_exp_net(tgt_img, ref_imgs)
pose_matrices = pose_vec2mat_new(poses, args.rotation_mode)
if args.basic_mode:
tgt_depth, ref_depths, disp, _ = compute_depth2(disp_net, tgt_img, ref_imgs)
tgt_disparities=None
disp_uncertainty=None
refs_compensated=None
else:
refs_compensated =[]
if compensate_rotation:
for k in range(len(ref_imgs)):
ref = ref_imgs[k]
pose = pose_matrices[:,k]
inv_pose= pose[:,:,:-1].inverse()
ref_compensated = inverse_rotate(ref, inv_pose, intrinsics)
refs_compensated.append(ref_compensated)
else:
refs_compensated=ref_imgs
tgt_disparities=[]
ref_disparities=[]
            for ref in refs_compensated:  # average the target depth over the reference images (only the left ones when --val-left-imgs is set)
depth_input = torch.cat((ref,tgt_img),1)
depth_output =disp_net(depth_input)
tgt_disparities.append(depth_output[1])
ref_disparities.append(depth_output[0])
if args.val_left_imgs:
tgt_disparities = tgt_disparities[:len(ref_imgs)//2]
disp = torch.mean(torch.stack(tgt_disparities),dim=0)
if len(tgt_disparities)>1:
disp_uncertainty = torch.var(torch.stack(tgt_disparities),dim=0)
else:
disp_uncertainty = None
tgt_depth = 1/disp
ref_depths=[]
for o in range(len(ref_disparities)):
ref_depths.append([1/ref_disparities[o]])
loss_1, loss_4, warped, diff =photometric_reconstruction_loss(tgt_img, ref_imgs, intrinsics, tgt_depth, ref_depths, poses,args.ssim_weight,args.padding_mode, args.with_mask, args.with_automask )
loss_3 = compute_smooth_loss([tgt_depth],tgt_img,args.use_edge_smooth).item()
if log_outputs and i in batches_to_log: # log first output of wanted batches
index = batches_to_log.index(i)
if epoch == 0:
tb_writer.add_image('Target Image/{}'.format(index), tensor2array(tgt_img[0]), 0)
for j, ref in enumerate(ref_imgs):
tb_writer.add_image('Source Image {}/{}'.format(j,index), tensor2array(ref[0]), 1)
if not args.log_compensated:
refs_compensated=None
log_output_tensorboard(tb_writer, 'val', index, epoch, 1./disp, disp, warped, tgt_disparities, disp_uncertainty, refs_compensated)
if i in batches_to_log:
if log_outputs and i < len(val_loader)-1:
step = args.batch_size*(args.sequence_length-1)
#log_poses[i * step:(i+1) * step] = poses.cpu().view(-1, 6).numpy()
step = args.batch_size * 3
disp_unraveled = disp.cpu().view(args.batch_size, -1)
disp_values[i * step:(i+1) * step] = torch.cat([disp_unraveled.min(-1)[0],
disp_unraveled.median(-1)[0],
disp_unraveled.max(-1)[0]]).numpy()
loss = w1*loss_1 + w3*loss_3 + w4*loss_4
losses.update([loss, loss_1, loss_3, loss_4])
if log_outputs:
prefix = 'valid poses'
coeffs_names = ['tx', 'ty', 'tz']
if args.rotation_mode == 'euler':
coeffs_names.extend(['rx', 'ry', 'rz'])
elif args.rotation_mode == 'quat':
coeffs_names.extend(['qx', 'qy', 'qz'])
for i in range(poses.shape[1]):
tb_writer.add_histogram('{} {}'.format(prefix, coeffs_names[i]), poses[:, i], epoch)
tb_writer.add_histogram('disp_values', disp_values, epoch)
return losses.avg, ['Validation Total loss', 'Validation Photo loss', 'Validation Smooth loss', 'Validation Geo loss']
@torch.no_grad()
def validate_with_gt_pose(args, val_loader, disp_net, pose_exp_net, epoch, logger, tb_writer, sample_nb_to_log=3):
global device, compensate_rotation
depth_error_names = ['abs_diff', 'abs_rel', 'sq_rel', 'a1', 'a2', 'a3']
depth_errors = AverageMeter(i=len(depth_error_names), precision=4)
pose_error_names = ['ATE', 'RTE']
pose_errors = AverageMeter(i=2, precision=4)
log_outputs = sample_nb_to_log > 0
# Output the logs throughout the whole dataset
batches_to_log = list(np.linspace(0, len(val_loader), sample_nb_to_log).astype(int))
poses_values = np.zeros(((len(val_loader)-1) * args.batch_size * (args.sequence_length-1), 6))
disp_values = np.zeros(((len(val_loader)-1) * args.batch_size * 3))
disp_net.eval()
pose_exp_net.eval()
for i, (tgt_img, ref_imgs, gt_depth, gt_poses, intrinsics) in enumerate(val_loader):
tgt_img = tgt_img.to(device)
gt_depth = gt_depth.to(device)
gt_poses = gt_poses.to(device)
intrinsics = intrinsics.to(device)
ref_imgs = [img.to(device) for img in ref_imgs]
b = tgt_img.shape[0]
output_poses,_ = pose_exp_net(tgt_img, ref_imgs)
pose_matrices = pose_vec2mat_new(output_poses, args.rotation_mode)
if args.basic_mode:
output_depth, _ = compute_depth(disp_net, tgt_img, ref_imgs)
else:
refs_compensated =[]
if compensate_rotation:
for k in range(len(ref_imgs)):
ref = ref_imgs[k]
pose = pose_matrices[:,k]
inv_pose= pose[:,:,:-1].inverse()
ref_compensated = inverse_rotate(ref, inv_pose, intrinsics)
refs_compensated.append(ref_compensated)
else:
refs_compensated=ref_imgs
tgt_disparities,ref_disparities =[], []
for ref in refs_compensated:
depth_input = torch.cat((ref,tgt_img),1)
depth_output =disp_net(depth_input)
tgt_disparities.append(depth_output[1])
ref_disparities.append(depth_output[0])
if args.val_left_imgs:
tgt_disparities = tgt_disparities[:len(ref_imgs)//2]
output_disp = torch.mean(torch.stack(tgt_disparities),dim=0)
output_depth = 1/output_disp[:, 0]
reordered_output_poses = torch.cat([output_poses[:, :gt_poses.shape[1]//2],
torch.zeros(b, 1, 6).to(output_poses),
output_poses[:, gt_poses.shape[1]//2:]], dim=1)
# pose_vec2mat only takes B, 6 tensors, so we simulate a batch dimension of B * seq_length
unravelled_poses = reordered_output_poses.reshape(-1, 6)
unravelled_matrices = pose_vec2mat(unravelled_poses, rotation_mode=args.rotation_mode)
inv_transform_matrices = unravelled_matrices.reshape(b, -1, 3, 4)
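        # Invert each 3x4 [R|t] analytically (R_inv = R^T, t_inv = -R^T @ t),
        # then chain onto the first frame so all poses are relative to it.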
rot_matrices = inv_transform_matrices[..., :3].transpose(-2, -1)
tr_vectors = -rot_matrices @ inv_transform_matrices[..., -1:]
transform_matrices = torch.cat([rot_matrices, tr_vectors], axis=-1)
first_inv_transform = inv_transform_matrices.reshape(b, -1, 3, 4)[:, :1]
final_poses = first_inv_transform[..., :3] @ transform_matrices
final_poses[..., -1:] += first_inv_transform[..., -1:]
final_poses = final_poses.reshape(b, -1, 3, 4)
if log_outputs and i in batches_to_log:
index = batches_to_log.index(i)
if epoch == 0:
tb_writer.add_image('Target Image/{}'.format(index), tensor2array(tgt_img[0]), 0)
for j, ref in enumerate(ref_imgs):
tb_writer.add_image('Source Image {}/{}'.format(j,index), tensor2array(ref[0]), 0)
depth_to_show = gt_depth[0]
tb_writer.add_image('Target Depth Label/{}'.format(index),
tensor2array(depth_to_show, max_value=None),
epoch)
depth_to_show[depth_to_show == 0] = 1000
disp_to_show = (1/depth_to_show).clamp(0, 10)
tb_writer.add_image('Target Disp Label/{}'.format(index),
tensor2array(disp_to_show, max_value=None, colormap='magma'),
epoch)
tb_writer.add_image('Disp Prediction/{}'.format(index),
tensor2array(output_disp[0], max_value=None, colormap='magma'),
epoch)
tb_writer.add_image('Depth Prediction/{}'.format(index),
tensor2array(output_depth[0], max_value=None),
epoch)
for j, ref_disp in enumerate(ref_disparities):
tb_writer.add_image('Disp Prediction Source Nr {}/{}'.format(j,index),
tensor2array(ref_disp[0], max_value=None, colormap='magma'),
epoch)
if log_outputs and i < len(val_loader)-1:
step = args.batch_size*(args.sequence_length-1)
poses_values[i * step:(i+1) * step] = output_poses.cpu().view(-1, 6).numpy()
step = args.batch_size * 3
disp_unraveled = output_disp.cpu().view(args.batch_size, -1)
disp_values[i * step:(i+1) * step] = torch.cat([disp_unraveled.min(-1)[0],
disp_unraveled.median(-1)[0],
disp_unraveled.max(-1)[0]]).numpy()
depth_errors.update(compute_depth_errors(gt_depth, output_depth))
pose_errors.update(compute_pose_errors(gt_poses, final_poses))
if log_outputs:
prefix = 'valid poses'
coeffs_names = ['tx', 'ty', 'tz']
if args.rotation_mode == 'euler':
coeffs_names.extend(['rx', 'ry', 'rz'])
elif args.rotation_mode == 'quat':
coeffs_names.extend(['qx', 'qy', 'qz'])
for i in range(poses_values.shape[1]):
tb_writer.add_histogram('{} {}'.format(prefix, coeffs_names[i]), poses_values[:, i], epoch)
tb_writer.add_histogram('disp_values', disp_values, epoch)
#logger.valid_bar.update(len(val_loader))
return depth_errors.avg + pose_errors.avg, depth_error_names + pose_error_names
@torch.no_grad()
def validate_vslam(args, disp_net, epoch, tb_writer):
test_files = glob.glob(args.data+"/vslam_0/v_slam/*.jpg") #os.listdir(args.data+"/vslam_0/v_slam/")
#print('{} files to test'.format(len(test_files)))
#os.makedirs(args.save_path/str('vslam/depth/'+str(epoch)))
#os.makedirs(args.save_path/str('vslam/disp/'+str(epoch)))
previous_img = test_files[0]
previous_img = imread(previous_img)
h,w,_ = previous_img.shape
if (h != args.height or w != args.width):
previous_img = resize(previous_img, (args.height, args.width))
previous_img = np.transpose(previous_img, (2, 0, 1))
previous_img = torch.from_numpy(previous_img.astype(np.float32)).unsqueeze(0)
previous_img = ((previous_img - 0.5)/0.5).to(device)
for i in range(1,len(test_files)):
current_img = test_files[i]
current_img = imread(current_img)
h,w,_ = current_img.shape
if (h != args.height or w != args.width):
current_img = resize(current_img, (args.height, args.width))
current_img = np.transpose(current_img, (2, 0, 1))
current_img = torch.from_numpy(current_img.astype(np.float32)).unsqueeze(0)
current_img = ((current_img - 0.5)/0.5).to(device)
if i%2==0:
output = disp_net(torch.cat((previous_img,current_img),1))[0]
            tb_writer.add_image('vslam disp/{}'.format(i),
tensor2array(output, max_value=None, colormap='magma'),
epoch)
depth = 1/output
            tb_writer.add_image('vslam depth/{}'.format(i),
tensor2array(depth, max_value=None),
epoch)
previous_img=current_img
@torch.no_grad()
def validate_with_gt(args, val_loader, disp_net, pose_exp_net, epoch, logger, tb_writer, sample_nb_to_log=40):
global device
batch_time = AverageMeter()
error_names = ['abs_diff', 'abs_rel', 'sq_rel', 'a1', 'a2', 'a3']
errors = AverageMeter(i=len(error_names))
log_outputs = sample_nb_to_log > 0
# Output the logs throughout the whole dataset
batches_to_log = list(np.linspace(0, len(val_loader)-1, sample_nb_to_log).astype(int))
# switch to evaluate mode
disp_net.eval()
end = time.time()
#logger.valid_bar.update(0)
for i, (tgt_img, depth, ref_imgs, intrinsics) in enumerate(val_loader):
tgt_img = tgt_img.to(device)
ref_imgs = [img.to(device) for img in ref_imgs]
depth = depth.to(device)
intrinsics=intrinsics.to(device)
if args.basic_mode:
# compute output
#output_disp = disp_net(tgt_img)
#output_depth = 1/output_disp[:, 0]
output_depth, _ ,output_disp, ref_disparities= compute_depth2(disp_net, tgt_img, ref_imgs)
output_depth =output_depth[:,0]
else:
poses,_ = pose_exp_net(tgt_img, ref_imgs)
pose_matrices = pose_vec2mat_new(poses, args.rotation_mode)
refs_compensated =[]
if compensate_rotation:
for k in range(len(ref_imgs)):
ref = ref_imgs[k]
pose = pose_matrices[:,k]
inv_pose= pose[:,:,:-1].inverse()
ref_compensated = inverse_rotate(ref, inv_pose, intrinsics)
refs_compensated.append(ref_compensated)
else:
refs_compensated=ref_imgs
tgt_disparities=[]
ref_disparities=[]
for ref in refs_compensated:
depth_input = torch.cat((ref,tgt_img),1)
depth_output =disp_net(depth_input)
tgt_disparities.append(depth_output[1])
ref_disparities.append(depth_output[0])
#if args.val_left_imgs:
# tgt_disparities = tgt_disparities[:len(ref_imgs)//2]
output_disp = torch.mean(torch.stack(tgt_disparities),dim=0)
output_depth = 1/output_disp[:, 0]
if log_outputs and i in batches_to_log:
index = batches_to_log.index(i)
if epoch == 0:
                tb_writer.add_image('Target Image/{}'.format(index), tensor2array(tgt_img[0]), 0)
for j, ref in enumerate(ref_imgs):
tb_writer.add_image('Source Image {}/{}'.format(j,index), tensor2array(ref[0]), 0)
depth_to_show = depth[0]
tb_writer.add_image('Target Depth Label/{}'.format(index),
tensor2array(depth_to_show, max_value=None),
epoch)
depth_to_show[depth_to_show == 0] = 1000
disp_to_show = (1/depth_to_show).clamp(0, 10)
tb_writer.add_image('Target Disp Label/{}'.format(index),
tensor2array(disp_to_show, max_value=None, colormap='magma'),
epoch)
tb_writer.add_image('Disp Prediction/{}'.format(index),
tensor2array(output_disp[0], max_value=None, colormap='magma'),
epoch)
tb_writer.add_image('Depth Prediction/{}'.format(index),
tensor2array(output_depth[0], max_value=None),
epoch)
for j, ref_disp in enumerate(ref_disparities):
tb_writer.add_image('Disp Prediction Source Nr {}/{}'.format(j,index),
tensor2array(ref_disp[0], max_value=None, colormap='magma'),
epoch)
if "kitti" in args.data:
crop=True
else:
crop=False
if depth.nelement() != output_depth.nelement():
b, h, w = depth.size()
output_depth = torch.nn.functional.interpolate(output_depth.unsqueeze(1), [h, w]).squeeze(1)
errors.update(compute_depth_errors(depth, output_depth,crop))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
return errors.avg, error_names
def compute_depth(disp_net, tgt_img, ref_imgs):
tgt_depth = [1/disp for disp in disp_net(tgt_img)]
ref_depths = []
for ref_img in ref_imgs:
ref_depth = [1/disp for disp in disp_net(ref_img)]
ref_depths.append(ref_depth)
return tgt_depth, ref_depths
def compute_depth2(disp_net, tgt_img, ref_imgs):
tgt_disp=disp_net(tgt_img)
tgt_depth = 1/tgt_disp
ref_depths = []
ref_disps=[]
for ref_img in ref_imgs:
ref_disp = disp_net(ref_img)
ref_depth = 1/ref_disp
ref_disps.append(ref_disp)
ref_depths.append([ref_depth])
return tgt_depth, ref_depths, tgt_disp, ref_disps
if __name__ == '__main__':
main()
| 48.083241 | 206 | 0.616116 | 5,457 | 43,323 | 4.651274 | 0.111233 | 0.014498 | 0.02813 | 0.012607 | 0.54385 | 0.491845 | 0.463439 | 0.430423 | 0.407494 | 0.39004 | 0 | 0.01248 | 0.273134 | 43,323 | 900 | 207 | 48.136667 | 0.793554 | 0.03707 | 0 | 0.414634 | 0 | 0.001355 | 0.142823 | 0.011204 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012195 | false | 0 | 0.04336 | 0 | 0.063686 | 0.02168 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d7c8351f2e5f82917fe01cfc0e7ffa0119aecd0 | 13,038 | py | Python | discrete_opt/optimization.py | claudiofahey/discrete_opt | 0bd794e1f3d8552653e49df748fc90f97e6f2af8 | [
"MIT"
] | null | null | null | discrete_opt/optimization.py | claudiofahey/discrete_opt | 0bd794e1f3d8552653e49df748fc90f97e6f2af8 | [
"MIT"
] | null | null | null | discrete_opt/optimization.py | claudiofahey/discrete_opt | 0bd794e1f3d8552653e49df748fc90f97e6f2af8 | [
"MIT"
] | null | null | null | import logging
import itertools
import numpy as np
from scipy.optimize import OptimizeResult, minimize_scalar
import scipy.constants
from .util import find_vertex_x_of_positive_parabola
def scalar_discrete_gap_filling_minimizer(
fun, bracket, args=(), tol=1.0, maxfev=None, maxiter=100, callback=None, verbose=False,
parabolic_method=False, golden_section_method=False, best_x_aggregator=None, **options):
"""Find a local minimum of a scalar function of a single integer variable.
The domain of the function is all integers between, and including, the bracket.
The function may have flat spots where f(a) == f(b) for a != b and this method will
attempt to search around and within the flat spots.
The function must have exactly one local minimum in the bracket.
This method maintains a left and right bracket, where the function value is greater than the best known minimum.
It also maintains a list of best x values, and the function values at all of these x values equals the best known
minimum.
At each iteration, it finds the largest gap in these x values (including the brackets) and selects
the point in the center of the largest gap.
It will then either adjust the bracket or add to the list of best x values.
The method terminates when the largest gap is less than or equal to tol.
Parameters
----------
bracket : array_like
A tuple of the bounds of the function (x_min, x_max).
Optionally, a 3-tuple can be specified and the middle point will be the initial best point.
tol : float
The method terminates when the largest gap is less than or equal to this value.
Returns
-------
OptimizeResult
The result of the minimization.
"""
# bestx is a list.
# besty is a scalar and equals f(x) for all x in bestx.
funcalls = 0
# print('parabolic_method=%s,golden_section_method=%s' % (parabolic_method,golden_section_method))
if len(bracket) == 2:
bracket_left_x = bracket[0]
bracket_right_x = bracket[1]
bestx = [np.round(np.mean([bracket_left_x, bracket_right_x]))]
a = bracket_left_x
b = bracket_right_x
if golden_section_method:
bestx = [np.round(b - (b - a) / scipy.constants.golden)]
else:
bestx = [np.round(np.mean([a, b]))]
elif len(bracket) == 3:
bracket_left_x = bracket[0]
bracket_right_x = bracket[2]
bestx = [bracket[1]]
else:
raise ValueError('Invalid bracket')
assert isinstance(bestx, list)
if not (bracket_left_x <= bestx[0] <= bracket_right_x):
raise ValueError('Invalid bracket')
if best_x_aggregator is None:
best_x_aggregator = lambda x: x[int((len(x)-1)/2)]
# Evaluate function at bestx.
besty = fun(bestx[0])
funcalls += 1
assert np.isscalar(besty)
# Evaluate function at brackets to determine if they are better than the initial bestx.
bracket_left_y = fun(bracket_left_x, *args)
bracket_right_y = fun(bracket_right_x, *args)
funcalls += 2
if bracket_left_y < besty:
bestx = [bracket_left_x]
besty = bracket_left_y
if bracket_right_y < besty:
bestx = [bracket_right_x]
besty = bracket_right_y
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
niter = 0
while niter < maxiter:
niter += 1
X = np.array([bracket_left_x] + bestx + [bracket_right_x])
Y = np.array([bracket_left_y] + [besty] * len(bestx) + [bracket_right_y])
# if verbose:
# logging.info('X=%s' % str(X))
# logging.info('Y=%s' % str(Y))
testx = None
testx_index = None
#
# Step 1: Determine the value of x to test next (testx).
#
# If we have exactly one bestx, then fit a parabola to the 3 points and test the vertex.
if parabolic_method and len(bestx) == 1:
if verbose: logging.info('Attempting parabolic method')
try:
# Attempt to fit a parabola to the 3 points and find the vertex.
testx = find_vertex_x_of_positive_parabola(X, Y)
if verbose: logging.info('Parabolic method returned testx=%f' % testx)
testx = np.round(testx)
if testx <= bracket_left_x or testx >= bracket_right_x or testx == bestx[0]:
testx = None
elif testx <= bestx[0]:
testx_index = 0
else:
testx_index = 1
            except Exception:
# This will happen if a parabola can't be fit through the 3 points.
# Ignore error and use the gap method below.
testx = None
if testx is None:
# Measure gaps in brackets and bestx and find the largest one.
if verbose: logging.info('Attempting gap method')
gaps = np.diff(X)
testx_index = np.argmax(gaps)
gapsize = gaps[testx_index]
if gapsize <= tol:
if verbose: logging.info('Achieved gap size tol')
break
# Pick a point between the largest gap.
a = X[testx_index]
b = X[testx_index + 1]
if golden_section_method:
golden_distance = (b - a) / scipy.constants.golden
if bool(np.random.randint(low=0, high=2)):
testx = np.round(b - golden_distance)
else:
testx = np.round(a + golden_distance)
else:
testx = np.round(np.mean([a, b]))
if verbose: logging.info('gapsize=%f, len(bestx)=%d, testx=%f' % (gapsize, len(bestx), testx))
assert(testx is not None)
assert(testx_index is not None)
assert(bracket_left_x <= testx <= bracket_right_x)
#
# Step 2: Evaluate function at testx.
#
testy = fun(testx, *args)
funcalls += 1
#
# Step 3: Update bracket, etc. based on function value testy at testx.
#
add_to_bestx = False
if testy < besty:
# Found a point better than all others so far.
# The new bracket will be the points to the immediate left and right of the test point.
bestx = [testx]
besty = testy
bracket_left_x = X[testx_index]
bracket_left_y = Y[testx_index]
bracket_right_x = X[testx_index + 1]
bracket_right_y = Y[testx_index + 1]
elif testy > besty:
# Point is worse than best. Reduce bracket.
if testx_index == 0:
# Test point was adjacent to left bracket.
bracket_left_x = testx
bracket_left_y = testy
elif testx_index == len(X) - 2:
# Test point was adjacent to right bracket.
bracket_right_x = testx
bracket_right_y = testy
else:
# Test point was inside the set of bestx points but is worse than besty.
# This indicates more than one local minima or a round off error.
# We will assume a round off error and handle it as if it had the same besty.
add_to_bestx = True
else:
# Point is same as best. Add it to the bestx list.
add_to_bestx = True
if add_to_bestx:
bestx = sorted(bestx + [testx])
if verbose: logging.info('bracket=(%f,%s,%f); besty=%f' % (bracket_left_x, str(bestx), bracket_right_x, besty))
if callback is not None:
callback(bestx)
if maxfev is not None and funcalls >= maxfev:
break
# Return the x that is in the median of bestx.
bestx = best_x_aggregator(np.array(bestx))
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
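# Hedged usage sketch (illustration only, not part of the original module):
# scipy's minimize_scalar accepts a callable as `method`, which is how the
# module itself invokes the minimizer above. The objective g is invented;
# its integer division creates the flat spots the method is designed for.
def _example_scalar_usage():
    def g(x):
        return (x - 37) ** 2 // 4  # flat plateau around the minimum at 37
    return minimize_scalar(g, bracket=(0, 100),
                           method=scalar_discrete_gap_filling_minimizer,
                           options={'tol': 1.0})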
def multivariate_discrete_gap_filling_minimizer(
fun, x0, bounds, args=(), tol=1.0, maxfev=None, maxiter=2, callback=None, verbose=False,
scalar_options={}, axes=None, **options):
"""It is assumed that there is exactly one local minimum in the domain.
For each dimension, the domain of the function consists of all integers between, and including, the bounds.
The function may have flat spots where f(a) == f(b) for a != b and this method will
attempt to search around and within the flat spots.
    This multivariate method uses `scalar_discrete_gap_filling_minimizer` repeatedly along each dimension
for a fixed number of iterations. There is currently no other stopping criteria.
Parameters
----------
fun: Function of a single variable of a list-type.
x0 : array_like
Initial guess.
bounds
List-type of (min, max) pairs for each element in x, defining the bounds in that dimension.
tol
See `scalar_discrete_gap_filling_minimizer`.
axes : array_like
Number of columns must equal length of x0.
The rows will determine the set of axes that this function will optimize along.
Leave as None to use unit axes along each dimension.
Returns
-------
OptimizeResult
The result of the minimization.
"""
ndims = len(x0)
bounds = np.array(bounds)
if bounds.shape != (ndims, 2):
raise ValueError()
if axes is None:
axes = np.eye(ndims)
if axes.shape[1] != ndims:
raise ValueError()
naxes = len(axes)
if naxes <= 0:
raise ValueError()
bestx = x0
besty = np.inf
niter = 0
funcalls = 0
while niter < maxiter:
niter += 1
for i in range(naxes):
axis = axes[i]
if verbose:
logging.info('multivariate_discrete_gap_filling_minimizer: axis %d, %s' % (i, str(axis)))
def transform(t):
return bestx + t*axis
# Function of single variable (t) that we will optimize during this iteration.
def scalar_fun(t):
testx = transform(t)
return fun(testx)
# logging.info(transform(0.0))
# logging.info(scalar_fun(0.0))
# Determine bracket along optimization axis (for t).
# All axes must remain within their respective bounds.
bracket = np.array([-np.inf, 0.0, np.inf])
for j in range(ndims):
if axis[j] != 0.0:
btj = np.sort((bounds[j] - bestx[j]) / axis[j])
if bracket[0] < btj[0]:
bracket[0] = btj[0]
if bracket[2] > btj[1]:
bracket[2] = btj[1]
# if verbose:
# logging.info('multivariate_discrete_gap_filling_minimizer: bracket=%s' % str(bracket))
optresult = minimize_scalar(
scalar_fun, bracket=bracket, tol=tol, method=scalar_discrete_gap_filling_minimizer,
options=scalar_options)
if verbose:
logging.info('minimize_scalar returned t=%f, y=%f' % (optresult.x, optresult.fun))
bestx = transform(optresult.x)
besty = optresult.fun
if verbose:
logging.info(
'multivariate_gap_filling_minimizer: niter=%d, axis=%d, best f(%s) = %f'
% (niter, i, str(bestx), besty))
funcalls += optresult.nfev
if maxfev is not None and funcalls >= maxfev:
break
return OptimizeResult(fun=besty, x=bestx, nit=niter, nfev=funcalls, success=(niter > 1))
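# Hedged usage sketch (illustration only): coordinate descent over an invented
# separable integer objective, using the multivariate wrapper above.
def _example_multivariate_usage():
    def g(x):
        return (x[0] - 10) ** 2 + abs(x[1] + 5)  # unique minimum at (10, -5)
    return multivariate_discrete_gap_filling_minimizer(
        g, x0=np.array([0.0, 0.0]), bounds=[(-50, 50), (-50, 50)], maxiter=2)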
def simple_global_minimizer_spark(
fun, x0, bounds, sc=None, verbose=True, **options):
"""Exhaustive global minimizer with same calling convention as `multivariate_discrete_gap_filling_minimizer`.
Parameters
----------
fun
Function of a single variable of a list-type.
x0
Unused but kept for compatibility.
bounds
List-type of (min, max) pairs for each element in x, defining the bounds in that dimension.
sc (SparkContext)
The SparkContext.
Returns
-------
OptimizeResult
The result of the minimization.
"""
axis_domains = [range(a, b+1) for a, b in bounds]
domain_size = np.product([len(d) for d in axis_domains])
if verbose:
logging.info('simple_global_minimizer_spark: domain_size=%d' % domain_size)
domain = itertools.product(*axis_domains)
domain_rdd = sc.parallelize(domain)
# Evaluate function at each point in parallel using Spark.
fun_eval_rdd = domain_rdd.map(lambda x: (fun(x), x))
# Find the minimum y value. Secondary sort on the x value for tie breaking.
best_y, best_x = fun_eval_rdd.min(lambda yx: (yx[0][0], yx[1]) if isinstance(yx[0], tuple) else (yx[0], yx[1]))
return OptimizeResult(fun=best_y, x=best_x, nfev=domain_size, success=True)
| 37.465517 | 119 | 0.60224 | 1,759 | 13,038 | 4.346219 | 0.169983 | 0.028777 | 0.021975 | 0.034009 | 0.300065 | 0.227077 | 0.186658 | 0.155396 | 0.148332 | 0.122433 | 0 | 0.009129 | 0.311091 | 13,038 | 347 | 120 | 37.573487 | 0.842017 | 0.354656 | 0 | 0.244444 | 0 | 0 | 0.053047 | 0.013447 | 0 | 0 | 0 | 0 | 0.027778 | 1 | 0.027778 | false | 0 | 0.033333 | 0.005556 | 0.088889 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d7e74a1b9166e627ccdb832e0e9c0dbaf7e1c61 | 5,240 | py | Python | firm/forms.py | mstgnz/WMS | 64aaa43fdd8fb682dedb792831b13d64046b385f | [
"Apache-2.0"
] | null | null | null | firm/forms.py | mstgnz/WMS | 64aaa43fdd8fb682dedb792831b13d64046b385f | [
"Apache-2.0"
] | null | null | null | firm/forms.py | mstgnz/WMS | 64aaa43fdd8fb682dedb792831b13d64046b385f | [
"Apache-2.0"
] | null | null | null | from django import forms
from .models import Firm, Worksite, Subcontractor, Contract, Specification, Project
class DateInput(forms.DateInput):
input_type = 'date'
# FIRM FORM
class FirmForm(forms.ModelForm):
class Meta:
model = Firm
fields = ['tax','name','full_name','phone','fax','web','email','address','image']
def __init__(self, *args, **kwargs):
super(FirmForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['name'].disabled = True
self.fields['tax'].disabled = True
if instance:
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
# WORKSITE FORM
class WorksiteForm(forms.ModelForm):
class Meta:
model = Worksite
fields = ['name','employer','name_of_job','control','construction_area','threader_no','island_no','parcel_no','phone','fax','address','image','start_date','end_date','active']
widgets = {
'start_date': DateInput(),
'end_date': DateInput()
}
def __init__(self, *args, **kwargs):
super(WorksiteForm, self).__init__(*args, **kwargs)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['name'].disabled = True
if instance:
for field in self.fields:
if field != "active":
if field == "image":
self.fields[field].widget.attrs['class'] = 'custom-file-input'
else:
self.fields[field].widget.attrs['class'] = 'form-control'
if field in ['threader_no','island_no','parcel_no']:
self.fields[field].widget.attrs['class'] += ' uppercase'
# SUBCONTRACTOR FORM
class SubcontractorForm(forms.ModelForm):
class Meta:
model = Subcontractor
fields = ['worksite','tax','name','email','phone','subject','address']
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SubcontractorForm, self).__init__(*args, **kwargs)
if self.user:
self.fields['worksite'].queryset = self.user.worksite.filter(active=True)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['name'].disabled = True
if instance:
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
# CONTRACT FORM
class ContractForm(forms.ModelForm):
class Meta:
model = Contract
fields = ['worksite','subcontractor','no','date','name','price','guarantee','advance','progress','note','file']
widgets = {
'date': DateInput(),
'file': forms.FileInput(attrs={'accept': '.pdf'})
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(ContractForm, self).__init__(*args, **kwargs)
if self.user:
self.fields['worksite'].queryset = self.user.worksite.filter(active=True)
            # Used to set the choice list manually
#worksites = [(i.id, i.name) for i in self.user.worksite.filter(active=True)]
#self.fields['worksite'] = forms.ChoiceField(choices=worksites)
instance = getattr(self, 'instance', None)
if instance and instance.pk:
self.fields['worksite'].disabled = True
self.fields['subcontractor'].disabled = True
if instance:
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
# SPECIFICATION FORM
class SpecificationForm(forms.ModelForm):
class Meta:
model = Specification
fields = ['contract','name','file']
widgets = {
'file': forms.FileInput(attrs={'accept': '.pdf'})
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(SpecificationForm, self).__init__(*args, **kwargs)
if self.user:
self.fields['contract'].queryset = Contract.objects.filter(worksite__in=self.user.worksite.filter(active=True))
instance = getattr(self, 'instance', None)
if instance:
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
# PROJECT FORM
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
fields = ['worksite','no','date','name','category','file']
widgets = {
'date': DateInput(),
'file': forms.FileInput(attrs={'accept': '.dwg'})
}
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super(ProjectForm, self).__init__(*args, **kwargs)
if self.user:
self.fields['worksite'].queryset = self.user.worksite.filter(active=True)
instance = getattr(self, 'instance', None)
if instance:
for field in self.fields:
self.fields[field].widget.attrs['class'] = 'form-control'
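# Hedged sketch (not part of the original app): the widget-class boilerplate
# repeated in every __init__ above could be factored into a mixin; the name
# BootstrapFormMixin is invented here for illustration.
class BootstrapFormMixin:
    """Adds the Bootstrap 'form-control' CSS class to every field widget."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for field in self.fields:
            self.fields[field].widget.attrs['class'] = 'form-control'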
| 37.971014 | 183 | 0.579389 | 558 | 5,240 | 5.327957 | 0.195341 | 0.08409 | 0.040363 | 0.056509 | 0.639421 | 0.582913 | 0.527077 | 0.514968 | 0.500841 | 0.462159 | 0 | 0 | 0.273855 | 5,240 | 137 | 184 | 38.248175 | 0.780815 | 0.05 | 0 | 0.579439 | 0 | 0 | 0.14093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056075 | false | 0 | 0.018692 | 0 | 0.205607 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d7fac852950bdea4eeed4896c20818406f39dcc | 2,976 | py | Python | No_0188_Best Time to Buy and Sell Stock IV/by_dynamic_programming.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 32 | 2020-01-05T13:37:16.000Z | 2022-03-26T07:27:09.000Z | No_0188_Best Time to Buy and Sell Stock IV/by_dynamic_programming.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | null | null | null | No_0188_Best Time to Buy and Sell Stock IV/by_dynamic_programming.py | coderMaruf/leetcode-1 | 20ffe26e43999e44c8acf9800acb371a49bb5853 | [
"MIT"
] | 8 | 2020-06-18T16:17:27.000Z | 2022-03-15T23:58:18.000Z | '''
Description:
Say you have an array for which the i-th element is the price of a given stock on day i.
Design an algorithm to find the maximum profit. You may complete at most k transactions.
Note:
You may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).
Example 1:
Input: [2,4,1], k = 2
Output: 2
Explanation: Buy on day 1 (price = 2) and sell on day 2 (price = 4), profit = 4-2 = 2.
Example 2:
Input: [3,2,6,5,0,3], k = 2
Output: 7
Explanation: Buy on day 2 (price = 2) and sell on day 3 (price = 6), profit = 6-2 = 4.
Then buy on day 5 (price = 0) and sell on day 6 (price = 3), profit = 3-0 = 3.
'''
# n : the length of input prices
## Time Complexity: O( n )
#
# The overhead in time is the cost of for-loop iteration, which is of O( n )
## Space Complexity: O( n )
#
# The overhead in space is the storage for dynamic programming table, which is of O( n )
from typing import List
class Solution:
def maxProfit(self, k: int, prices: List[int]) -> int:
# ------------------------------------------------------------------
def max_profit_k_inf( prices ):
# Solve max profit with k = ∞ in DP, with space optimization
dp_not_hold, dp_hold = 0, -float('inf')
for stock_price in prices:
prev_not_hold, prev_hold = dp_not_hold, dp_hold
dp_not_hold = max(prev_not_hold, prev_hold + stock_price )
dp_hold = max(prev_hold, prev_not_hold - stock_price)
return dp_not_hold
# ------------------------------------------------------------------
n = len(prices)
if k > n // 2:
# k is larger than threshold, in this case, it is the same as k = ∞
return max_profit_k_inf( prices )
else:
# k is smaller than or equal to threshold, solve in DP with [k transactions][n stock prices]
dp = [ [ 0 for _ in range(n) ] for _ in range(k+1) ]
for i in range(1, k+1):
local_max_profit = -prices[0]
for j in range(1, n):
dp[i][j] = max( dp[i][j-1], local_max_profit + prices[j] )
local_max_profit = max( local_max_profit, dp[i-1][j-1] - prices[j] )
return dp[k][n-1]
import unittest
class Testing( unittest.TestCase ):
def test_case_1( self ):
result = Solution().maxProfit( prices=[2,4,1], k = 2)
self.assertEqual(result, 2)
def test_case_2( self ):
result = Solution().maxProfit( prices=[3,2,6,5,0,3], k = 2)
self.assertEqual(result, 7)
if __name__ == '__main__':
unittest.main() | 26.810811 | 112 | 0.510753 | 419 | 2,976 | 3.51074 | 0.281623 | 0.023793 | 0.024473 | 0.024473 | 0.265126 | 0.069341 | 0.010877 | 0.010877 | 0 | 0 | 0 | 0.032761 | 0.353831 | 2,976 | 111 | 113 | 26.810811 | 0.731149 | 0.426411 | 0 | 0 | 0 | 0 | 0.006528 | 0 | 0 | 0 | 0 | 0 | 0.064516 | 1 | 0.129032 | false | 0 | 0.064516 | 0 | 0.354839 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1d81a396174df861ce3d129dfba4a5169ad7c8bc | 3,491 | py | Python | src/screen.py | JushBJJ/crapindow | 0774c7149d3246b713c94f4d1f4252a2b47e9f84 | [
"MIT"
] | null | null | null | src/screen.py | JushBJJ/crapindow | 0774c7149d3246b713c94f4d1f4252a2b47e9f84 | [
"MIT"
] | null | null | null | src/screen.py | JushBJJ/crapindow | 0774c7149d3246b713c94f4d1f4252a2b47e9f84 | [
"MIT"
] | null | null | null | """ Basically the whole terminal screen itself. """
import sys
import os
import colorama
import cursor
foreground_colours = {
"BLACK": "\x1b[30m",
"RED": "\x1b[31m",
"GREEN": "\x1b[32m",
"YELLOW": "\x1b[33m",
"BLUE": "\x1b[34m",
"MAGENTA": "\x1b[35m",
"CYAN": "\x1b[36m",
"WHITE": "\x1b[37m",
"RESET": "\x1b[39m"
}
background_colours = {
"BLACK": "\x1b[40m",
"RED": "\x1b[41m",
"GREEN": "\x1b[42m",
"YELLOW": "\x1b[43m",
"BLUE": "\x1b[44m",
"MAGENTA": "\x1b[45m",
"CYAN": "\x1b[46m",
"WHITE": "\x1b[47m",
"RESET": "\x1b[49m"
}
class Screen:
"""
    Screen class initialises the terminal screen and wraps writes to
    stdout.
    Includes clear_line, clear_screen, manual flushing, and helpers for
    querying the terminal's number of columns and lines.
"""
def __init__(self):
""" Automatically initialises the terminal and cursor. """
colorama.init(wrap=True) # Wraps stdout to enable most ansi support
self.stdout = sys.stdout
self.cursor = cursor.Cursor(self)
self.cursor.pos(self.get_width()-1, self.get_height()-1)
self.width = self.get_width()
self.height = self.get_height()
self.clear_screen()
    # foreground_colours and background_colours are pretty much copied from
    # colorama's ansi source, but put into dictionaries so things are easier
    # when printing out foreground colours.
def foreground_colour(self, colour, flush=True):
""" Change foreground colour (text colour) """
self.write(foreground_colours[colour], flush=flush)
def background_colour(self, colour, flush=True):
""" Change background colour """
self.write(background_colours[colour], flush=flush)
def flush(self):
""" Flushes stdout """
self.stdout.flush()
def autoposition(self, string):
""" This automatcally updates the position variables of the cursor.
Can possibly lead to peformance issues within writing to stdout """
for char in string:
if char == "\n":
self.cursor.x = 1
self.cursor.y += 1
else:
self.cursor.x += len(char)
def write(self, string, ansi=False, flush=True):
""" Manually writes stdout and automatically updates cursor position
If ansi is true, it doesn't update the cursor position. """
self.stdout.write(string)
if flush:
self.stdout.flush()
if not ansi:
self.autoposition(string)
def clear_line(self):
""" Clears the current line the cursor is in. """
self.write(colorama.ansi.clear_line(), ansi=True)
def clear_screen(self):
""" Clears the screen by moving the cursor to the end of the
terminal screen and calls ansi sequence.
Puts the cursor at the end of the terminal screen """
self.cursor.pos(self.get_width(), self.get_height())
self.write(colorama.ansi.clear_screen())
self.cursor.reset_pos()
def get_width(self):
""" Get the amount of columns of the terminal
(constantly updated automatically) """
return os.get_terminal_size()[0]-1
def get_height(self):
""" Get the amount of lines of th terminal
(constantly updated automatically) """
return os.get_terminal_size()[1]-1
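# Hedged usage sketch (illustration only, not part of the original module):
# drawing a line of coloured text with the Screen class above, assuming the
# repo's local `cursor` module is importable alongside this file.
if __name__ == "__main__":
    screen = Screen()
    screen.foreground_colour("GREEN")
    screen.write("hello, terminal\n")
    screen.foreground_colour("RESET")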
| 27.928 | 79 | 0.599542 | 429 | 3,491 | 4.804196 | 0.351981 | 0.027171 | 0.017467 | 0.016497 | 0.205725 | 0.137797 | 0.059195 | 0.059195 | 0.059195 | 0 | 0 | 0.024672 | 0.280149 | 3,491 | 124 | 80 | 28.153226 | 0.795464 | 0.338585 | 0 | 0.03125 | 0 | 0 | 0.110638 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.0625 | 0 | 0.265625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |