hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e6266840cb7ce270f6afeec9709e2ac1a2d1d286
| 1,426
|
py
|
Python
|
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 10
|
2015-04-28T14:15:04.000Z
|
2021-03-15T00:07:38.000Z
|
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | null | null | null |
scripts/sequence/replace_selenocystein.py
|
mahajrod/MAVR
|
4db74dff7376a2ffe4426db720b241de9198f329
|
[
"MIT"
] | 6
|
2017-03-16T22:38:41.000Z
|
2021-08-11T00:22:52.000Z
|
#!/usr/bin/env python
__author__ = 'Sergei F. Kliver'
import argparse
import os
from copy import deepcopy
from Bio import SeqIO
from Bio.Seq import Seq
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input_file", action="store", dest="input_file",
help="Input file with sequences")
parser.add_argument("-c", "--symbol_to_use", action="store", dest="char_to_use",
default="X",
help="Symbol to use to replace selenocystein. Default - 'X'")
parser.add_argument("-o", "--output", action="store", dest="output",
help="File to write output")
parser.add_argument("-f", "--format", action="store", dest="format", default="fasta",
help="Format of input and output files. Allowed formats genbank, fasta(default)")
args = parser.parse_args()
tmp_index_file = "temp.idx"
print("Parsing %s..." % args.input_file)
sequence_dict = SeqIO.index_db(tmp_index_file, args.input_file, format=args.format)
def record_with_replacenment_generator(sequence_dict):
for record_id in sequence_dict:
new_record = deepcopy(sequence_dict[record_id])
new_record.seq = Seq(str(sequence_dict[record_id].seq).replace("U", args.char_to_use).replace("u", args.char_to_use))
yield new_record
SeqIO.write(record_with_replacenment_generator(sequence_dict), args.output, args.format)
os.remove(tmp_index_file)
| 33.952381
| 125
| 0.691445
| 196
| 1,426
| 4.795918
| 0.377551
| 0.076596
| 0.07234
| 0.065957
| 0.13617
| 0.13617
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173212
| 1,426
| 41
| 126
| 34.780488
| 0.797286
| 0.014025
| 0
| 0
| 0
| 0
| 0.228245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.185185
| 0
| 0.222222
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e626e6e7f40b567d4b7615f9b578110b40aa795b
| 438
|
py
|
Python
|
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
Aulas Gustavo Guanabara/Aula018.1.py
|
RobertoRanulfo/Phyton
|
d7ba1aaffac2f3d78e46fc96b480b6a62d6dfe01
|
[
"MIT"
] | null | null | null |
teste = list()
teste.append('Gustavo')
teste.append(40)
galera = []
galera.append(teste) #neste caso estamos criando uma ligação entre as duas listas
teste[0] = 'Maria'
teste[1] = 22
galera.append(teste)
print(teste)
print(galera) # No caso os elementos não se acumularam porque não foi feita uma cópia dos elementos da lista
# e sim um elo que espelha a lista... dessa forma ela foi copiada mais uma vez do jeito que estava
| 39.818182
| 112
| 0.730594
| 72
| 438
| 4.444444
| 0.694444
| 0.06875
| 0.10625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01676
| 0.182648
| 438
| 11
| 112
| 39.818182
| 0.877095
| 0.568493
| 0
| 0.2
| 0
| 0
| 0.064171
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e628182131b1688593a8c2682f0d77aa16ecd697
| 1,287
|
py
|
Python
|
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
camcommander/watcher.py
|
tparker-usgs/camcommander
|
0e508a1b24cc99496745652e52118000470d7e32
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
#
# I waive copyright and related rights in the this work worldwide
# through the CC0 1.0 Universal public domain dedication.
# https://creativecommons.org/publicdomain/zero/1.0/legalcode
#
# Author(s):
# Tom Parker <tparker@usgs.gov>
""" watch for new webcam images."""
import zmq
import tomputils.util as tutil
class Watcher:
def __init__(self, config, proxy_frontend, context=None):
global logger
logger = tutil.setup_logging("watcher errors")
self.config = config
self.context = context or zmq.Context().instance()
self.socket = self.context.socket(zmq.SUB)
self.socket.connect(proxy_frontend)
def watch(self):
pass
def watcher_factory(config, proxy_frontend):
if config["type"] == "console":
msg = "Creating %s watcher %s."
logger.debug(msg.format(config["name"], config["type"]))
return ConsoleWatcher(config, proxy_frontend)
else:
error_msg = "Unkown watcher type %s for source %s"
tutil.exit_with_error(error_msg.format(config["type"], config["name"]))
class ConsoleWatcher(Watcher):
def watch(self):
run = True
while run:
image = self.socket.recv()
logger.info("New Image: %s", image)
| 26.8125
| 79
| 0.655012
| 164
| 1,287
| 5.054878
| 0.554878
| 0.062726
| 0.068758
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006012
| 0.224553
| 1,287
| 47
| 80
| 27.382979
| 0.824649
| 0.212121
| 0
| 0.076923
| 0
| 0
| 0.113
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0.038462
| 0.076923
| 0
| 0.346154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e629f1eb273463da4f3c8be6f4e44ca1b639ae9f
| 1,866
|
py
|
Python
|
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
Filter/kalman_filter.py
|
KNakane/filter
|
43ece9771003b63b477499dab2eb8d69e5bfdabe
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class KalmanFilter():
def __init__(self, data, dim=1):
self.data = data.values
self.timelength = len(self.data)
# 潜在変数
self.x = np.zeros((self.timelength+1, dim))
self.x_filter = np.zeros((self.timelength+1, dim))
# 共分散行列
self.sigma = np.zeros((self.timelength+1, dim))
self.sigma_filter = np.zeros((self.timelength+1, dim))
# 状態遷移行列
self.A = np.ones(dim)
# 観測行列
self.C = np.ones(dim)
# ノイズ
self.Q = 1.0
self.R = 1.0
self.W = np.random.normal(loc=0, scale=self.Q, size=self.x.shape)
self.V = np.random.normal(loc=0, scale=self.R, size=self.x.shape)
def __call__(self):
#for t in tqdm(range(self.timelength-1)):
for t in (range(self.timelength-1)):
# 状態量推定
self.x[t+1] = self.A * self.x[t] + self.W[t]
self.sigma[t+1] = self.Q + self.A * self.sigma[t] * self.A.T
# 更新
#Kalman_gain = self.sigma[t+1] * self.C.T * (self.C * self.sigma[t+1] * self.sigma[t+1].T + self.R).T
Kalman_gain = self.sigma[t+1] / (self.sigma[t+1] + self.R)
self.x_filter[t+1] = self.x[t+1] + Kalman_gain * (self.data[t+1] - self.C * self.x[t+1])
self.sigma_filter[t+1] = self.sigma[t+1] - Kalman_gain * self.C * self.sigma[t+1]
self.draw_graph()
return
def draw_graph(self):
# グラフ描画
plt.figure(figsize=(16,8))
plt.plot(range(self.timelength), self.data, label='Grand Truth')
plt.plot(range(self.timelength), self.x_filter[:-1], "g", label='Prediction')
plt.legend()
plt.subplots_adjust(left=0.1, right=0.95, bottom=0.1, top=0.95)
plt.savefig('./Kalman_filter.png')
return
| 33.927273
| 113
| 0.559486
| 293
| 1,866
| 3.491468
| 0.259386
| 0.02737
| 0.064516
| 0.086022
| 0.408602
| 0.343109
| 0.27175
| 0.043011
| 0
| 0
| 0
| 0.030325
| 0.275456
| 1,866
| 54
| 114
| 34.555556
| 0.726331
| 0.096999
| 0
| 0.058824
| 0
| 0
| 0.024492
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.088235
| 0
| 0.264706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e62b0481e9ee04d621f3915eddb5dfd2397e270a
| 4,394
|
py
|
Python
|
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | null | null | null |
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | 6
|
2019-11-25T08:15:05.000Z
|
2020-02-07T13:05:59.000Z
|
mwarp1d/ui/figures/artists/draggable_points.py
|
0todd0000/mwarp1d
|
7b40a47e6c112a8da5a1b67aff890fc77fe83d71
|
[
"MIT"
] | 2
|
2019-11-28T02:58:14.000Z
|
2019-12-18T11:45:33.000Z
|
from PyQt5 import QtWidgets, QtCore
from math import floor
import numpy as np
from . _base import _SelectableArtist2D
class _DraggablePoints(_SelectableArtist2D):
dragged = QtCore.pyqtSignal(object, int, int, float)
dragging_stopped = QtCore.pyqtSignal()
point_added = QtCore.pyqtSignal(int, int)
point_deleted = QtCore.pyqtSignal(int)
point_delete_failed = QtCore.pyqtSignal()
maxpointsreached = QtCore.pyqtSignal(int)
color_active = 0.98, 0.7, 0.3
color_inactive = '0.7'
dragging_enabled = True
dragging = False
# n = 0 #number of points
nmax = 8 #maximum number of points
selected_ind = None
xminmax = None
def __init__(self, ax, x, y_constraint=None, collection=None):
super().__init__(ax, collection)
self.Q = y_constraint.size
# self.n = len(x)
self.h = self.ax.plot(x, y_constraint[x], 'o', ms=8, color=self.color_active, markeredgecolor='w', zorder=self.zorder)[0]
self.y_constraint = y_constraint
self.ax.figure.canvas.mpl_connect('button_release_event', self.on_release)
self.ax.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
@property
def n(self):
return self.h.get_xdata().size
@property
def values(self):
return self.h.get_xdata()
def add_point(self, x):
if self.n < self.nmax:
y = self.y_constraint[x]
x0,y0 = self.get_point_coordinates()
x0,y0 = np.append(x0, x), np.append(y0, y)
ind = np.argsort(x0)
self.set_point_coordinates(x0[ind], y0[ind])
# self.n += 1
self.ax.figure.canvas.draw()
col = x0[ind].tolist().index(x)
self.point_added.emit(col, x)
else:
self.maxpointsreached.emit(self.nmax)
def delete_point(self, ind):
deleted = False
if self.n > 1:
x,y = self.get_point_coordinates()
x = np.hstack((x[:ind], x[ind+1:]))
y = np.hstack((y[:ind], y[ind+1:]))
self.set_point_coordinates(x, y)
deleted = True
self.point_deleted.emit(ind)
self.ax.figure.canvas.draw()
else:
self.point_delete_failed.emit()
return deleted
def get_point_coordinates(self):
x,y = self.h.get_xdata(), self.h.get_ydata()
return x,y
def get_previous_point(self, ind):
return None if (ind==0) else (ind-1)
def get_previous_x(self, ind0):
ind = self.get_previous_point(ind0)
return None if (ind is None) else self.h.get_xdata()[ind]
def get_next_point(self, ind):
return None if (ind==(self.n-1)) else (ind+1)
def get_next_x(self, ind0):
ind = self.get_next_point(ind0)
return None if (ind is None) else self.h.get_xdata()[ind]
def get_xminmax(self, ind):
x0,x1 = self.get_previous_x(ind), self.get_next_x(ind)
x0 = 2 if (x0 is None) else x0+2
x1 = self.Q-3 if (x1 is None) else x1-2
return x0,x1
def on_motion(self, event):
if event.inaxes:
# # self.crosshairs.update(x, y)
if self.dragging_enabled and self.dragging:
ind = self.selected_ind
x = floor(event.xdata)
x0,x1 = self.xminmax
x = min(x1, max(x0, x))
y = self.y_constraint[x]
self.set_data(ind, x, y)
self.dragged.emit(self, ind, x, y)
def on_selected(self, ind, distance):
super().on_selected(ind, distance)
self.dragging = True
self.selected_ind = ind
self.xminmax = self.get_xminmax(ind)
def on_release(self, event):
self.dragging_stopped.emit()
self.dragging = False
self.selected_ind = None
self.xminmax = None
def set_active(self, active):
super().set_active(active)
self.isselectable = active
def set_all_xdata(self, x):
self.h.set_xdata(x)
self.h.set_ydata( self.y_constraint[x] )
def set_data(self, ind, xnew, ynew):
x,y = self.h.get_xdata(), self.h.get_ydata()
x[ind] = xnew
y[ind] = ynew
self.h.set_xdata(x)
self.h.set_ydata(y)
def set_dragging_enabled(self, enabled):
self.dragging_enabled = enabled
def set_point_coordinates(self, x, y):
self.h.set_xdata(x)
self.h.set_ydata(y)
class SourceLandmarks(_DraggablePoints):
color_active = 0.98, 0.7, 0.3
zorder = 1
def set_active(self, active):
super().set_active(active)
self.h.set_visible(active)
class TemplateLandmarks(_DraggablePoints):
color_active = 0.3, 0.3, 0.98
zorder = 3
| 24.824859
| 134
| 0.649067
| 671
| 4,394
| 4.081967
| 0.178838
| 0.029208
| 0.023366
| 0.028478
| 0.263235
| 0.224535
| 0.173421
| 0.138372
| 0.125228
| 0.115371
| 0
| 0.021108
| 0.223714
| 4,394
| 176
| 135
| 24.965909
| 0.781882
| 0.030269
| 0
| 0.191667
| 0
| 0
| 0.010353
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.033333
| 0.033333
| 0.441667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e62bee983944925691e81c42d718cf0680c6b087
| 7,370
|
py
|
Python
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 32
|
2021-04-23T15:07:04.000Z
|
2022-03-30T08:04:28.000Z
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 3
|
2021-02-10T18:54:06.000Z
|
2022-03-12T16:58:19.000Z
|
convert/tartan_air_to_benchmark.py
|
AaltoML/vio_benchmark
|
cb2277026f824f88f3bc131057ebc687cb19d648
|
[
"Apache-2.0"
] | 4
|
2021-02-08T11:11:09.000Z
|
2022-03-15T12:45:05.000Z
|
#!/usr/bin/env python
#
# Download and convert TartanAir data <https://theairlab.org/tartanair-dataset/>.
#
# NOTE The whole dataset is several terabytes, so be sure to tune the `LEVELS` and
# `DATASETS` variables before running.
#
# It is recommended to install "AzCopy", an official tool for Azure, to get tolerable
# download speeds (pass `--azcopy` flag to enable).
#
# NOTE At the time of writing the data does not include simulated IMU samples.
import argparse
import csv
import json
import os
from pathlib import Path
import subprocess
from tartan_air_transformations import fixTartan
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument('--azcopy', action='store_true', default=False, help='download the data with AzCopy')
args = parser.parse_args()
# Since the downloads can be slow, an option to leave the downloaded zip files in the RAW directory.
BACKUP_ZIPS = False
RAW = "data/raw/tartan-air"
OUT = "data/benchmark/tartan-air"
# <https://github.com/castacks/tartanair_tools/blob/master/download_training_zipfiles.txt>
RELEASE = "https://tartanair.blob.core.windows.net/tartanair-release1"
LEVELS = ["Easy", "Hard"]
DATASETS = [
"abandonedfactory",
"abandonedfactory_night",
"amusement",
"carwelding",
"endofworld",
"gascola",
"hospital",
"japanesealley",
"neighborhood",
"ocean",
"office",
"office2",
"oldtown",
"seasidetown",
"seasonsforest",
"seasonsforest_winter",
"soulcity",
"westerndesert",
]
DOWNLOAD_CMD = "wget -O"
UNZIP_CMD = "unzip -o -d"
# The data doesn't have time information of any sort,
# so pick something that makes the videos run at a pleasant speed.
FPS = 10
def runCmd(cmd):
print("Running command:", cmd)
os.system(cmd)
def convertVideo(files, output):
# Use `-crf 0` for lossless compression.
subprocess.run(["ffmpeg",
"-y",
"-r", str(FPS),
"-f", "image2",
"-pattern_type", "glob", "-i", files,
"-c:v", "libx264",
"-preset", "ultrafast",
# "-preset", "veryslow",
"-crf", "0",
"-vf", "format=yuv420p",
"-an",
output])
def getExtractedPath(dataset, level):
# For some reason `dataset` is duplicated in the zip hierarchy.
return "{}/{}/{}/{}".format(RAW, dataset, dataset, level)
def download(dataset, level):
extractedPath = getExtractedPath(dataset, level)
if os.path.isdir(extractedPath):
print(extractedPath, "already exists, skipping.")
return
outPath = RAW
Path(outPath).mkdir(parents=True, exist_ok=True)
for d in ["image_left", "image_right"]:
url = "{}/{}/{}/{}.zip".format(RELEASE, dataset, level, d)
z = "{}/{}.zip".format(outPath, d)
if args.azcopy:
cmd = "azcopy copy {} {}".format(url, z)
runCmd(cmd)
else:
cmd = "{} {} {}".format(DOWNLOAD_CMD, z, url)
runCmd(cmd)
cmd = "{} {} {}".format(UNZIP_CMD, outPath, z)
runCmd(cmd)
src = "{}/{}.zip".format(outPath, d)
if BACKUP_ZIPS:
name = "{}-{}-{}".format(dataset, level, d)
dst = "{}/{}.zip".format(outPath, name)
os.rename(src, dst)
else:
os.remove(src)
def convert_sequence(fullPath, sequence, dataset, level):
datasetOut = "{}/{}-{}".format(dataset, level.lower(), sequence)
outPath = "{}/{}".format(OUT, datasetOut)
Path(outPath).mkdir(parents=True, exist_ok=True)
convertVideo("{}/image_left/*.png".format(fullPath), "{}/data.mp4".format(outPath))
convertVideo("{}/image_right/*.png".format(fullPath), "{}/data2.mp4".format(outPath))
output = []
number = 0
time = 0.0
dt = 1.0 / FPS
p0 = [None, None, None]
# We define ground truth as pose of the left camera.
with open("{}/pose_left.txt".format(fullPath)) as f:
# format: tx ty tz qx qy qz qw
csvRows = csv.reader(f, delimiter=' ')
rows = []
for row in csvRows:
rows.append(row)
# The general coordinate transformation has the form
# M -> W*M*L, where M = M(p, q)
# The W and L matrices were found by experimentation starting with transforms
# in `ned2cam()` function in the TartanAir repository's scripts.
W = np.array([
[0,1,0,0],
[1,0,0,0],
[0,0,-1,0],
[0,0,0,1]], dtype=np.float32)
L = np.array([
[0,0,1,0],
[1,0,0,0],
[0,1,0,0],
[0,0,0,1]], dtype=np.float32)
fixedRows = fixTartan(W, L, rows)
for row in fixedRows:
if not p0[0]:
p0 = [row[0], row[1], row[2]]
p = [row[0] - p0[0], row[1] - p0[1], row[2] - p0[2]]
q = [row[6], row[3], row[4], row[5]] # wxyz
gt = {
"groundTruth": {
"position": {
"x": p[0], "y": p[1], "z": p[2]
},
"orientation": {
"w": q[0], "x": q[1], "y": q[2], "z": q[3]
}
},
"time": time
}
frame = {
"number": number,
"time": time,
"frames": [
{"cameraInd": 0, "time": time},
{"cameraInd": 1, "time": time},
],
}
output.append(gt)
output.append(frame)
time += dt
number += 1
# Write JSONL
with open(outPath + "/data.jsonl", "w") as f:
for obj in output:
f.write(json.dumps(obj, separators=(',', ':')))
f.write("\n")
# Write parameters
with open(outPath + "/parameters.txt", "w") as f:
# <https://github.com/castacks/tartanair_tools/blob/master/data_type.md>
fx = 320
fy = 320
cx = 320
cy = 240
f.write("focalLengthX {}; focalLengthY {};\nprincipalPointX {}; principalPointY {};\n".format(fx, fy, cx, cy))
f.write("secondFocalLengthX {}; secondFocalLengthY {};\nsecondPrincipalPointX {}; secondPrincipalPointY {};\n".format(fx, fy, cx, cy))
f.write("rot 0;\n")
# Define the (non-existent) IMU to have the same pose as the left camera.
for cam in [0, 1]:
columnMajor = []
for i in [0, 1, 2, 3]:
for j in [0, 1, 2, 3]:
if cam == 1 and i == 3 and j == 0:
num = "-0.25" # baseline
elif i == j:
num = "1"
else:
num = "0"
columnMajor.append(num)
f.write("{} {};\n".format(
"imuToCameraMatrix" if cam == 0 else "secondImuToCameraMatrix",
",".join(columnMajor)))
def convert(dataset, level):
extractedPath = getExtractedPath(dataset, level)
folders = [ (f.path, f.name) for f in os.scandir(extractedPath) if f.is_dir() ]
folders.sort()
for fullPath, sequence in folders:
convert_sequence(fullPath, sequence, dataset, level)
def main():
for dataset in DATASETS:
for l in LEVELS:
download(dataset, l)
convert(dataset, l)
if __name__ == "__main__":
main()
| 31.767241
| 142
| 0.53867
| 874
| 7,370
| 4.5
| 0.37643
| 0.008645
| 0.007628
| 0.006102
| 0.128909
| 0.116196
| 0.067379
| 0.06687
| 0.006102
| 0
| 0
| 0.023571
| 0.309227
| 7,370
| 231
| 143
| 31.904762
| 0.748969
| 0.183039
| 0
| 0.091429
| 0
| 0
| 0.178935
| 0.019529
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.045714
| 0.005714
| 0.097143
| 0.011429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e630f7f1230425fb80852a1c185d9c2e86b9dabb
| 4,985
|
py
|
Python
|
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | null | null | null |
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | 6
|
2022-03-14T19:37:52.000Z
|
2022-03-14T19:51:47.000Z
|
midas2/common/bowtie2.py
|
czbiohub/microbiome-igg
|
fd4bc62bee15e53587a947ca32bf3c5b9e8022e6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
import numpy as np
from midas2.common.utils import tsprint, command, split, OutputStream
def bowtie2_index_exists(bt2_db_dir, bt2_db_name):
bt2_db_suffixes = ["1.bt2", "2.bt2", "3.bt2", "4.bt2", "rev.1.bt2", "rev.2.bt2"]
if all(os.path.exists(f"{bt2_db_dir}/{bt2_db_name}.{ext}") for ext in bt2_db_suffixes):
tsprint(f"Use existing Bowtie2 indexes {bt2_db_dir}/{bt2_db_name}")
return True
bt2_db_large_suffixes = ["1.bt2l", "2.bt2l", "3.bt2l", "4.bt2l", "rev.1.bt2l", "rev.2.bt2l"]
if all(os.path.exists(f"{bt2_db_dir}/{bt2_db_name}.{ext}") for ext in bt2_db_large_suffixes):
tsprint(f"Use existing large Bowtie2 indexes {bt2_db_dir}/{bt2_db_name}")
return True
return False
def build_bowtie2_db(bt2_db_dir, bt2_db_name, downloaded_files, num_cores):
""" Build Bowtie2 database for the collections of fasta files """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if not bowtie2_index_exists(bt2_db_dir, bt2_db_name):
# Primarily for build_bowtie2db.py
if not os.path.exists(bt2_db_dir):
tsprint(f"Create bt2_db_dir: {bt2_db_dir}")
command(f"mkdir -p {bt2_db_dir}")
# Write the species_id to file, that used to build the bowtie2 indexes
with OutputStream(f"{bt2_db_prefix}.species") as stream:
stream.write("\n".join(map(str, downloaded_files.keys())))
command(f"rm -f {bt2_db_dir}/{bt2_db_name}.fa", quiet=False)
command(f"touch {bt2_db_dir}/{bt2_db_name}.fa")
for files in split(downloaded_files.values(), 20): # keep "cat" commands short
command("cat " + " ".join(files) + f" >> {bt2_db_dir}/{bt2_db_name}.fa")
try:
command(f"bowtie2-build --threads {num_cores} {bt2_db_prefix}.fa {bt2_db_prefix} > {bt2_db_dir}/bt2-db-build-{bt2_db_name}.log", quiet=False)
except:
tsprint(f"Bowtie2 index {bt2_db_prefix} run into error")
command(f"rm -f {bt2_db_prefix}.1.bt2")
raise
return bt2_db_prefix
def bowtie2_align(bt2_db_dir, bt2_db_name, bamfile_path, args):
""" Use Bowtie2 to map reads to prebuilt bowtie2 database """
bt2_db_prefix = f"{bt2_db_dir}/{bt2_db_name}"
if os.path.exists(bamfile_path):
tsprint(f"Use existing bamfile {bamfile_path}")
return
# Construct bowtie2 align input arguments
max_reads = f"-u {args.max_reads}" if args.max_reads else ""
aln_mode = "local" if args.aln_mode == "local" else "end-to-end"
aln_speed = args.aln_speed if aln_mode == "end-to-end" else args.aln_speed + "-local"
r2 = ""
max_fraglen = f"-X {args.fragment_length}" if args.r2 else ""
if args.r2:
r1 = f"-1 {args.r1}"
r2 = f"-2 {args.r2}"
elif args.aln_interleaved:
r1 = f"--interleaved {args.r1}"
else:
r1 = f"-U {args.r1}"
try:
bt2_command = f"bowtie2 --no-unal -x {bt2_db_prefix} {max_fraglen} {max_reads} --{aln_mode} --{aln_speed} --threads {args.num_cores} -q {r1} {r2}"
command(f"set -o pipefail; {bt2_command} | \
samtools view --threads {args.num_cores} -b - | \
samtools sort --threads {args.num_cores} -o {bamfile_path}", quiet=False)
except:
tsprint(f"Bowtie2 align to {bamfile_path} run into error")
command(f"rm -f {bamfile_path}")
raise
def samtools_sort(bamfile_path, sorted_bamfile, debug, num_cores):
if debug and os.path.exists(sorted_bamfile):
tsprint(f"Skipping samtools sort in debug mode as temporary data exists: {sorted_bamfile}")
return
try:
command(f"samtools sort -@ {num_cores} -o {sorted_bamfile} {bamfile_path}", quiet=False) #-m 2G
except:
tsprint(f"Samtools sort {bamfile_path} run into error")
command(f"rm -f {sorted_bamfile}")
raise
def samtools_index(bamfile_path, debug, num_cores):
if debug and os.path.exists(f"{bamfile_path}.bai"):
tsprint(f"Skipping samtools index in debug mode as temporary data exists: {bamfile_path}.bai")
return
try:
command(f"samtools index -@ {num_cores} {bamfile_path}", quiet=False)
except:
tsprint(f"Samtools index {bamfile_path} run into error")
command(f"rm -f {bamfile_path}.bai")
raise
def _keep_read(aln, aln_mapid, aln_readq, aln_mapq, aln_cov):
""" Check the quality of one alignnment from BAM file """
if aln.is_secondary:
return False
align_len = len(aln.query_alignment_sequence)
query_len = aln.query_length
# min pid
if 100 * (align_len - dict(aln.tags)['NM']) / float(align_len) < aln_mapid:
return False
# min read quality
if np.mean(aln.query_qualities) < aln_readq:
return False
# min map quality
if aln.mapping_quality < aln_mapq:
return False
# min aln cov
if align_len / float(query_len) < aln_cov:
return False
return True
| 38.643411
| 154
| 0.649549
| 760
| 4,985
| 4.030263
| 0.217105
| 0.07509
| 0.047013
| 0.053869
| 0.307868
| 0.265426
| 0.237023
| 0.179563
| 0.166503
| 0.109696
| 0
| 0.029184
| 0.22327
| 4,985
| 128
| 155
| 38.945313
| 0.76188
| 0.082648
| 0
| 0.282609
| 0
| 0.021739
| 0.324752
| 0.080088
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065217
| false
| 0
| 0.032609
| 0
| 0.23913
| 0.119565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6314fc5be266fa2fd430fad718dac793df709ff
| 3,541
|
py
|
Python
|
src/race/src/my_lane_detection/findpoint.py
|
young43/ISCC_2020
|
2a7187410bceca901bd87b753a91fd35b73ca036
|
[
"MIT"
] | 3
|
2020-11-13T04:59:27.000Z
|
2021-04-02T06:36:03.000Z
|
src/race/src/my_lane_detection/findpoint.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | null | null | null |
src/race/src/my_lane_detection/findpoint.py
|
yongbeomkwak/ISCC_2021
|
7e7e5a8a14b9ed88e1cfbe2ee585fe24e4701015
|
[
"MIT"
] | 5
|
2020-09-13T09:06:16.000Z
|
2021-06-19T02:31:23.000Z
|
import numpy as np
import cv2
class FindPoint:
    """Locates left/right lane x-coordinates in a binary bird's-eye image.

    ``findpoint`` scans a fixed horizontal band (y in [380, 390]): first
    outward from the image centre to find each lane's pixel cluster, then
    with extra sliding windows when the detected lane gap is suspiciously
    narrow (< 250 px).
    """

    def __init__(self, img):
        self.window_height = 10   # horizontal window step in pixels
        self.nwindows = 15        # max extra sliding-window steps
        self.margin = 20          # kept for compatibility (unused here)
        self.minpix = 70          # min nonzero pixels to accept a window
        # Integer centre column. Using // keeps later range() arguments
        # integral — the original float division made range(1, center//10)
        # raise TypeError under Python 3.
        self.center = img.shape[1] // 2

    def findpoint(self, img):
        """Return (left_x, right_x); sentinels 0 / 640 mean 'not found'."""
        out_img = np.dstack((img, img, img))
        h, w = img.shape
        good_left_inds = []
        good_right_inds = []
        nonzero = img.nonzero()
        nonzerox = nonzero[1]
        nonzeroy = nonzero[0]
        tmp_lx = 0     # sentinel: left lane not found yet
        tmp_rx = 640   # sentinel: right lane not found yet
        # Scan leftwards from the centre, 10 px at a time.
        for i in range(1, self.center // 10):
            win_high = 390
            win_low = 380
            l_x_max = self.center - (i * 10 - 10)
            l_x_min = self.center - (i * 10 + 10)
            good_left_inds = \
                ((nonzerox >= l_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (nonzerox <= l_x_max)).nonzero()[
                    0]
            if len(good_left_inds) > self.minpix:
                # np.int was removed from NumPy (1.24+); int() truncates identically.
                tmp_lx = int(np.mean(nonzerox[good_left_inds]))
                cv2.rectangle(out_img, (l_x_max, 380), (l_x_min, 390), (0, 255, 0), 1)
            if tmp_lx != 0:
                break
        # Scan rightwards from the centre.
        for i in range(1, 64 - self.center // 10):
            win_high = 390
            win_low = 380
            r_x_min = self.center + (i * 10 - 10)
            r_x_max = self.center + (i * 10 + 10)
            good_right_inds = \
                ((nonzerox >= r_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
                        nonzerox <= r_x_max)).nonzero()[
                    0]
            if len(good_right_inds) > self.minpix:
                tmp_rx = int(np.mean(nonzerox[good_right_inds]))
                cv2.rectangle(out_img, (r_x_min, 380), (r_x_max, 390), (255, 0, 0), 1)
            if tmp_rx != 640:
                break
        # Lanes implausibly close: widen the search window by window.
        if tmp_rx - tmp_lx < 250:
            for window in range(0, self.nwindows):
                if tmp_lx != 0:
                    l_x_min = tmp_lx - (window + 1) * self.window_height
                    l_x_max = tmp_lx - (window) * self.window_height
                    good_left_inds = \
                        ((nonzerox >= l_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
                                nonzerox <= l_x_max)).nonzero()[
                            0]
                    if len(good_left_inds) > self.minpix:
                        tmp_lx = int(np.mean(nonzerox[good_left_inds]))
                        cv2.rectangle(out_img, (l_x_max, 380), (l_x_min, 390), (0, 255, 0), 1)
                # NOTE(review): this compares against 0 while the right-lane
                # sentinel everywhere else is 640 — confirm whether
                # `tmp_rx != 640` was intended here.
                if tmp_rx != 0:
                    r_x_max = tmp_rx + (window + 1) * self.window_height
                    r_x_min = tmp_rx + (window) * self.window_height
                    good_right_inds = \
                        ((nonzerox >= r_x_min) & (nonzeroy >= win_low) & (nonzeroy <= win_high) & (
                                nonzerox <= r_x_max)).nonzero()[
                            0]
                    if len(good_right_inds) > self.minpix:
                        tmp_rx = int(np.mean(nonzerox[good_right_inds]))
                        cv2.rectangle(out_img, (r_x_min, 380), (r_x_max, 390), (255, 0, 0), 1)
        print('l', tmp_lx, ' ', 'r', tmp_rx)
        cv2.rectangle(out_img, (tmp_lx - 10, 380), (tmp_lx + 10, 390), (255, 0, 255), 1)
        cv2.rectangle(out_img, (tmp_rx - 10, 380), (tmp_rx + 10, 390), (255, 0, 255), 1)
        # cv2.imshow('width_slide', out_img)
        return tmp_lx, tmp_rx
| 40.238636
| 119
| 0.475572
| 465
| 3,541
| 3.335484
| 0.148387
| 0.045132
| 0.054159
| 0.069633
| 0.702772
| 0.598324
| 0.578981
| 0.509349
| 0.509349
| 0.469375
| 0
| 0.076526
| 0.398475
| 3,541
| 87
| 120
| 40.701149
| 0.651643
| 0.022592
| 0
| 0.444444
| 0
| 0
| 0.001447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.027778
| 0
| 0.083333
| 0.013889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6315a99e2517f5c7110b8dd1b8d7574b184b340
| 6,198
|
py
|
Python
|
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
backend/ibutsu_server/tasks/db.py
|
john-dupuy/ibutsu-server
|
ae380fc7a72a4898075291bac8fdb86952bfd06a
|
[
"MIT"
] | null | null | null |
import time
from datetime import datetime
from datetime import timedelta
from bson import ObjectId
from bson.errors import InvalidId
from dynaconf import settings
from ibutsu_server.mongo import mongo
from ibutsu_server.tasks.queues import task
from ibutsu_server.tasks.results import add_result_start_time
from ibutsu_server.tasks.runs import update_run as update_run_task
from ibutsu_server.util import serialize
from kombu.exceptions import OperationalError
from pymongo import DESCENDING
from redis import Redis
from redis.exceptions import LockError
""" Tasks for DB related things"""
# Seconds a Redis lock acquisition may block before giving up
# (used as blocking_timeout in redis_client.lock below).
LOCK_EXPIRE = 1
@task
def create_runs_from_results():
    """Backfill run documents for results whose run id has no run record."""
    # Distinct run ids referenced by any result.
    referenced_runs = mongo.results.aggregate([{"$group": {"_id": "$metadata.run"}}])
    for group in referenced_runs:
        raw_id = group["_id"]
        # Skip ids that already have a run, or that are not valid ObjectIds.
        try:
            if mongo.runs.find_one({"_id": ObjectId(raw_id)}):
                continue
        except InvalidId:
            continue
        # Create the missing run document in Ibutsu.
        new_run = {
            "_id": ObjectId(raw_id),
        }
        mongo.runs.insert_one(new_run)
        # Queue the aggregation task for the freshly created run.
        update_run_task.apply_async((serialize(new_run)["id"],), countdown=5)
@task
def add_start_time_to_results():
    """Queue an ``add_result_start_time`` task for every run, newest first."""
    for raw_run in mongo.runs.find(sort=[("start_time", DESCENDING)]):
        run_id = serialize(raw_run)["id"]
        try:
            add_result_start_time.apply_async((run_id,), countdown=5)
        except OperationalError:
            # Broker unavailable: skip this run rather than fail the task.
            pass
@task
def _add_project_metadata(run, project_id):
    """Set ``metadata.project`` on one run and on all of its results."""
    redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
    lock_name = f"update-run-lock-{run['id']}"
    try:
        # Serialize concurrent updates of the same run.
        with redis_client.lock(lock_name, blocking_timeout=LOCK_EXPIRE):
            # Tag the run itself.
            if not run.get("metadata"):
                run["metadata"] = {}
            run["metadata"]["project"] = project_id
            mongo.runs.replace_one({"_id": ObjectId(run["id"])}, run)
            # Then tag every result of the run that is still missing a project.
            untagged = mongo.results.find(
                {"metadata.run": run["id"], "metadata.project": {"$exists": False}}
            )
            for raw_result in untagged:
                result = serialize(raw_result)
                if not result.get("metadata"):
                    result["metadata"] = {}
                result["metadata"]["project"] = project_id
                mongo.results.replace_one({"_id": ObjectId(result["id"])}, result)
    except LockError:
        # Another worker holds the lock; drop the task instead of clogging the queue.
        pass
@task
def add_project_metadata_to_objects(project_name="insights-qe"):
    """Queue metadata backfill for every run missing ``metadata.project``."""
    project = serialize(mongo.projects.find_one({"name": project_name}))
    project_id = project.get("id")
    if not project_id:
        return
    missing_project = {"metadata.project": {"$exists": False}}
    for raw_run in mongo.runs.find(missing_project, sort=[("start_time", DESCENDING)]):
        run = serialize(raw_run)
        try:
            _add_project_metadata.apply_async((run, project_id), countdown=5)
        except OperationalError:
            # Broker unavailable: skip this run.
            pass
@task
def _delete_old_files(filename, max_date):
    """Delete all stored files named ``filename`` uploaded before ``max_date``.

    max_date may be a datetime or an ISO-format string.
    """
    try:
        redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
        if not isinstance(max_date, datetime):
            max_date = datetime.fromisoformat(max_date)
        try:
            # Key the lock on the filename so deletions of *different* files
            # can run in parallel. The previous key was a constant string
            # (a leftover placeholder), so any concurrent _delete_old_files
            # task — even for another file — was silently discarded.
            with redis_client.lock(f"delete-file-lock-{filename}", blocking_timeout=LOCK_EXPIRE):
                for file in mongo.fs.find({"filename": filename, "uploadDate": {"$lt": max_date}}):
                    mongo.fs.delete(file._id)
        except LockError:
            # If this task is locked, discard it so that it doesn't clog up the system
            pass
    except Exception:
        # we don't want to continually retry this task
        return
@task
def prune_old_files(months=5):
    """Queue deletion of artifact files older than ``months`` (1 month ≈ 4 weeks)."""
    try:
        if isinstance(months, str):
            months = int(months)
        if months < 2:
            # Refuse to delete anything younger than the 2-month floor.
            return
        artifact_names = ["traceback.log", "screenshot.png", "iqe.log"]
        # Cutoff timestamp: now minus months * 4 weeks.
        cutoff_seconds = time.time() - timedelta(weeks=months * 4).total_seconds()
        max_date = datetime.fromtimestamp(cutoff_seconds)
        # Fan out one deletion task per artifact filename.
        for filename in artifact_names:
            try:
                _delete_old_files.apply_async((filename, max_date), countdown=5)
            except OperationalError:
                pass
    except Exception:
        # Best-effort maintenance: never retry on failure.
        return
@task
def delete_large_files(limit=256 * 1024):
    """Delete 'iqe.log' files larger than ``limit`` bytes (min/default 256 KiB)."""
    try:
        if isinstance(limit, str):
            limit = int(limit)
        if limit < (256 * 1024):
            # Guard: never delete files smaller than 256 KiB.
            return
        redis_client = Redis.from_url(settings["CELERY_BROKER_URL"])
        try:
            # One deletion pass per limit value at a time.
            with redis_client.lock(f"delete-file-lock-{limit}", blocking_timeout=LOCK_EXPIRE):
                oversized = {"length": {"$gt": limit}, "filename": "iqe.log"}
                for stored_file in mongo.fs.find(oversized):
                    mongo.fs.delete(stored_file._id)
        except LockError:
            # Lock held elsewhere: drop the task to avoid pile-ups.
            pass
    except Exception:
        # Best-effort maintenance: never retry on failure.
        return
| 35.016949
| 99
| 0.62262
| 805
| 6,198
| 4.637267
| 0.21118
| 0.019287
| 0.012858
| 0.013394
| 0.361907
| 0.325475
| 0.302706
| 0.290383
| 0.266274
| 0.230378
| 0
| 0.007606
| 0.2788
| 6,198
| 176
| 100
| 35.215909
| 0.827517
| 0.203453
| 0
| 0.385246
| 0
| 0
| 0.085001
| 0.016092
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057377
| false
| 0.04918
| 0.122951
| 0
| 0.229508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6338656305747e7dd588f6558bdad231c542786
| 830
|
py
|
Python
|
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
Estudos/namedtuple.py
|
Gbrvi/Python
|
02f0125c990f06ccb5cd705b4bf6ec5ecb6d1eab
|
[
"MIT"
] | null | null | null |
from collections import namedtuple
# A namedtuple is like a dictionary with fixed fields: slower to create,
# but immutable.
# 'Jogador' is the type name | the list gives its field names
J = namedtuple('Jogador', ['nome', 'time', 'camisa', 'numero'])
j = J('Abel Hernadez', 'Flu', 99, 100) # positional field values
j2 = J('Fred', 'Fluminense', 9, 157)
print(j2.nome)
#-------------------------------------------------------
# Duplicate field names, or names reserved by Python (def, class, ...),
# are replaced with positional names (_2, ...) when rename=True is set.
P = namedtuple('Pessoa', ['nome', 'idade', 'def'], rename=True)
p = P('Carlos', 15, 'viano')
#output: Pessoa(nome='Carlos', idade=15, _2='viano')
# defaults fills the *trailing* fields, so only the first value "x" is required.
L = namedtuple('valores', ['x', 'y', 'z'], defaults=(None, None))
l = L(2)
print(l)
| 31.923077
| 91
| 0.591566
| 110
| 830
| 4.454545
| 0.663636
| 0.016327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025335
| 0.191566
| 830
| 26
| 92
| 31.923077
| 0.704918
| 0.477108
| 0
| 0
| 0
| 0
| 0.239402
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6342f9f6fc2f8be229cda6971a2b29ca77c7c7c
| 1,330
|
py
|
Python
|
src/decker/format/command.py
|
douglasfarinelli/pydev
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | 21
|
2020-12-11T17:59:50.000Z
|
2022-03-12T02:22:09.000Z
|
src/decker/format/command.py
|
douglasfarinelli/decker
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | null | null | null |
src/decker/format/command.py
|
douglasfarinelli/decker
|
9d43d485b102e5b44ee28894278ae496c3cec024
|
[
"MIT"
] | 2
|
2021-07-31T00:05:25.000Z
|
2021-11-04T12:09:26.000Z
|
import sys
from typing import List
import click
from decker.conf import Config
from decker.utils import print_done
from .pool import FormatterBackendPool
from .services import run_format
# Decorators apply bottom-up: click.command builds the Command object first,
# then each option/argument above attaches a parameter to it.
@click.option(
    '-b',
    '--backend',
    type=click.Choice([backend.id for backend in FormatterBackendPool.all()]),
    multiple=True,
    help='Specify formatting backends.',
)
@click.option(
    '-l',
    '--line-length',
    type=int,
    default=79,
    help='How many characters per line to allow.',
    show_default=True,
)
@click.option(
    '--exclude',
    type=str,
    default=None,
    help='Files and directories that should be excluded on recursive searches.',
)
@click.argument(
    'sources',
    nargs=-1,
    type=click.Path(
        exists=True,
        file_okay=True,
        dir_okay=True,
        readable=True,
        allow_dash=True,
    ),
    is_eager=True,
)
@click.command(name='format')
@click.pass_context
def format_command(
    ctx: click.Context,
    backend: List[str],
    sources: List[str],
    line_length: int,
    exclude: str,
) -> None:
    """
    Run code style format.
    """
    # Build the shared Config from the CLI options, then delegate the actual
    # formatting to run_format with the selected backends.
    config = Config.create(
        ctx=ctx, sources=sources, line_length=line_length, exclude=exclude
    )
    run_format(
        config,
        backends=backend,
    )
    # Report success and exit with status 0 explicitly.
    print_done()
    sys.exit(0)
| 19
| 80
| 0.635338
| 162
| 1,330
| 5.12963
| 0.506173
| 0.048135
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003972
| 0.242857
| 1,330
| 69
| 81
| 19.275362
| 0.821251
| 0.016541
| 0
| 0.051724
| 0
| 0
| 0.140867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0.017241
| 0.12069
| 0
| 0.137931
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63506be46724ae2661303db422a81cac16e9cfd
| 709
|
py
|
Python
|
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
seeds.py
|
hazzillrodriguez/Flaskdesk
|
16123f4d63c686a3332f3f91eda9bb3a8e2a3ed5
|
[
"MIT"
] | null | null | null |
from app import app, db
from app.models import Category, Priority, Status
from sqlalchemy.exc import SQLAlchemyError
# Seed data: one default category plus fixed priority/status vocabularies.
category = 'Uncategorized'
priorities = ['Low', 'Medium', 'High', 'Urgent']
statuses = ['Open', 'Resolved', 'Pending', 'Closed']

def db_commit():
    """Commit the session; return True on success, False on a database error."""
    try:
        db.session.commit()
        print('Category, priorities, and statuses has been created.')
        return True
    except SQLAlchemyError as error:
        # Bug fix: str(SQLAlchemyError) printed the exception *class*, not the
        # actual failure; bind the raised exception so its message is shown.
        print(str(error))
        return False

with app.app_context():
    if db_commit():
        for priority, status in zip(priorities, statuses):
            db.session.add(Priority(priority=priority))
            db.session.add(Status(status=status))
        db.session.add(Category(category=category))
        db.session.commit()
| 28.36
| 63
| 0.734838
| 90
| 709
| 5.755556
| 0.488889
| 0.086873
| 0.069498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132581
| 709
| 25
| 64
| 28.36
| 0.842276
| 0
| 0
| 0.090909
| 0
| 0
| 0.153521
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63871f321b5d3bb45b965cb63b221c456ac757e
| 2,527
|
py
|
Python
|
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | 32
|
2019-07-15T11:11:57.000Z
|
2022-01-09T11:03:00.000Z
|
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | null | null | null |
eval/plot.py
|
yhlleo/TriangleGAN
|
5bab76561e75145c2645a93e23d22abd3f66f329
|
[
"BSD-3-Clause"
] | 4
|
2019-07-17T09:00:14.000Z
|
2021-11-16T21:20:25.000Z
|
# plot prd scores
import os
import json
from matplotlib import pyplot as plt
import argparse
# CLI: any number of positional PRD result JSON files, plus an output figure path.
parser = argparse.ArgumentParser()
parser.add_argument("json_files", nargs="*")
parser.add_argument("--output_fig", type=str, default='prd.png')
args = parser.parse_args()
def load_jsons(file_paths):
    """Load PRD result files.

    Each JSON file must contain a ``score`` object (with ``recall`` and
    ``precision``) and a ``label``. Returns
    ([[recall, precision], ...], [label, ...]) in input order.
    """
    pairs = []
    labels = []
    for path in file_paths:
        with open(path) as handle:
            payload = json.load(handle)
        pairs.append([payload["score"]["recall"], payload["score"]["precision"]])
        labels.append(payload["label"])
    return pairs, labels
def plot(precision_recall_pairs, labels=None, out_path=None,
    legend_loc='lower left', dpi=300):
    """Plots precision recall curves for distributions.
    Creates the PRD plot for the given data and stores the plot in a given path.
    Args:
    precision_recall_pairs: List of prd_data to plot. Each item in this list is
    a 2D array of precision and recall values for the
    same number of ratios.
    labels: Optional list of labels of same length as list_of_prd_data. The
    default value is None.
    out_path: Output path for the resulting plot. If None, the plot will be
    opened via plt.show(). The default value is None.
    (NOTE(review): the code below always calls plt.savefig(out_path);
    passing None would fail — confirm intended behavior.)
    legend_loc: Location of the legend. The default value is 'lower left'.
    dpi: Dots per inch (DPI) for the figure. The default value is 300.
    Raises:
    ValueError: If labels is a list of different length than list_of_prd_data.
    """
    # Labels, when given, must pair one-to-one with the curves.
    if labels is not None and len(labels) != len(precision_recall_pairs):
        raise ValueError(
            'Length of labels %d must be identical to length of '
            'precision_recall_pairs %d.'
            % (len(labels), len(precision_recall_pairs)))
    fig = plt.figure(figsize=(3.5, 3.5), dpi=dpi)
    plot_handle = fig.add_subplot(111)
    plot_handle.tick_params(axis='both', which='major', labelsize=12)
    # One recall-vs-precision curve per pair.
    for i in range(len(precision_recall_pairs)):
        precision, recall = precision_recall_pairs[i]
        label = labels[i] if labels is not None else None
        plt.plot(recall, precision, label=label, alpha=0.5, linewidth=3)
    if labels is not None:
        plt.legend(loc=legend_loc)
    # PRD values live in the unit square.
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel('Recall', fontsize=12)
    plt.ylabel('Precision', fontsize=12)
    plt.tight_layout()
    plt.savefig(out_path, bbox_inches='tight', dpi=dpi)
    plt.close()
if __name__ == '__main__':
    # Load every PRD result file given on the command line and plot them.
    precision_recall_pairs, labels = load_jsons(args.json_files)
    plot(precision_recall_pairs, labels, args.output_fig)
| 37.716418
| 79
| 0.693708
| 385
| 2,527
| 4.402597
| 0.358442
| 0.097345
| 0.106195
| 0.040118
| 0.128024
| 0.037758
| 0
| 0
| 0
| 0
| 0
| 0.013386
| 0.20182
| 2,527
| 67
| 80
| 37.716418
| 0.826971
| 0.324891
| 0
| 0
| 0
| 0
| 0.107057
| 0.013158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63ab07fc8212736ff3ef91cca7ad9e31b8c2243
| 2,218
|
py
|
Python
|
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
data_output.py
|
adebraine/Time-Series-RNN
|
2e5ef0a222d84e15ed09141724fa437492c1466e
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import keras
def evaluate_model(model, split_sets):
    """Print the model's loss on the train and test splits of ``split_sets``.

    split_sets must provide 'X_train'/'y_train' and 'X_test'/'y_test'.
    """
    train_err = model.evaluate(split_sets['X_train'], split_sets['y_train'], verbose=0)
    print('training error = ' + str(train_err))
    test_err = model.evaluate(split_sets['X_test'], split_sets['y_test'], verbose=0)
    print('testing error = ' + str(test_err))
def _plot_prediction_figure(dataset, segments, legend_labels):
    """Draw the original series plus coloured prediction segments, then show.

    segments: list of (x_start, x_end, values, colour) tuples; each segment
    is plotted against np.arange(x_start, x_end, 1).
    """
    # plot original series
    plt.plot(dataset, color='k')
    for x_start, x_end, values, colour in segments:
        plt.plot(np.arange(x_start, x_end, 1), values, color=colour)
    # pretty up graph
    plt.xlabel('day')
    plt.ylabel('(normalized) price')
    plt.legend(legend_labels,
               loc='center left', bbox_to_anchor=(1, 0.5))
    plt.show()


def output_plot(dataset, y, window_size, train_percent,
                predictions):
    """Plot the series with train/(validation)/test predictions overlaid.

    A validation segment is drawn only when ``predictions`` has more than
    two entries ('train'/'valid'/'test'); otherwise only train and test.
    The two original branches duplicated all plotting code; it now lives
    in _plot_prediction_figure.
    """
    train_split = int(np.ceil(len(y) * train_percent)) + window_size
    if len(predictions) > 2:
        valid_split = int(np.ceil(len(y) * ((1 - train_percent) / 2))) + train_split
        segments = [
            (window_size, train_split, predictions['train'], 'b'),
            (train_split, valid_split, predictions['valid'], 'g'),
            (valid_split, valid_split + len(predictions['test']), predictions['test'], 'r'),
        ]
        labels = ['original series', 'training fit',
                  'Validation fit', 'testing fit']
    else:
        segments = [
            (window_size, train_split, predictions['train'], 'b'),
            (train_split, train_split + len(predictions['test']), predictions['test'], 'r'),
        ]
        labels = ['original series', 'training fit',
                  'testing fit']
    _plot_prediction_figure(dataset, segments, labels)
| 35.206349
| 92
| 0.595131
| 274
| 2,218
| 4.671533
| 0.248175
| 0.070313
| 0.0625
| 0.078125
| 0.691406
| 0.691406
| 0.633594
| 0.633594
| 0.538281
| 0.538281
| 0
| 0.009877
| 0.269612
| 2,218
| 62
| 93
| 35.774194
| 0.780247
| 0.098287
| 0
| 0.512821
| 0
| 0
| 0.126131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051282
| false
| 0
| 0.076923
| 0
| 0.128205
| 0.051282
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63b0d4192a6f56afdb4ff053aeafe21f3a6cf89
| 1,837
|
py
|
Python
|
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | 2
|
2015-05-20T18:02:40.000Z
|
2016-08-07T18:57:27.000Z
|
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | null | null | null |
vector_auto_regression.py
|
hotpxl/nebuchadnezzar
|
b26e0f19b9fdfeb8baa094e0f5ee2526cefb6409
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.4
import stats.data
import stats.plot
import stats.preprocess
import pandas
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates
import datetime
from statsmodels.tsa.api import VAR, DynamicVAR
# Fit a VAR(volume, readCount) model per SSE index and per smoothing window,
# then plot the one-step-ahead volume forecast against the real series.
sse_indices = stats.data.sse_indices()
for i in sse_indices:
    # Merged columns: date, trade volume, article read count.
    d = stats.data.get_merged(i, 'date', 'volume', 'readCount')
    # strip first few data points
    d = d[2:]
    for window_size in range(3, 10):
        # window_size = 7
        raw_volume = d[:, 1].astype(float)
        # Smooth volume into a sliding ratio; zero-pad the head so the
        # series keeps the original length.
        volume = np.concatenate((np.zeros(window_size - 1,), stats.preprocess.sliding_ratio(raw_volume, window_size).astype(float)))
        read_count = d[:, 2].astype(float)
        data = pandas.DataFrame({'volume': volume, 'readCount': read_count})
        data.index = pandas.DatetimeIndex(d[:, 0].astype(str))
        model = VAR(data)
        # Lag order chosen by the Hannan-Quinn information criterion.
        lag = model.select_order()['hqic']
        length = data.values.shape[0]
        print('using lag {}'.format(lag))
        results = model.fit(lag)
        # import IPython; IPython.embed()
        # Rolling one-step-ahead forecast; the first `lag` points have no
        # history, so they are filled with 0. Index [0][1] picks the second
        # forecast column — presumably volume; confirm DataFrame column order.
        prediction = [0] * (lag)
        for j in range(lag, length):
            prediction.append(results.forecast(data.values[j - lag: j], 1)[0][1])
        pred = np.asarray(prediction).reshape((length, 1))
        fig, ax = plt.subplots()
        dates = list(map(lambda x: datetime.datetime.strptime(x, '%Y-%m-%d').date(), d[:, 0]))
        ax.plot(dates, pred, 'r', label='forecast')
        ax.plot(dates, volume, 'b', label='real')
        ax.fmt_xdata = matplotlib.dates.DateFormatter('%Y-%m-%d')
        fig.autofmt_xdate()
        ax.set_ylabel('Volume')
        ax.legend()
        plt.show()
        # plt.savefig('{}_{}.png'.format(i, window_size))
        # stats.plot.twin_x(np.concatenate((d[:, 1].reshape((length, 1)), pred), axis=1))
        # import IPython; IPython.embed()
| 37.489796
| 132
| 0.625476
| 253
| 1,837
| 4.462451
| 0.438735
| 0.044287
| 0.03543
| 0.044287
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014503
| 0.211758
| 1,837
| 48
| 133
| 38.270833
| 0.765193
| 0.140991
| 0
| 0
| 0
| 0
| 0.054777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.243243
| 0
| 0.243243
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63cd901a3e8b73ecbb160ecf9c349073434a2bf
| 2,086
|
py
|
Python
|
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
ArticleClassifierTF/src/data_models/weights/theme_weights.py
|
joduss/ArticleClassifier
|
38c0e168cdd74214b7f591c7cfc7b93fc496e46b
|
[
"Unlicense"
] | null | null | null |
from typing import Dict, List
from classifier.preprocessing.article_theme_tokenizer import ArticleThemeTokenizer
from data_models.ThemeStat import ThemeStat
class ThemeWeights:
    """Maps per-theme statistics to class weights, ordered by tokenizer index."""

    theme_stats: List[ThemeStat]
    theme_tokenizer: ArticleThemeTokenizer

    def __init__(self, theme_stats: List[ThemeStat], theme_tokenizer: ArticleThemeTokenizer):
        self.theme_stats = theme_stats
        self.theme_tokenizer = theme_tokenizer

    def weight_list(self) -> List[float]:
        """
        Return one positive-class weight per theme, in tokenizer order.
        """
        weights: List[float] = []
        for theme in self.theme_tokenizer.orderedThemes:
            # First stat whose theme matches; raises IndexError if absent.
            matching = [s for s in self.theme_stats if s.theme == theme][0]
            weights.append(matching.binary_weight_pos())
        return weights

    def weights_of_theme(self, theme_idx: int) -> Dict[int, float]:
        """
        Return the binary class weights for one theme as {0: neg, 1: pos}.
        :param theme_idx: index of the theme
        """
        theme = self.theme_tokenizer.theme_at_index(theme_idx)
        matches = [s for s in self.theme_stats if s.theme == theme]
        if len(matches) == 0:
            raise Exception("Theme {} not found.".format(theme))
        if len(matches) > 1:
            raise Exception("Theme {} found multiple times.".format(theme))
        stat = matches[0]
        return {0 : stat.binary_weight_neg(),
                1 : stat.binary_weight_pos()}

    def weight_array(self) -> List[List[float]]:
        """Return [[neg, pos], ...], one weight pair per theme in tokenizer order."""
        pairs: List[List[float]] = []
        for theme in self.theme_tokenizer.orderedThemes:
            stat = [s for s in self.theme_stats if s.theme == theme][0]
            pairs.append([stat.binary_weight_neg(), stat.binary_weight_pos()])
        return pairs
| 32.59375
| 93
| 0.650527
| 264
| 2,086
| 4.912879
| 0.242424
| 0.069391
| 0.053971
| 0.037008
| 0.311488
| 0.277564
| 0.277564
| 0.188126
| 0.188126
| 0.188126
| 0
| 0.01025
| 0.251678
| 2,086
| 64
| 94
| 32.59375
| 0.820628
| 0.109779
| 0
| 0.121212
| 0
| 0
| 0.027192
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.090909
| 0
| 0.393939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63cf8d084bbaa33179f664b68770d2a61c1830b
| 2,688
|
py
|
Python
|
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | 1
|
2017-12-10T12:42:09.000Z
|
2017-12-10T12:42:09.000Z
|
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | null | null | null |
installation_text.py
|
bryanrtboy/videoselector
|
6867c14ebb3f9ac563a2aa5533806ec4872a53e9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
from pssh import SSHClient, ParallelSSHClient, utils
import datetime
import time
import random
import sys
# Collected run_command results (consumed by open_movies_wait_for_output).
output = []
# Raspberry Pi clients addressed by SSH host alias.
hosts = ['client0', 'client1', 'client2','client3', 'client4']
client = ParallelSSHClient(hosts)
# Valid movie-category keywords a visitor may type.
values = ["bear","cake","fork","pipe","gun"]
def open_movies(my_values, delay):
    """Interactively assign one movie keyword to each client and play them,
    then revert all clients to the idle loop and prompt whether to repeat.

    my_values: iterable of valid keywords, one per client (the last client
               automatically gets the one remaining choice).
    delay: seconds to let the chosen movies play before going idle.
    """
    choices = list(my_values)
    for x in range(len(hosts)):
        if x < len(hosts) - 1:
            # Build a prompt listing the still-available keywords.
            prompt = "Type "
            for v in choices:
                prompt += v + ", "
            prompt = prompt[:-2]
            prompt += " :"
            choice = get_valid_input(prompt)
            choices.remove(choice.lower())
            open_movie(choice, x)
        else:
            # Last client: only one keyword left, no need to ask.
            choice = choices[0]
            open_movie(choice, x)
    print("wait {0} seconds".format(delay))
    time.sleep(delay)
    print("done waiting, back to the command and play idle movies on clients")
    cmds = ["~/dbuscontrol.sh stop", "sleep 2", "omxplayer /mnt/usb/media/intro.mp4 --aspect-mode=stretch --loop"]
    #run all the commands on all the clients
    for cmd in cmds:
        client.run_command(cmd, stop_on_errors=False)
    #show a prompt to decide what to do next
    answer = raw_input("Hit return to continue or 'Q' to quit:")
    if answer == "Q":
        print("quitting")
        exit()
    else:
        # Bug fix: the recursive call was open_movies() with no arguments,
        # which raised TypeError on the second round; pass the original
        # arguments through.
        open_movies(my_values, delay)
def open_movie(choice, clientID):
    """Play a random clip (mov_0..mov_2) from the ``choice`` folder on one client."""
    target = SSHClient(hosts[clientID])
    num = random.randint(0, 2)
    # Stop whatever omxplayer is currently showing on that client.
    target.exec_command("~/dbuscontrol.sh stop")
    # Then loop the chosen clip, stretched to fill the display.
    movie_cmd = "omxplayer /mnt/usb/media/" + choice + "/mov_" + str(num) + ".mp4 --aspect-mode=stretch --loop"
    target.exec_command(movie_cmd)
    print("Opening a " + choice + " movie, number " + str(num) + " on " + hosts[clientID] + "!")
def get_valid_input(prompt):
    """Prompt until the user types one of the global ``values`` keywords."""
    while True:
        data = raw_input(prompt)
        # Case-insensitive membership test against the known keyword list.
        if data.lower() in values:
            return data
        print("Not an appropriate choice.")
# Use this function instead of open_movies() if you need the clients'
# command output back.
# Note: with the --loop argument in cmds the player process never quits,
# so CTRL-C is required to end it.
def open_movies_wait_for_output():
    """Start the looping clip on every client, then wait for and print output."""
    cmds = ["omxplayer /mnt/usb/media/gun/mov_0.mp4 --aspect-mode=stretch --loop"]
    launch_start = datetime.datetime.now()
    for cmd in cmds:
        output.append(client.run_command(cmd, stop_on_errors=False))
    launch_end = datetime.datetime.now()
    print("Started %s commands on %s host(s) in %s" % (
        len(cmds), len(hosts), launch_end - launch_start,))
    wait_start = datetime.datetime.now()
    for pending in output:
        print("waiting for output")
        client.join(pending)
        print(pending)
    wait_end = datetime.datetime.now()
    print("All commands finished in %s" % (wait_end - wait_start,))
if __name__ == "__main__":
    # Start the interactive selection loop with a 15-second play window.
    open_movies(values, 15)
| 29.217391
| 111
| 0.679315
| 394
| 2,688
| 4.525381
| 0.393401
| 0.028043
| 0.042625
| 0.033651
| 0.171621
| 0.040381
| 0.040381
| 0.040381
| 0
| 0
| 0
| 0.008178
| 0.181176
| 2,688
| 91
| 112
| 29.538462
| 0.801908
| 0.124256
| 0
| 0.188406
| 0
| 0.014493
| 0.247656
| 0.04902
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057971
| false
| 0
| 0.072464
| 0
| 0.144928
| 0.130435
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63d83d29b28004d4dc6e59ec720b1e34cdc3bc7
| 3,744
|
py
|
Python
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 10
|
2016-01-11T09:24:38.000Z
|
2021-07-20T06:40:15.000Z
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 1
|
2018-04-10T04:48:18.000Z
|
2018-04-10T04:48:18.000Z
|
poi/cache.py
|
jchluo/poi
|
6892d3e219ee2b841053a41d308887a5e6b60017
|
[
"Apache-2.0"
] | 8
|
2016-01-11T09:24:56.000Z
|
2020-04-23T08:25:53.000Z
|
# -*- coding: utf-8 -*-
"""Cache Recommender.
dump : run topN predict item for each user, and
dump them to file like object(disk file or memory).
load : recover from file like object, return CacheRecommender.
Note that this recommender just a tiny version of the original one,
which can only predict topN (stored in file) items.
usage:
>>> class M(object):
... def __init__(self):
... self.num_users = 1
... self.num_items = 3
... self.checkins = {0: {0:1}}
... self.name = "Test"
... def predict(self, u, i):
... return 1.0 * i
usage dump:
>>> from StringIO import StringIO
>>> f = StringIO()
>>> md = M()
>>> dump(md, f, attrs=["name"], num_pool=0)
usage load
>>> f.seek(0)
>>> cr = load(f)
>>> print cr.predict(0, 2)
2.0
>>> print cr.name
Test
"""
import time
import json
import logging
import numpy as np
from .utils import threads
from .models import Recommender
log = logging.getLogger(__name__)
__all__ = ["Recommender", "Evaluation"]
class CacheRecommender(Recommender):
    """Tiny recommender recovered from a cache file.

    Only the (user, item) scores stored by dump() can be predicted; the
    original model's dumped attributes are served through __getattr__.
    """
    def __init__(self):
        self.checkins = {}
        self._data = {}  # user -> {item: score}
        self._meta = {}  # dumped model attributes

    def __getattr__(self, attr):
        # Guard: while _meta itself is being resolved, fail fast instead
        # of recursing.
        if attr == "_meta":
            raise AttributeError()
        try:
            return self._meta[attr]
        except KeyError:
            raise AttributeError("attribute: %s Not Found." % attr)

    def __repr__(self):
        # Reuse the original model's repr, stripped of its angle brackets.
        return "<Cache %s>" % self._meta["__repr__"][1: -1]

    def predict(self, user, item):
        # NOTE(review): the unseen-pair sentinel is -10 * 10 == -100;
        # -10 ** 10 looks like the intended "very negative" value —
        # confirm before relying on it.
        return self._data.get(user, {}).get(item, -10 * 10)
def _proxy_predict(arg):
    """Score every unvisited item for one user.

    arg: (model, user, num) tuple. Returns [user, top-num (item, score) pairs],
    best score first.
    """
    model, user, num = arg
    candidates = [(item, model.predict(user, item))
                  for item in xrange(model.num_items)
                  if item not in model.checkins[user]]
    candidates.sort(key=lambda pair: pair[1], reverse=True)
    return [user, candidates[: num]]
def dump(model, fp, num=1000, attrs=None, num_pool=4):
    """Dump the top-``num`` predictions of ``model`` for every user to ``fp``.

    fp: file-like object opened for writing text.
    num: how many (item, score) pairs are kept per user; the rest are dropped.
    attrs: iterable of attribute names to store alongside the predictions;
        ``num_users`` and ``num_items`` are always stored.
    num_pool: number of worker threads; 0 disables multi-threading.
    Raises ValueError when model is None, AttributeError for unknown attrs.
    """
    if model is None:
        raise ValueError("model is None.")
    t0 = time.time()
    # ``range`` instead of Python-2-only ``xrange`` (same behaviour)
    args = [(model, i, num) for i in range(model.num_users)]
    if num_pool > 0:
        results = threads(_proxy_predict, args, num_pool)
    else:
        results = [_proxy_predict(arg) for arg in args]
    # collect the attributes to serialize
    if attrs is None:
        attrs = ["num_users", "num_items"]
    else:
        attrs = list(attrs)
        attrs.extend(["num_users", "num_items"])
    meta = {}
    for attr in set(attrs):
        if not hasattr(model, attr):
            raise AttributeError("attribute: %s Not Found." % attr)
        meta[attr] = getattr(model, attr)
    meta["__repr__"] = str(model)
    # ``fp.write`` replaces the Python-2-only ``print >> fp`` statement; the
    # on-disk format (one JSON document per line) is unchanged.
    fp.write(json.dumps(meta) + "\n")
    # write records, one user per line
    for one in results:
        fp.write(json.dumps(one) + "\n")
    t1 = time.time()
    log.debug("dump ok, time: %.2fs" % (t1 - t0))
def load(fp):
    """Return a CacheRecommender rebuilt from a file written by ``dump``.

    fp: file-like object positioned at the start of the dump.
    """
    recommender = CacheRecommender()
    # first line: the serialized model attributes
    recommender._meta = json.loads(fp.readline())
    # remaining lines: one [user, [(item, score), ...]] record each
    for raw in fp:
        record = json.loads(raw.strip())
        user = int(record[0])
        recommender._data[user] = dict(
            (int(item), float(score)) for item, score in record[1])
    return recommender
| 27.328467
| 75
| 0.576656
| 501
| 3,744
| 4.175649
| 0.311377
| 0.01912
| 0.020076
| 0.015296
| 0.068834
| 0.068834
| 0.043021
| 0.043021
| 0
| 0
| 0
| 0.013248
| 0.294338
| 3,744
| 136
| 76
| 27.529412
| 0.778577
| 0.344017
| 0
| 0.060606
| 0
| 0
| 0.075288
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0
| 0.090909
| 0.030303
| 0.287879
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63da7efdb0e189e1a9e15a53af922678e7b6e0e
| 2,335
|
py
|
Python
|
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
p2p/protocol.py
|
teotoplak/trinity
|
6c67b5debfb94f74d0162c70f92ae3d13918b174
|
[
"MIT"
] | null | null | null |
import logging
from typing import (
Any,
Sequence,
Tuple,
Type,
)
from eth_utils.toolz import accumulate
from p2p.abc import (
CommandAPI,
ProtocolAPI,
TransportAPI,
)
from p2p.constants import P2P_PROTOCOL_COMMAND_LENGTH
from p2p.typing import Capability
class BaseProtocol(ProtocolAPI):
    """Shared plumbing for concrete p2p protocol implementations."""
    logger = logging.getLogger('p2p.protocol.Protocol')
    def __init__(self,
                 transport: TransportAPI,
                 command_id_offset: int,
                 snappy_support: bool) -> None:
        self.transport = transport
        self.command_id_offset = command_id_offset
        self.snappy_support = snappy_support
        # Forward map: command class -> wire command id (offset applied).
        id_by_type = {}
        for command_type in self.commands:
            id_by_type[command_type] = (
                command_id_offset + command_type.protocol_command_id
            )
        self.command_id_by_type = id_by_type
        # Reverse map: wire command id -> command class.
        self.command_type_by_id = dict(
            (command_id, command_type)
            for command_type, command_id in id_by_type.items()
        )
    def __repr__(self) -> str:
        return "(%s, %d)" % (self.name, self.version)
    @classmethod
    def supports_command(cls, command_type: Type[CommandAPI[Any]]) -> bool:
        return command_type in cls.commands
    @classmethod
    def as_capability(cls) -> Capability:
        return (cls.name, cls.version)
    def get_command_type_for_command_id(self, command_id: int) -> Type[CommandAPI[Any]]:
        return self.command_type_by_id[command_id]
    def send(self, command: CommandAPI[Any]) -> None:
        command_id = self.command_id_by_type[type(command)]
        message = command.encode(command_id, self.snappy_support)
        self.transport.send(message)
def get_cmd_offsets(protocol_types: Sequence[Type[ProtocolAPI]]) -> Tuple[int, ...]:
    """
    Computes the `command_id_offsets` for each protocol.  The first offset is
    always P2P_PROTOCOL_COMMAND_LENGTH since the first protocol always begins
    after the base `p2p` protocol; every later protocol starts where the
    accumulated command lengths of the protocols before it end.
    """
    offsets = []
    next_offset = P2P_PROTOCOL_COMMAND_LENGTH
    for protocol_class in protocol_types:
        offsets.append(next_offset)
        next_offset += protocol_class.command_length
    # The running total *after* the last protocol is never an offset, so it
    # is simply not appended (the accumulate(...)[:-1] of the original).
    return tuple(offsets)
| 31.986301
| 93
| 0.677088
| 289
| 2,335
| 5.207612
| 0.304498
| 0.083721
| 0.043189
| 0.047841
| 0.075083
| 0.037209
| 0.037209
| 0
| 0
| 0
| 0
| 0.00564
| 0.240685
| 2,335
| 72
| 94
| 32.430556
| 0.843204
| 0.151606
| 0
| 0.037736
| 0
| 0
| 0.014849
| 0.010753
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132075
| false
| 0
| 0.113208
| 0.075472
| 0.377358
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e63f1e8cde7eb9bc19101fd61c76b84d56a931e5
| 6,314
|
py
|
Python
|
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | null | null | null |
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | 5
|
2017-11-23T08:24:09.000Z
|
2018-12-25T04:42:48.000Z
|
soocii_services_lib/tokens.py
|
jonascheng/services-lib
|
5345be2ddeab8bbdbeccbc2bcbecf3202163d0bc
|
[
"Apache-2.0"
] | 3
|
2017-06-28T07:54:40.000Z
|
2018-12-25T04:44:42.000Z
|
import binascii
import json
import time
import jsonschema
from .crypter import AESCipher
from .exceptions import AccessTokenValidationError, RefreshTokenValidationError, TokenExpiredError, TokenSchemaError
class BaseToken(dict):
    """A dict-backed token validated against a JSON schema (``_schema``)."""
    _schema = {}
    def is_valid(self, age=None, raise_exception=False):
        """Return True when the token matches ``_schema`` and is not expired.

        age: maximum token age in seconds; falsy skips the expiry check.
        raise_exception: raise TokenSchemaError / TokenExpiredError instead
            of returning False.
        """
        try:
            jsonschema.validate(self, self._schema)
            expired = age and (
                'timestamp' not in self
                or self['timestamp'] + age < int(time.time()))
            if expired:
                raise TokenExpiredError(
                    'timestamp {} is expired'.format(self.get("timestamp")))
        except jsonschema.exceptions.ValidationError as e:
            if raise_exception:
                raise TokenSchemaError(str(e))
        except TokenExpiredError:
            if raise_exception:
                raise
        else:
            return True
        return False
class AccessToken(BaseToken):
    """Access token payload for one of three roles: user, backstage, service.

    The schema requires ``role`` + ``timestamp`` on every token and
    dispatches the remaining required keys on the role value.
    """
    ROLE_USER = 'user'
    ROLE_BACKSTAGE = 'backstage'
    ROLE_SERVICE = 'service'
    _schema = {
        'definitions': {
            # common part: the creation timestamp
            'basic': {
                'type': 'object',
                'properties': {
                    'timestamp': {'type': 'integer'}
                }
            },
            ROLE_USER: {
                'type': 'object',
                'properties': {
                    'role': {'type': 'string', 'enum': [ROLE_USER]},
                    'pid': {'type': 'string'},
                    'id': {'type': 'integer'},
                    'soocii_id': {'type': 'string'},
                    # uid must be a 32-hex-digit string
                    'uid': {'type': 'string', 'pattern': '^[0-9a-fA-F]{32}$'}
                },
                'required': ['pid', 'id', 'soocii_id', 'uid']
            },
            ROLE_BACKSTAGE: {
                'type': 'object',
                'properties': {
                    'role': {'type': 'string', 'enum': [ROLE_BACKSTAGE]},
                    'id': {'type': 'integer'}
                },
                'required': ['id']
            },
            ROLE_SERVICE: {
                'type': 'object',
                'properties': {
                    'role': {'type': 'string', 'enum': [ROLE_SERVICE]},
                    'name': {'type': 'string'}
                },
                'required': ['name']
            },
        },
        'allOf': [
            {
                # BUG FIX: this key was spelled '#ref', which is not a
                # JSON-Schema keyword, so the 'basic' timestamp-type check
                # was silently never applied.
                '$ref': '#/definitions/basic'
            },
            {
                'oneOf': [
                    {'$ref': '#/definitions/user'},
                    {'$ref': '#/definitions/backstage'},
                    {'$ref': '#/definitions/service'}
                ]
            }
        ],
        'required': ['role', 'timestamp']
    }
    @property
    def role(self):
        """The token's role string ('user', 'backstage' or 'service')."""
        return self.get('role')
    def is_role(self, role):
        """True when the token's role equals ``role``."""
        return self.role == role
class RefreshToken(BaseToken):
    """Token pairing a creation timestamp with the access token it refreshes."""
    _schema = {
        'type': 'object',
        'properties': {
            'timestamp': {'type': 'integer'},
            'access_token': {'type': 'string'},
        },
        'required': ['timestamp', 'access_token'],
    }
class AccessTokenCryper(object):
    """Encrypts/decrypts AccessToken payloads with AES.

    The class attributes below are overridden by RefreshTokenCryper so the
    same machinery serves both token kinds.
    """
    # default maximum token age in seconds (12 hours)
    age = 43200
    # exception type raised on undecodable input in ``loads``
    exception = AccessTokenValidationError
    # token class constructed by ``dumps``/``loads``
    _token_cls = AccessToken
    def __init__(self, key, age=None):
        # key is expected as a hex string; unhexlify yields the raw AES key
        key = binascii.unhexlify(key)
        self.cipher = AESCipher(key)
        if age:
            self.age = age
    def _encode(self, raw):
        # Encrypt ``raw`` (str or bytes); str input is UTF-8 encoded first.
        if isinstance(raw, str):
            raw = raw.encode('utf-8')
        return self.cipher.encrypt(raw)
    def _decode(self, data):
        # convert the pre-defined secret from hex string.
        if isinstance(data, str):
            data = data.encode('utf-8')
        return self.cipher.decrypt(data)
    def dumps(self, data=None, **kwargs):
        """
        Generate token from encrypting the given data and keyword arguments. data should be a dict
        """
        # non-dict ``data`` (including None) is silently replaced by {}
        if not isinstance(data, dict):
            data = {}
        data.update(kwargs)
        # append timestamp
        data.update(timestamp=int(time.time()))
        token = self._token_cls(data)
        # schema violations raise TokenSchemaError before anything is encrypted
        token.is_valid(raise_exception=True)
        return self._encode(json.dumps(token))
    def loads(self, token, valid_age=True):
        """
        Load and decrypt token
        """
        try:
            token = self._token_cls(json.loads(self._decode(token).decode('utf-8')))
            # valid_age=False skips only the expiry check, not schema checks
            token.is_valid(self.age if valid_age else None, raise_exception=True)
        except ValueError:
            # undecryptable / non-JSON payload
            raise self.exception('invalid token format')
        return token
    def _get_specific_token(role):
        # NOTE: class-body helper, not a method -- it is called below at
        # class-definition time to build role-specific token factories.
        def _wrapper(self, **kwargs):
            # required keys come straight from the role's schema definition
            mandatory_keys = self._token_cls._schema['definitions'][role]['required']
            if any(k not in kwargs for k in mandatory_keys):
                msg = '{} are required'.format(', '.join(mandatory_keys))
                raise TokenSchemaError(msg)
            kwargs['role'] = role
            return self.dumps(kwargs).decode('utf-8')
        return _wrapper
    # role-specific factories; each validates that role's required keys
    _get_user_token = _get_specific_token(_token_cls.ROLE_USER)
    get_backstage_token = _get_specific_token(_token_cls.ROLE_BACKSTAGE)
    get_service_token = _get_specific_token(_token_cls.ROLE_SERVICE)
    def get_user_token(self, **kwargs):
        # default language when the caller does not provide one
        if 'lang' not in kwargs:
            kwargs['lang'] = 'EN-US'
        return self._get_user_token(**kwargs)
class RefreshTokenCryper(AccessTokenCryper):
    """AccessTokenCryper specialised for refresh tokens (7-day lifetime)."""
    age = 604800
    exception = RefreshTokenValidationError
    _token_cls = RefreshToken
    def get_token(self, access_token):
        """Build an encrypted refresh token wrapping ``access_token``."""
        payload = {'access_token': access_token}
        return self.dumps(payload).decode('utf-8')
| 28.062222
| 116
| 0.464365
| 531
| 6,314
| 5.354049
| 0.246704
| 0.028139
| 0.035174
| 0.025325
| 0.125572
| 0.125572
| 0.079142
| 0.044319
| 0
| 0
| 0
| 0.005472
| 0.421128
| 6,314
| 224
| 117
| 28.1875
| 0.772367
| 0.02835
| 0
| 0.20904
| 0
| 0
| 0.119763
| 0.007229
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.033898
| 0.016949
| 0.282486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e648ade42231ae7382e8ffb8232ee7fd02bab1ce
| 6,060
|
py
|
Python
|
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | 1
|
2020-08-17T04:36:14.000Z
|
2020-08-17T04:36:14.000Z
|
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | null | null | null |
software/camera-imu/tools/imu_driver_alt.py
|
MomsFriendlyRobotCompany/mjolnir
|
76f53e8e650ba1051b5f14e94ff2a9a283158da4
|
[
"MIT"
] | 1
|
2021-04-06T08:26:03.000Z
|
2021-04-06T08:26:03.000Z
|
from serial import Serial
import struct
from math import log10, sin, cos, acos, atan2, asin, pi, sqrt
import time
from collections import namedtuple
from colorama import Fore
# agmpt_t = namedtuple("agmpt_t", "accel gyro mag pressure temperature timestamp")
# ImageIMU = namedtuple("ImageIMU","image accel gyro temperature timestamp")
# Decoded sensor records.
AccelGyroMag = namedtuple("AccelGyroMag", "ax ay az gx gy gz mx my mz")
TempPress = namedtuple("TempPress", "temperature pressure")
Light = namedtuple("Light", "lux")
def c2f(t):
    """Convert Celsius to Fahrenheit (def instead of assigned lambda, PEP 8 E731)."""
    return t * 9 / 5 + 32
class cAccelGyroMag:
    """9-DoF record decoder.

    Accel: g's
    Gyro: rads/sec
    Mag: uT
    """
    header = 0xfd
    unpack = struct.Struct("<9f").unpack  # nine little-endian floats
    length = 9 * 4
    def astuple(self, data):
        values = self.unpack(data)
        return AccelGyroMag(*values)
class cAccelGyro:
    """Accel + gyro record decoder (namedtuple conversion not implemented)."""
    header = 0xfe
    unpack = struct.Struct("<6f").unpack  # six little-endian floats
    length = 6 * 4
    def astuple(self, data):
        raise NotImplementedError()
class cMag:
    """Magnetometer record decoder (namedtuple conversion not implemented)."""
    header = 0xfc
    unpack = struct.Struct("<3f").unpack  # three little-endian floats
    length = 3 * 4
    def astuple(self, data):
        raise NotImplementedError()
class cTempPress:
    """Environment record decoder.

    Temperature: C
    Pressure: hPa
    """
    header = 0xfb
    unpack = struct.Struct("<ff").unpack  # two little-endian floats
    length = 2 * 4
    def astuple(self, data):
        values = self.unpack(data)
        return TempPress(*values)
class cLight:
    """Ambient light record decoder (lux)."""
    header = 0xf9
    unpack = struct.Struct("f").unpack  # one native-endian float
    length = 1 * 4
    def astuple(self, data):
        values = self.unpack(data)
        return Light(*values)
class cIRCamera:
    """MLX90640 IR camera frame decoder: 32x24 floats (not implemented)."""
    header = 0xf8
    unpack = struct.Struct(f"<{32 * 24}f").unpack  # one float per pixel
    length = 32 * 24 * 4
    def astuple(self, data):
        raise NotImplementedError()
# Dispatch table: message header byte -> decoder instance for that record type.
Key = {
    cAccelGyroMag.header: cAccelGyroMag(),
    cAccelGyro.header: cAccelGyro(),
    cMag.header: cMag(),
    cTempPress.header: cTempPress(),
    cLight.header: cLight(),
    cIRCamera.header: cIRCamera(),
}
class Parser:
    """
    Splits a serial payload into typed sensor records.
    Wire format (single-byte record headers):
    [0xFF,0xFF]: start
    0xFE: accel, gyro
    0xFD: accel, gyro, mag
    0xFC: mag
    0xFB: temperature, pressure
    0xFA:
    0xF9: light
    0xF8: MLX90640 IR camera
    0xF7-0xF1: unused
    0xF0: position, velocity, quaternion
    [0xEE,0xEE]: end
    """
    header = b"\xff"
    ender = b"\xee"
    def decode(self, data):
        """Decode ``data`` into a list of record tuples.

        Returns None when the 0xEE 0xEE terminator is missing, or the
        records decoded so far when an unknown header byte is hit.
        """
        if data[-2:] != b"\xee\xee":
            print(f"{Fore.RED} ERROR: wrong message ending: {data[-2:]}{Fore.RESET}")
            return None
        size = len(data)
        i = 0
        ret = []
        while True:
            try:
                parse = Key[data[i]]
            except Exception as e:
                print(e)
                print(f"{Fore.RED}** Invalid key: {hex(data[i])}{Fore.RESET}")
                return ret
            i += 1  # skip the header byte
            # (the dead `if 0:` branch that flattened records was removed)
            record = parse.astuple(data[i:i + parse.length])
            ret.append(record)
            i += parse.length  # skip the payload
            # BUG FIX: `>=` instead of `==` -- a malformed payload could
            # previously step past the \xee\xee terminator and misinterpret
            # trailing bytes as another record header; now parsing stops as
            # soon as the cursor reaches or passes the terminator.
            if i >= size - 2:
                break
        return ret
class IMUDriver:
    """Serial driver for the combined IMU board.

    Protocol: send a one-byte command, receive [0xFF,0xFF][size][payload],
    where the payload is decoded by ``Parser``.
    """
    __slots__ = ["s", "decoder"]
    def __init__(self, port):
        speed = 1000000  # board runs at 1 Mbaud (115200 also supported)
        self.s = Serial(port, speed, timeout=0.005)
        self.decoder = Parser()
        print(f">> IMUDriver opened {port}@{speed}")
    def close(self):
        self.s.close()
    def read(self, cmd=b'g'):
        """
        Return: array of data (decoded records + read timestamp) or None
        """
        self.s.reset_input_buffer()
        self.s.write(cmd)
        # wait for the command to flush and a minimal reply to arrive
        while self.s.out_waiting > 0:
            time.sleep(0.001)
        while self.s.in_waiting < 10:
            time.sleep(0.001)
        a = self.s.read(1)
        b = self.s.read(1)
        success = False
        # hunt for the 0xFF 0xFF start-of-message marker
        for _ in range(8):
            if a == b"\xff" and b == b"\xff":
                success = True
                break
            time.sleep(0.001)
            a = b
            b = self.s.read(1)
        if not success:
            print(f"{Fore.RED}** failed header **{Fore.RESET}")
            time.sleep(0.001)
            self.s.flushInput()
            return None
        data_size = ord(self.s.read(1))
        data = self.s.read(data_size)
        ret = self.decoder.decode(data)
        ret.append(time.time())
        return ret
    @staticmethod
    def _normalize3(x, y, z):
        """Scale (x, y, z) to unit length; raises ZeroDivisionError on a 0-vector."""
        norm = sqrt(x * x + y * y + z * z)
        return x / norm, y / norm, z / norm
    def compensate(self, accel, mag=None):
        """Tilt-compensated attitude from accel (and optionally mag).

        Returns (roll, pitch, heading) in radians; heading is None when no
        magnetometer reading is given, and (0, 0, 0) on a zero accel vector.
        """
        try:
            # BUG FIX: the original called an undefined ``normalize3`` and
            # raised NameError at runtime; normalization is now done by the
            # ``_normalize3`` helper above.
            ax, ay, az = self._normalize3(*accel)
            pitch = asin(-ax)
            if abs(pitch) >= pi / 2:
                roll = 0.0
            else:
                roll = asin(ay / cos(pitch))
            if mag:
                mx, my, mz = self._normalize3(*mag)
                x = mx * cos(pitch) + mz * sin(pitch)
                y = mx * sin(roll) * sin(pitch) + my * cos(roll) - mz * sin(roll) * cos(pitch)
                heading = atan2(y, x)
                # wrap heading into the [0, 2*pi) range
                if heading > 2 * pi:
                    heading -= 2 * pi
                elif heading < 0:
                    heading += 2 * pi
            else:
                heading = None
            return (roll, pitch, heading,)
        except ZeroDivisionError as e:
            print('Error', e)
            return (0.0, 0.0, 0.0,)
    def height(self, p):
        """
        given pressure in hPa, returns altitude in meters (barometric formula).
        """
        h = (1 - pow(p / 1013.25, 0.190263)) * 44330.8
        return h
| 25.897436
| 87
| 0.518317
| 724
| 6,060
| 4.308011
| 0.290055
| 0.019237
| 0.034626
| 0.028855
| 0.126643
| 0.110292
| 0.073421
| 0.030779
| 0
| 0
| 0
| 0.039786
| 0.35297
| 6,060
| 233
| 88
| 26.008584
| 0.755675
| 0.179703
| 0
| 0.184932
| 0
| 0
| 0.067708
| 0.010208
| 0
| 0
| 0.005
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.041096
| 0.020548
| 0.39726
| 0.041096
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e64b61756e2c5141a88d05ce00a52ea06f0af2cf
| 1,718
|
py
|
Python
|
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | 1
|
2021-06-01T05:26:48.000Z
|
2021-06-01T05:26:48.000Z
|
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | null | null | null |
main.py
|
hwangseonu/pokeka
|
39e56c59dfc85a0c73232ac9105766ef060aa90e
|
[
"MIT"
] | null | null | null |
import base64
import svgwrite
import svgwrite.container
import svgwrite.shapes
import svgwrite.image
import bs4
import os
from urllib.request import urlopen
from selenium import webdriver
# Download every card of a deck code from pokemoncard.co.kr and lay them out
# nine per page (3x3, 63x88mm each) on A4-sized SVG sheets for printing.
index = 0
code = input('덱 코드를 입력하세요.> ')
os.mkdir(code)
url = 'https://pokemoncard.co.kr/recipe/search?code=' + code
# NOTE(review): PhantomJS support is deprecated in recent Selenium releases;
# consider a headless Chrome/Firefox driver when upgrading.
driver = webdriver.PhantomJS('phantomjs.exe')
driver.implicitly_wait(5)
driver.get(url)
soup = bs4.BeautifulSoup(driver.page_source, 'lxml')
card_items = soup.select(f'#show-card-detail-{code} .card-item')
card_list = []
for item in card_items:
    cnt = int(item.select_one('.count').text)
    # repeat the image URL once per copy of the card in the deck
    for _ in range(cnt):
        img = item.select_one('img')
        card_list.append(img['src'])
# BUG FIX: the original conditional expression bound to the whole value and
# produced 0 pages whenever the card count was an exact multiple of 9
# (e.g. 18 cards -> no output at all).  Use ceiling division instead.
pages = -(-len(card_list) // 9)
start_x, start_y = 10.5, 16.5  # page margins in mm
for p in range(pages):
    x, y = 0, 0  # 3x3 grid position on the current page
    path = os.path.join(code, f'card{p + 1}.svg')
    dwg = svgwrite.Drawing(path, size=('210mm', '297mm'))  # A4 sheet
    background = svgwrite.container.Group()
    background.add(svgwrite.shapes.Rect(size=('210mm', '297mm'), fill='#ffe659'))
    dwg.add(background)
    cards_group = svgwrite.container.Group()
    for i in range(9):
        index = p * 9 + i
        if index >= len(card_list):
            break
        image = urlopen(card_list[index]).read()
        # embed the downloaded PNG as a base64 data URI at the card position
        # (stray trailing comma removed -- it built a useless 1-tuple)
        cards_group.add(svgwrite.image.Image(
            href='data:image/png;base64,' + base64.b64encode(image).decode(),
            width='63mm', height='88mm',
            x=str(start_x + (63 * x)) + 'mm', y=str(start_y + (88 * y)) + 'mm'))
        x += 1
        if x >= 3:
            x = 0
            y += 1
    dwg.add(cards_group)
    dwg.save()
| 24.542857
| 81
| 0.610594
| 251
| 1,718
| 4.103586
| 0.422311
| 0.046602
| 0.032039
| 0.021359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044106
| 0.234575
| 1,718
| 69
| 82
| 24.898551
| 0.739164
| 0
| 0
| 0
| 0
| 0
| 0.115832
| 0.026775
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.173077
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e64d3c1360f948a0e4e91a1e5bc77802db0ff7e0
| 2,148
|
py
|
Python
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 176
|
2020-06-11T19:16:33.000Z
|
2022-03-29T01:38:28.000Z
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 6
|
2020-06-26T05:26:56.000Z
|
2021-11-10T07:31:21.000Z
|
synthesis/paramGen/testcase2.py
|
hyunynim/DIST-Renderer
|
4717ee8cea77f4f413b61f380a893c6800d0bde5
|
[
"MIT"
] | 23
|
2020-06-11T21:43:03.000Z
|
2022-02-18T00:16:16.000Z
|
'''
2019-08-07 00:01
Method:
20 x 5 grid over (camera x lighting)
'''
# grid dimensions: 20 camera views x 5 lighting conditions per shape
VIEW_NUM, LIGHTING_NUM = 20, 5
import os, sys
# make the synthesis package root importable when this file runs as a script
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from src.param_decomposer import AllParams
def generate_params(shape_list, randomizer):
    """Build one AllParams per shape over the VIEW_NUM x LIGHTING_NUM grid.

    Returns (output_folder, [AllParams, ...]).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    basepath = os.path.dirname(here)
    script_name = os.path.splitext(os.path.basename(__file__))[0]
    folder = os.path.join(basepath, 'output', script_name)
    all_params_list = []
    shape_list = shape_list[:5]  # take only five for testing.
    print('generating rendering params...')
    from tqdm import tqdm
    for shape in tqdm(shape_list):
        view_cfg = []
        light_cfg = []
        truncparam_cfg = []
        cropbg_param_cfg = []
        fname_cfg = []
        # sample all cameras (view + truncation) and lights up front
        camera_list = [(randomizer.randomize_view(),
                        randomizer.randomize_truncparam())
                       for _ in range(VIEW_NUM)]
        lighting_list = [randomizer.randomize_lighting()
                         for _ in range(LIGHTING_NUM)]
        counter = 0
        for view, truncparam in camera_list:        # VIEW_NUM cameras
            for lighting in lighting_list:          # LIGHTING_NUM lights + bg
                cropbg_param = randomizer.randomize_cropbg_param()
                view_cfg.append(view)
                light_cfg.append(lighting)
                truncparam_cfg.append(truncparam)
                cropbg_param_cfg.append(cropbg_param)
                fname = os.path.join(
                    shape.shape_md5,
                    shape.shape_md5 + '_{0:08d}.png'.format(counter))
                fname_cfg.append(fname)
                counter += 1
        all_params_list.append(AllParams(
            shape, view_cfg, light_cfg, truncparam_cfg,
            cropbg_param_cfg, fname_cfg))
    return folder, all_params_list
| 39.777778
| 103
| 0.640596
| 272
| 2,148
| 4.8125
| 0.297794
| 0.045837
| 0.039725
| 0.034377
| 0.132162
| 0.122231
| 0.122231
| 0.076394
| 0.076394
| 0.076394
| 0
| 0.023183
| 0.256983
| 2,148
| 53
| 104
| 40.528302
| 0.796992
| 0.107542
| 0
| 0
| 0
| 0
| 0.025237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.078947
| 0
| 0.131579
| 0.026316
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e64ec15e4f7b983862625b28f909feef4c9e7bb4
| 3,894
|
py
|
Python
|
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | 5
|
2018-05-22T09:11:31.000Z
|
2022-03-11T02:32:01.000Z
|
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | null | null | null |
pygacal/camera/__init__.py
|
ereide/pyga-camcal
|
fd25748ddb11c5b05ef24a2deca2689e0d899875
|
[
"MIT"
] | null | null | null |
from clifford import g3c
import numpy as np
import scipy.optimize as opt
from pygacal.rotation.costfunction import restrictedImageCostFunction, restrictedMultiViewImageCostFunction
from pygacal.rotation import minimizeError
from pygacal.rotation.mapping import BivectorLineImageMapping, BivectorLineMapping, LinePropertyBivectorMapping, BivectorLineEstimationMapping
from pygacal.common.cgatools import Sandwich, Dilator, Translator, Reflector, inversion, Rotor, Transversor, I3, I5, VectorEquality, anticommuter, ga_exp, Meet
#Defining variables
layout = g3c.layout
locals().update(g3c.blades)
ep, en, up, down, homo, E0, ninf, no = (g3c.stuff["ep"], g3c.stuff["en"],
g3c.stuff["up"], g3c.stuff["down"], g3c.stuff["homo"],
g3c.stuff["E0"], g3c.stuff["einf"], -g3c.stuff["eo"])
class SLAM(object):
    """Joint estimation of camera poses (rotors) and a 3D line model.

    Args:
        model_estimate: list of line bivectors (the current scene model).
        lines_img_base: lines observed in the reference image.
        lines_imgs: per-image lists of observed lines; each list is parallel
            to ``model_estimate``.
        R_start: optional initial rotor estimate per image (None = unknown).
        mapping: cost/parameterisation strategy used by the optimiser.
    """
    def __init__(self, model_estimate, lines_img_base, lines_imgs, R_start = None, mapping = BivectorLineImageMapping):
        self.mapping = mapping
        self.model_estimate = model_estimate
        self.lines_img_base = lines_img_base
        self.lines_imgs = lines_imgs
        assert(len(lines_imgs[0]) == len(model_estimate))
        if R_start is None:
            self.R_estimate = [None for _ in range(len(lines_imgs))]
        else:
            assert(len(R_start) == len(lines_imgs))
            self.R_estimate = R_start
    def cost(self):
        """Mean mapping cost of the current model across all images."""
        cost = sum([self.mapping.costfunction(self.R_estimate[i], self.model_estimate, self.lines_imgs[i]) for i in range(len(self.lines_imgs))])
        return cost/len(self.lines_imgs)
    def updateLocation(self):
        """Re-optimise the rotor (pose) of every image against the model."""
        print("Update Location")
        for i in range(len(self.lines_imgs)):
            args = (self.model_estimate, self.lines_imgs[i])
            if (self.R_estimate[i] is None):
                x0 = None
            else:
                # warm-start from the previous rotor estimate
                x0 = self.mapping.inverserotorconversion(self.R_estimate[i])
            R_min, N_int = minimizeError(args, self.mapping, x0 = x0)
            self.R_estimate[i] = R_min
            print("N_int = ", N_int)
        print("Complete: Update location")
    def addImage(self, lines_img_new, R_img_new = None):
        """Register a new image's observed lines (pose optional)."""
        self.lines_imgs.append(lines_img_new)
        self.R_estimate.append(R_img_new)
    def improveLine(self, i, O1 = up(0)):
        """Triangulate model line ``i`` from image pairs, then average the guesses."""
        line_guesses = []
        # reference image vs the first posed image
        R_B = self.R_estimate[ 0 ]
        Line_A = self.lines_img_base[i]
        Line_B = self.lines_imgs[0][i]
        P_A = (O1 ^ Line_A).normal()
        P_B = (R_B * (O1 ^ Line_B) * ~R_B).normal()
        line_guesses.append(Meet(P_A, P_B))
        # every consecutive pair of posed images
        for j in range(1, len(self.R_estimate)):
            R_A = self.R_estimate[j-1]
            R_B = self.R_estimate[ j ]
            Line_A = self.lines_imgs[j-1][i]
            Line_B = self.lines_imgs[ j ][i]
            P_A = (R_A * (O1 ^ Line_A) * ~R_A).normal()
            P_B = (R_B * (O1 ^ Line_B) * ~R_B).normal()
            line_guesses.append(Meet(P_A, P_B))
        for guess in line_guesses:
            print("guess ", guess)
            print("model ", self.model_estimate[i], "\n")
        return self.averageLines(self.model_estimate[i], line_guesses)
    def averageLines(self, line_start, line_guesses):
        """Rotor-average: move ``line_start`` closest to all guesses and apply."""
        mapping = BivectorLineEstimationMapping
        args = [line_start, line_guesses]
        # small random perturbation as the optimiser's starting point
        x0 = np.random.normal(0.01, size=6)
        R_min, Nint = minimizeError(args, mapping, x0 = x0)
        return R_min * line_start * ~R_min
    def updateModel(self):
        """Re-estimate every model line from the current pose estimates."""
        # BUG FIX: the original test was `any(self.R_estimate) is None`,
        # which compares a bool to None and is therefore always False, so
        # missing poses were never initialised before triangulation.
        if any(R is None for R in self.R_estimate):
            self.updateLocation()
        print("Update Model ")
        for i in range(len(self.model_estimate)):
            self.model_estimate[i] = self.improveLine(i)
        print("Complete: model update")
| 34.460177
| 159
| 0.612994
| 511
| 3,894
| 4.455969
| 0.223092
| 0.059289
| 0.068511
| 0.024594
| 0.164251
| 0.151076
| 0.110672
| 0.083443
| 0.059728
| 0.059728
| 0
| 0.013432
| 0.273498
| 3,894
| 112
| 160
| 34.767857
| 0.791446
| 0.004623
| 0
| 0.103896
| 0
| 0
| 0.03125
| 0
| 0
| 0
| 0
| 0
| 0.025974
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.233766
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6543ff7671521504ac838b1689dbe9bfbccaca2
| 4,704
|
py
|
Python
|
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
sprout/runner.py
|
tjduigna/sprout
|
d8762ce7e6f04bb082b8ca1e65f73d8900338d9d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019, Sprout Development Team
# Distributed under the terms of the Apache License 2.0
import os
import asyncio
import asyncpg
from tortoise import Tortoise
import sprout
class Runner(sprout.Log):
    """An object-oriented interface
    to the sprout utilities.
    Args:
        cfg (str,dict): config or path to it
        env (str): key in cfg if it's nested
        rc (str): path to secrets yaml file
        app (str): app name
        schemas (list): schema names to manage
    """
    _loop = asyncio.get_event_loop()
    def _init_cfg(self, cfg):
        """Load and validate the connection config; merge secrets from ``rc``."""
        if isinstance(cfg, str):
            cfg = sprout.load_yml(cfg)
        if not isinstance(cfg, dict) or not cfg:
            raise Exception("cfg not understood")
        if self.env is not None:
            cfg = cfg[self.env]
        for key in ['host', 'port', 'database']:
            if key not in cfg:
                raise Exception(f"'{key}' not found in cfg")
        if 'username' not in cfg:
            raise Exception("'username' not found in cfg")
        if self.rc is not None:
            # secrets (e.g. password) override/extend the base config
            cfg.update(sprout.load_yml(self.rc))
        return cfg
    def __init__(self, cfg, env=None, rc=None,
                 app=None, schemas=None):
        self.env = env
        self.rc = rc
        self._cfg = self._init_cfg(cfg)
        self.app = app
        # avoid a shared mutable default
        if schemas is None:
            schemas = []
        self.schemas = schemas
    def db_str(self, dbname=None, schema=None):
        """Construct a 'jdbc' string"""
        c = self._cfg
        dbname = dbname or c['database']
        auth = f"{c['username']}:{c['password']}"
        url = f"{c['host']}:{c['port']}"
        base = f"{c['driver']}://{auth}@{url}"
        if schema is not None:
            return f"{base}/{dbname}?schema={schema}"
        return f"{base}/{dbname}"
    async def _create_database(self):
        """Create the app database (no-op if it already exists)."""
        if self.app is None:
            self.log.error("has no app")
            return
        # connect to the maintenance DB since the target may not exist yet
        con = await asyncpg.connect(self.db_str(dbname='postgres'))
        try:
            await con.execute(f"create database {self.app};")
        except asyncpg.exceptions.DuplicateDatabaseError:
            sprout.cfg.log.info(f"database {self.app} exists")
        finally:
            await con.close()
    async def _create_schemas(self):
        """Create every schema in ``self.schemas`` (existing ones are kept)."""
        if not self.app or not self.schemas:
            self.log.error("either has no app or schemas")
            return
        con = await asyncpg.connect(self.db_str())
        for name in self.schemas:
            try:
                await con.execute(f"create schema {name};")
            except asyncpg.exceptions.DuplicateSchemaError:
                sprout.cfg.log.info(f"schema {name} exists")
        await con.close()
    async def _init_schemas(self):
        """Create the database and schemas, then build each schema's tables."""
        await self._create_database()
        # BUG FIX: _create_schemas() already iterates every schema, but it
        # used to be awaited once per schema inside the loop below, re-running
        # the full create pass N times.  Run it exactly once, up front.
        await self._create_schemas()
        for schema in self.schemas:
            await Tortoise.init(
                db_url=self.db_str(schema=schema),
                modules={'models': [f'{self.app}.orm.{schema}']}
            )
            await Tortoise.generate_schemas()
            self.log.info(f"'{schema}' ready")
    async def _init_db_pool(self):
        """Build an asyncpg connection pool for the app database."""
        c = self._cfg.copy()
        c['user'] = c.pop('username')  # asyncpg expects 'user', not 'username'
        c.pop('driver')
        if self.app is None:
            self.log.error("no app name provided")
            return
        c['database'] = self.app
        # log the config without leaking the password, then restore it
        pw = c.pop('password')
        self.log.info(f"db_pool: {c}")
        c['password'] = pw
        pool = await asyncpg.create_pool(**c)
        return pool
    def create_database(self, app=None):
        """Initialize db"""
        self.app = app or self.app
        self._loop.run_until_complete(self._create_database())
    def create_schemas(self, app=None, schemas=None):
        """Initialize db schemas"""
        self.app = app or self.app
        self.schemas = schemas or self.schemas
        self._loop.run_until_complete(self._create_schemas())
    def init_schemas(self, app=None, schemas=None):
        """Initialize db tables"""
        self.app = app or self.app
        self.schemas = schemas or self.schemas
        self._loop.run_until_complete(self._init_schemas())
    def init_db_pool(self, app=None):
        """Initialize db connection pool"""
        self.app = app or self.app
        pool = self._loop.run_until_complete(self._init_db_pool())
        return pool
    def easy_up(self, app):
        """Initialize everything and return a db
        connection pool."""
        self.create_database(app=app)
        # empty list is falsy, so the `x or self.x` pattern below keeps any
        # schemas already configured on the instance
        schemas = []
        self.create_schemas(app=app, schemas=schemas)
        self.init_schemas(app=app, schemas=schemas)
        return self.init_db_pool(app=app)
| 32.895105
| 67
| 0.577594
| 612
| 4,704
| 4.326797
| 0.214052
| 0.055514
| 0.018882
| 0.018127
| 0.305891
| 0.200151
| 0.174094
| 0.138218
| 0.058912
| 0.058912
| 0
| 0.002135
| 0.302934
| 4,704
| 142
| 68
| 33.126761
| 0.805428
| 0.104379
| 0
| 0.179245
| 0
| 0
| 0.118074
| 0.032906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075472
| false
| 0.028302
| 0.04717
| 0
| 0.226415
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e654e957c98bffeffb8209db916fbae89bbb1792
| 2,726
|
py
|
Python
|
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
sangam_poem_csv.py
|
naturalstupid/sangam_tamil_bot
|
2b8117504f10ce4b4bdc2fa8160951374c9d1516
|
[
"MIT"
] | null | null | null |
import string
import regex
import pandas as pd

# NOTE: the original file imported `index_col` from pandas' private test
# suite (`pandas.tests.io.parser`). The name was never used and that module
# path is not a stable public API (it breaks across pandas versions), so
# the import has been removed.

sangam_text_folder = "./sangam_tamil_text/"
sangam_poem_folder = "./sangam_tamil_poems/"
sangam_csv_folder = "./sangam_tamil_csv/"
data_files = ['agananuru','purananuru','ainkurunuru','kalithokai', 'kurunthokai', 'natrinai', 'pathitrupathu', 'pattinapaalai',
              'mullaipaattu', 'nedunalvaadai', 'kurinjipaattu','malaipadukadaam','maduraikaanji','porunaraatrupadai',
              'perumpaanaatrupadai', 'sirupaanaatrupadai', 'thirumurugaatrupadai', 'ainthinaiezhupathu', 'ainthinaiaimpathu',
              'kaarnaarpathu','thinaimozhiaimpathu','kainnilai','thinaimaalainootraimbathu']#, 'thirukkural' ]
POEM_TYPES = ['அகநானூறு', 'புறநானூறு', 'ஐங்குறுநூறு', 'கலித்தொகை', 'குறுந்தொகை', 'நற்றிணை', 'பதிற்றுப்பத்து', 'பட்டினப்பாலை',
              'முல்லைப்பாட்டு', 'நெடுநல்வாடை','குறிஞ்சிப்பாட்டு','மலைபடுகடாம்', 'மதுரைக்காஞ்சி','பொருநராற்றுப்படை',
              'பெரும்பாணாற்றுப்படை', 'சிறுபாணாற்றுப்படை','திருமுருகாற்றுப்படை','ஐந்திணை எழுபது','ஐந்திணை ஐம்பது','கார் நாற்பது',
              'திணைமொழி ஐம்பது','கைந்நிலை','திணைமாலை நூற்றைம்பது']#,'திருக்குறள்']
EN_POEM_TYPES = ['Akanānūru','Puranānūru','Ainkurunūru','Kalithokai','Kurunthokai','Natrinai','Pathitruppathu','Pattinapaalai',
                 'Mullaipaattu','Nedunalvaadai','Kurinjippāttu','Malaipadukadaam','Maduraikaanji','Porunaratrupadai',
                 'Perumpaanatrupadai','Sirupaanaatrupadai','Thirumurugaatrupadai','Ainthinai Ezhupathu','Aithinai Aimbathu',
                 'Kaar Naarpathu','Thinaimozhi Aimpathu','Kainnilai','Thinaimaalai Nootraimbathu'
                 ]
sangam_poem_csv_file = sangam_csv_folder+"sangam_poems.csv"
sangam_poems_combined = []
csv_separator = ","

# Characters stripped from every poem: ASCII punctuation plus the curly
# quote characters that string.punctuation does not cover. One translate
# pass replaces the four chained .str.replace calls of the original.
STRIP_TABLE = str.maketrans('', '', string.punctuation + "‘’“”")

for i, sangam_poem in enumerate(data_files):
    csv_file = sangam_csv_folder+sangam_poem+".csv"  # e.g. agananuru.csv
    print("reading poems from", csv_file)
    df = pd.read_csv(csv_file, encoding='utf-8', sep=csv_separator, header=0, usecols=['poem'], index_col=None)
    df['poem_type'] = POEM_TYPES[i]
    df['poem'] = df['poem'].str.translate(STRIP_TABLE)
    # Raw string: the original passed "\d+" unescaped, which relies on
    # Python not interpreting \d and raises warnings on newer versions.
    df['poem'] = df['poem'].replace(r"\d+", "", regex=True)
    sangam_poems_combined.append(df)
print("Combining all sangam poems into a single database")
sangam_df = pd.concat(sangam_poems_combined, axis=0, ignore_index=True)
print("Writing sangam poems into", sangam_poem_csv_file)
sangam_df.to_csv(sangam_poem_csv_file, encoding='utf-8', sep=csv_separator, index=False, columns=["poem_type", "poem"])
| 69.897436
| 128
| 0.655906
| 527
| 2,726
| 3.548387
| 0.275142
| 0.041711
| 0.01123
| 0.038503
| 0.228342
| 0.163636
| 0.122995
| 0.122995
| 0.078075
| 0.06738
| 0
| 0.001676
| 0.124725
| 2,726
| 39
| 129
| 69.897436
| 0.724644
| 0.015407
| 0
| 0
| 0
| 0
| 0.454139
| 0.017151
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.102564
| 0
| 0.102564
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e65bcafb9495c37c2cdeefdfa42cd99132b78632
| 6,256
|
py
|
Python
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 34
|
2018-10-16T03:12:44.000Z
|
2022-02-21T09:53:13.000Z
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 12
|
2018-10-17T00:41:27.000Z
|
2021-03-16T12:58:33.000Z
|
flask_opa.py
|
hirosh7/flask-opa
|
a090083ce62944d1085a6923572ed9c68f0dbfa3
|
[
"MIT"
] | 8
|
2019-05-28T19:54:41.000Z
|
2022-02-23T13:19:33.000Z
|
"""
Flask Extension for OPA
"""
import requests
from flask.app import Flask
__version__ = "1.0.0"
class OPAException(Exception):
    """Base class for errors raised while evaluating a request in OPA."""

    def __init__(self, message):
        super().__init__(message)


class OPAUnexpectedException(OPAException):
    """The OPA evaluation failed in an unexpected way."""

    def __init__(self, message='Unexpected error'):
        super().__init__(message)


class AccessDeniedException(OPAException):
    """OPA denied the request."""

    def __init__(self, message='Denied'):
        super().__init__(message)


class OPAServerUnavailableException(OPAException):
    """The OPA server could not be reached."""

    def __init__(self, message='OPA Server unavailable'):
        super().__init__(message)
class OPA(object):
    """Flask extension that evaluates each request against an Open Policy
    Agent (OPA) server before the request handler runs.

    ``input_function`` builds the JSON input document sent to OPA;
    ``allow_function`` interprets OPA's JSON response (defaults to
    reading the top-level ``result`` key).
    """

    def __init__(self,
                 app: Flask,
                 input_function,
                 url: str = None,
                 allow_function=None,
                 wait_time: int = 20000):
        # Explicit arguments win; otherwise fall back to Flask config keys
        # (OPA_URL, OPA_WAIT_TIME, OPA_DENY_ON_FAIL, OPA_SECURED).
        super(OPA, self).__init__()
        self._app = app
        self._pep = {}  # name -> PEP instances registered via __call__
        self._input_function = input_function
        self._allow_function = allow_function or self.default_allow_function
        self._deny_on_opa_fail = app.config.get('OPA_DENY_ON_FAIL', True)
        self._url = url or app.config.get('OPA_URL')
        # wait_time defaults to 20000 and is only replaced by config when 0/None.
        self._wait_time = wait_time or app.config.get('OPA_WAIT_TIME')
        if self._app.config.get('OPA_SECURED', False):
            self.secured()
    @staticmethod
    def secure(*args, **kwargs):
        """Construct an OPA extension and immediately hook it into the app."""
        return OPA(*args, **kwargs).secured()
    def secured(self,
                url=None,
                input_function=None,
                allow_function=None):
        """Secure app: register check_authorization as a before_request hook.

        Raises ValueError if url/input/allow are not all configured.
        """
        # NOTE(review): before_request_funcs is a dict keyed by blueprint
        # name, so this membership test compares the bound method against
        # the dict's keys and looks always-true — confirm intended guard.
        if self.check_authorization not in self._app.before_request_funcs:
            self._url = url or self._url
            self._allow_function = allow_function or self._allow_function
            self._input_function = input_function or self._input_function
            if self._url and self._input_function and self._allow_function:
                self._app.before_request(self.check_authorization)
            else:
                raise ValueError("Invalid OPA configuration")
        return self
    def check_authorization(self):
        """Query OPA for the current request; swallow OPA errors unless
        deny_on_opa_fail is set."""
        input = self.input  # shadows the builtin; kept for compatibility
        url = self.url
        try:
            response = self.query_opa(url, input)
            if response is not None:
                self.check_opa_response(response)
        except OPAException as e:
            if self.deny_on_opa_fail:
                raise e
    def query_opa(self, url, input):
        """POST the input document to OPA; returns the HTTP response, or
        None on connection failure when deny_on_opa_fail is False."""
        self._app.logger.debug("%s query: %s. content: %s",
                               self.app, url, input)
        try:
            # NOTE(review): requests' timeout is in seconds; the default
            # wait_time of 20000 suggests milliseconds — confirm units.
            return requests.post(url, json=input, timeout=self.wait_time)
        except requests.exceptions.ConnectionError as e:
            if self.deny_on_opa_fail:
                raise OPAServerUnavailableException(str(e))
    def check_opa_response(self, response):
        """Validate the OPA HTTP response and apply allow_function.

        Raises OPAUnexpectedException on non-200 status and
        AccessDeniedException when allow_function rejects the result.
        """
        if response.status_code != 200:
            opa_error = "OPA status code: {}. content: {}".format(
                response.status_code, str(response)
            )
            self._app.logger.error(opa_error)
            raise OPAUnexpectedException(opa_error)
        resp_json = response.json()
        self._app.logger.debug(" => %s", resp_json)
        if not self.allow_function(resp_json):
            raise AccessDeniedException()
        return resp_json
    def __call__(self, name: str, url: str,
                 input_function=None,
                 allow_function=None):
        """Creates a PEP (a per-view Policy Enforcement Point decorator)."""
        return PEP(self, name, url, input_function, allow_function)
    # Simple accessors for the configured collaborators and settings.
    @property
    def pep(self):
        return self._pep
    @property
    def url(self):
        return self._url
    @url.setter
    def url(self, value):
        self._url = value
    @property
    def deny_on_opa_fail(self):
        return self._deny_on_opa_fail
    @deny_on_opa_fail.setter
    def deny_on_opa_fail(self, value):
        self._deny_on_opa_fail = value
    @property
    def input(self):
        # Computed: invokes input_function on every access.
        return self.input_function()
    @property
    def input_function(self):
        return self._input_function
    @property
    def allow_function(self):
        return self._allow_function
    @property
    def app(self):
        return self._app
    @property
    def wait_time(self):
        return self._wait_time
    @wait_time.setter
    def wait_time(self, value):
        self._wait_time = value
    @classmethod
    def default_allow_function(cls, response_json):
        """Allow iff OPA's response carries a truthy top-level 'result'."""
        return response_json.get('result', False)
class PEP(OPA):
    """Class to handle Policy Enforcement Points"""
    def __init__(self,
                 opa: OPA,
                 name: str,
                 url: str,
                 input_function=None,
                 allow_function=None,
                 deny_on_opa_fail: bool = False):
        # Deliberately calls super(OPA, self).__init__() — i.e. skips
        # OPA.__init__ in the MRO — because every attribute is copied
        # explicitly from the parent `opa` extension below.
        super(OPA, self).__init__()
        self._app = opa.app
        opa.pep[name] = self  # register this PEP on the parent extension
        self._url = url
        self._input_function = input_function or opa.input_function
        self._allow_function = allow_function or opa.allow_function
        self._deny_on_opa_fail = deny_on_opa_fail or False
        self._wait_time = opa.wait_time
        self._name = name or "PEP"
        if not (self._app and self._url and
                self._input_function and self._allow_function):
            # NOTE(review): "Police" below is presumably a typo for "Policy";
            # left unchanged as it is a runtime message.
            raise ValueError("Invalid Police Enforcement Point configuration")
    def check_authorization(self, *args, **kwargs):
        """Query OPA with input built from the decorated view's arguments."""
        _input = self.input(*args, **kwargs)
        response = self.query_opa(self.url, _input)
        if response is not None:
            self.check_opa_response(response)
    def __call__(self, f):
        """Decorator: wrap view `f` so OPA is consulted before each call.

        When deny_on_opa_fail is False, an OPAException makes the wrapper
        return None instead of invoking `f`.
        """
        def secure_function(*args, **kwargs):
            try:
                self.check_authorization(*args, **kwargs)
                return f(*args, **kwargs)
            except OPAException as e:
                if self.deny_on_opa_fail:
                    raise e
        return secure_function
    def input(self, *args, **kwargs):
        # Unlike OPA.input (a property), this forwards the view's arguments.
        return self._input_function(*args, **kwargs)
    def __str__(self):
        return "<{}>".format(self._name)
| 30.076923
| 78
| 0.605499
| 733
| 6,256
| 4.845839
| 0.155525
| 0.073198
| 0.030405
| 0.043919
| 0.323198
| 0.273086
| 0.221284
| 0.159628
| 0.134291
| 0.109797
| 0
| 0.002516
| 0.301151
| 6,256
| 207
| 79
| 30.222222
| 0.809927
| 0.038203
| 0
| 0.237179
| 0
| 0
| 0.040676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185897
| false
| 0
| 0.012821
| 0.076923
| 0.352564
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e65e9051029543698ac667d8972b05b6ac01763f
| 8,920
|
py
|
Python
|
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
model.py
|
Schrodinger1926/Project-3
|
88f8a1411a712a8ba62036e400ebce9e6df8e40f
|
[
"MIT"
] | null | null | null |
import sys
import os
import csv
from random import shuffle
import cv2
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten,\
Dense,\
Lambda,\
Conv2D,\
MaxPooling2D,\
Dropout, \
Cropping2D
DATA_DIR = 'data'
IMG_DIR = os.path.join(DATA_DIR, 'IMG')

# Load the driving log, dropping the CSV header row.
with open(os.path.join(DATA_DIR, 'driving_log.csv')) as csvfile:
    reader = csv.reader(csvfile)
    next(reader)
    samples = list(reader)

train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def fetch_view_angle(batch_sample):
    """
    Preprocess a single driving-log record.

    Loads the center/left/right camera frames, adds a horizontally
    flipped copy of each, and derives a steering angle per image
    (offset by +/-0.1 for the side cameras, negated for the flips).

    Arguments
    ---------
    batch_sample: array_like
        Elements as [path_center_image, path_left_image, path_right_image, steering_angle, ..]

    Returns
    ---------
    res_images: array_like
        Original and flipped images for each camera view (numpy ndarray).
    res_angles: array_like
        Steering angle of each returned image (float).
    """
    res_images, res_angles = [], []
    center_angle = float(batch_sample[3])
    # Steering correction per camera: side views are nudged toward center.
    corrections = {'center': 0.0, 'left': 0.1, 'right': -0.1}
    for idx, view in enumerate(('center', 'left', 'right')):
        filename = os.path.join(IMG_DIR, batch_sample[idx].split('/')[-1])
        image = cv2.imread(filename)
        offset = corrections[view]
        angle = center_angle + offset if offset else center_angle
        # Original frame and its steering angle ...
        res_images.append(image)
        res_angles.append(angle)
        # ... plus the mirror image with the mirrored angle.
        res_images.append(cv2.flip(image, 1))
        res_angles.append(-angle)
    return res_images, res_angles
def generator(samples, batch_size=32):
    """
    Yield shuffled (X, y) batches of augmented camera data, forever.

    Arguments
    ---------
    samples: numpy ndarray
        4 dimensional numpy array of images
    batch_size: int
        Size of the data to be generated

    Returns
    ---------
    4-D numpy ndarray of size(axis = 0) batch_size
    """
    num_samples = len(samples)
    while True:  # loop forever — the consumer decides when to stop
        shuffle(samples)
        for start in range(0, num_samples, batch_size):
            images, angles = [], []
            for batch_sample in samples[start:start + batch_size]:
                _images, _angles = fetch_view_angle(batch_sample=batch_sample)
                images.extend(_images)
                angles.extend(_angles)
            X_train = np.array(images)
            y_train = np.array(angles)
            yield sklearn.utils.shuffle(X_train, y_train)
def sanity_check_model():
    """
    Bare-bones model: flattened input features wired straight to a single
    output node. Intended for wiring up the pipeline when model
    performance does not matter yet.

    Returns
    ---------
    keras model
    """
    model = Sequential()
    for layer in (
            Flatten(input_shape=(160, 320, 3)),   # raw frames -> vector
            Lambda(lambda x: (x - 127)/127),      # center around zero
            Dense(1)):                            # single steering output
        model.add(layer)
    model.compile(loss='mse', optimizer='adam')
    return model
def LeNet():
    """
    Conventional LeNet-style model, useful for building intuition about
    model performance.

    Returns
    ---------
    keras model
    """
    model = Sequential()
    # Normalize and crop away hood / above-horizon pixels.
    model.add(Lambda(lambda x: (x - 127)/255, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # Two identical conv blocks: conv -> pool -> dropout.
    for _ in range(2):
        model.add(Conv2D(6, (5, 5), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.25))
    model.add(Flatten())
    # Fully connected head with dropout after each hidden layer.
    model.add(Dense(120))
    model.add(Dropout(0.5))
    model.add(Dense(84))
    model.add(Dropout(0.5))
    # Output: single steering value.
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
def nvidia():
    """
    Nvidia's end-to-end behaviour-cloning architecture: five
    convolutional layers followed by three fully connected layers.
    Reference: https://images.nvidia.com/content/tegra/automotive/images/2016/solutions/pdf/end-to-end-dl-using-px.pdf

    Returns
    ---------
    keras model
    """
    model = Sequential()
    # Normalize around zero and crop hood / above-horizon pixels.
    model.add(Lambda(lambda x: (x - 127)/255, input_shape=(160, 320, 3)))
    model.add(Cropping2D(cropping=((70, 25), (0, 0))))
    # Convolutional stack as (filters, kernel, stride), dropout after each.
    for filters, kernel, stride in ((24, 5, 2), (36, 5, 2), (48, 5, 2),
                                    (64, 3, 1), (64, 3, 1)):
        model.add(Conv2D(filters, (kernel, kernel),
                         strides=(stride, stride), activation='relu'))
        model.add(Dropout(0.25))
    model.add(Flatten())
    # Fully connected head, dropout after each hidden layer.
    for units in (100, 50, 10):
        model.add(Dense(units))
        model.add(Dropout(0.5))
    # Output: single steering value.
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='adam')
    return model
def get_model(name = 'sanity_check'):
    """
    Return the requested compiled Keras model.

    Arguments
    ---------
    name: string
        Name of the model to be trained; one of 'sanity_check',
        'LeNet' or 'nvidia'.

    Returns
    ---------
    Keras model

    Raises
    ---------
    ValueError
        If `name` is not a known model. (The original silently returned
        None here, deferring the failure to fit time.)
    """
    if name == 'sanity_check':
        return sanity_check_model()
    if name == 'LeNet':
        return LeNet()
    if name == 'nvidia':
        return nvidia()
    raise ValueError("Unknown model name: {!r}".format(name))
# Train the selected model with generators and plot the loss curves.
batch_size = 64
train_generator = generator(train_samples, batch_size = batch_size)
validation_generator = generator(validation_samples, batch_size = batch_size)
# Final Model Architecture to be used
model_name = 'nvidia'
# Each log row expands to 3 camera views x 2 (original + flip) images,
# hence the 3*2 multipliers on sample counts and steps below.
print("Traning samples : {} | Validation samples : {}"\
        .format(3*2*len(train_samples), 3*2*len(validation_samples)))
print(model_name)
model = get_model(name = model_name)
history_object = model.fit_generator(train_generator, steps_per_epoch= \
                2*3*len(train_samples)//batch_size, validation_data=validation_generator, \
                validation_steps=3*2*len(validation_samples)//batch_size, epochs=5)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
# Persist the loss plot and the trained weights.
plt.savefig('post_training_analysis.png')
model.save('model_{}.h5'.format(model_name))
| 27.875
| 118
| 0.638453
| 1,143
| 8,920
| 4.877515
| 0.246719
| 0.060269
| 0.034978
| 0.034439
| 0.456682
| 0.379193
| 0.33435
| 0.322152
| 0.3087
| 0.255247
| 0
| 0.028661
| 0.245067
| 8,920
| 319
| 119
| 27.962382
| 0.799228
| 0.33509
| 0
| 0.262774
| 0
| 0
| 0.059085
| 0.004627
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043796
| false
| 0
| 0.080292
| 0
| 0.175182
| 0.021898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e65f8dcc762ad6c2b71e1c9a7964a20b18c50603
| 3,807
|
py
|
Python
|
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
enlarge_form/enlarge_form.py
|
lester-lees/extra_addons_sz
|
cddaf972cf4ea64c553bcff0006eb006a115d5ee
|
[
"Apache-2.0"
] | null | null | null |
#! -*- encoding: utf-8 -*-
from openerp import addons
from openerp.osv import fields, osv, orm
from openerp import tools
from openerp.tools.translate import _
class ir_ui_view(orm.Model):
    """ir.ui.view extension adding an 'enlarge_form' flag.

    When the flag is set and the form architecture contains a <sheet>
    element, a companion inheriting view is created/activated that adds
    the 'enlarge_form' CSS class to the sheet so the form uses the full
    width of the screen.
    """
    _inherit = 'ir.ui.view'
    _columns = {
        'enlarge_form': fields.boolean('Use full width of the screen?', help='Set to true if you want to widden this form so that it will use full width of the screen.'),
    }

    def create(self, cr, uid, data, context=None):
        """Create the view, then sync its enlargement companion view."""
        result = super(ir_ui_view, self).create(cr, uid, data, context=context)
        if result:
            self.manipulate_sheet_tag(cr, uid, result)
        return result

    def write(self, cr, uid, ids, data, context=None):
        """Write the view(s), then sync their enlargement companion views."""
        result = super(ir_ui_view, self).write(cr, uid, ids, data, context=context)
        if result:
            self.manipulate_sheet_tag(cr, uid, ids)
        return result

    def has_sheet_tag(self, arch):
        """Return True if the view architecture contains a <sheet> element."""
        return arch.find('<sheet') >= 0

    def manipulate_sheet_tag(self, cr, uid, ids):
        """Create, activate or deactivate the companion view that widens
        the form's sheet, depending on 'enlarge_form' and on whether the
        arch (still) contains a <sheet> tag."""
        if not isinstance(ids, (tuple, list)):
            ids = [ids]
        for this in self.browse(cr, uid, ids):
            enlargement_view = str(this.model).replace('.', '_') + '_enlarge_form'
            # Does a companion view already exist for this model?
            view_exists = self.search(cr, uid, [('name', '=', enlargement_view), ('type', '=', 'form')])
            if view_exists:
                if isinstance(view_exists, (tuple, list)):
                    view_exists = view_exists[0]
            has_sheet_tag = self.has_sheet_tag(this.arch)
            # Decide what to do with the companion view.
            if view_exists:
                if not has_sheet_tag:
                    operation = 'deactivate_view'
                else:
                    if this.enlarge_form:
                        operation = 'activate_view'
                    else:
                        operation = 'deactivate_view'
            else:
                if has_sheet_tag and this.enlarge_form:
                    operation = 'create_view'
                else:
                    # Nothing to do for this record.
                    operation = False
            if not operation:
                # NOTE(review): this returns from the whole method, skipping
                # any remaining ids — preserved from the original; confirm
                # whether `continue` was intended.
                return True
            if operation == 'create_view':
                view_arch = """<?xml version='1.0'?><xpath expr='//form/sheet' position='attributes'><attribute name='class'>enlarge_form</attribute></xpath>"""
                vals = {
                    'name': enlargement_view,
                    'type': 'form',
                    'model': this.model,
                    'inherit_id': this.id,
                    'arch': view_arch,
                    'xml_id': 'enlarge_form.' + enlargement_view,
                    'active': 'True',
                }
                res = self.create(cr, uid, vals)
                # For some reason, active was always getting saved as false.
                if res:
                    # Parameterized query — the original interpolated `res`
                    # into the SQL string with %-formatting.
                    cr.execute("UPDATE ir_ui_view SET active=TRUE WHERE id=%s", (res,))
            elif operation == 'activate_view':
                self.write(cr, uid, view_exists, {'active': True})
            elif operation == 'deactivate_view':
                self.write(cr, uid, view_exists, {'active': False})
| 34.297297
| 186
| 0.504334
| 412
| 3,807
| 4.495146
| 0.291262
| 0.035097
| 0.025918
| 0.024298
| 0.273218
| 0.215983
| 0.191145
| 0.191145
| 0.154428
| 0.113391
| 0
| 0.002118
| 0.379827
| 3,807
| 110
| 187
| 34.609091
| 0.782296
| 0.115314
| 0
| 0.205882
| 0
| 0.029412
| 0.152083
| 0.029167
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.235294
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e666c5e9e0189a92959abe01ef942dcddf54c96d
| 16,028
|
py
|
Python
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 10
|
2016-12-28T22:06:31.000Z
|
2021-05-24T13:42:30.000Z
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | 4
|
2015-10-09T23:55:10.000Z
|
2020-04-04T08:09:22.000Z
|
build/build.py
|
lukas-ke/faint-graphics-editor
|
33eb9e6a3f2216fb2cf6ef9709a14f3d20b78fbf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Lukas Kemmer
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import configparser
import os
import subprocess
import sys
import faint_info
join_path = os.path.join
build_dir = os.path.split(os.path.realpath(__file__))[0]
os.chdir(build_dir) # Fixme: Don't change dir, use absolute paths.
root_dir = os.path.split(build_dir)[0]
sys.path.append(join_path(root_dir, "build-sys/"))
sys.path.append(join_path(root_dir, "test-sys/"))
import build_sys as bs # noqa: E402
from build_sys.util import list_cpp, strip_ext # noqa: E402
from build_sys.util.scoped import working_dir, no_output # noqa: E402
from test_sys import gen_runner # noqa: E402
import gencpp # noqa: E402
def recreate_config(platform):
    """Write a skeleton build.cfg with empty path entries for the user
    to fill in.

    platform -- 'msw' or 'linux'; controls the platform-specific keys
    ([nsis] section on Windows, compiler selection elsewhere).
    """
    # Bug fix: the original re-opened build.cfg *inside* its own with-
    # block (f = open(...)), leaking a handle and bypassing the context
    # manager's close/flush. A single managed handle is used instead.
    with open("build.cfg", 'w') as f:
        f.write("[folders]\n")
        f.write("wx_root=\n")
        f.write("cairo_include=\n")
        f.write("cairo_lib=\n")
        f.write("python_include=\n")
        f.write("python_lib=\n")
        f.write("pango_include=\n")
        f.write("pango_lib=\n")
        f.write("glib_include=\n")
        f.write("glib_lib=\n")
        f.write("glib_config_include=\n")
        f.write("pnglib_include=\n")
        if platform == 'msw':
            f.write("[nsis]\n")
            f.write("makensis=\n")
        f.write("[other]\n")
        if platform != 'msw':
            f.write('compiler=gcc\n')
        f.write("parallell_compiles=0\n")
        f.write("etags_folder=\n")
    print('Config file "build.cfg" created.\n'
          'You must update the file with correct paths.')
def read_config(platform):
    """Read build.cfg into a bs.BuildOptions object, validating that each
    configured folder contains the file it is expected to provide.
    Exits the process with status 1 on any configuration error."""
    def check_folder(name, folder, expected_content):
        """Verify that this folder (from an entry in the build.cfg) contains
        some expected file.
        """
        full_path = os.path.expanduser(os.path.join(folder, expected_content))
        if not os.path.exists(full_path):
            print(f'Error in build.cfg:\n {name}: {expected_content} not found in \n {folder}') # noqa: E501
            print(full_path)
            exit(1)
    bo = bs.BuildOptions()
    bo.platform = platform
    config = configparser.RawConfigParser()
    config.read('build.cfg')
    # Read every required key; a missing key aborts with a message.
    try:
        wx_root = config.get('folders', 'wx_root')
        wx_vc_lib = join_path(wx_root, "lib", "vc_lib")
        cairo_include = config.get('folders', 'cairo_include')
        cairo_lib = config.get('folders', 'cairo_lib')
        pango_include = config.get('folders', 'pango_include')
        pango_lib = config.get('folders', 'pango_lib')
        python_include = config.get('folders', 'python_include')
        python_lib = config.get('folders', 'python_lib')
        glib_include = config.get('folders', 'glib_include')
        glib_lib = config.get('folders', 'glib_lib')
        glib_config_include = config.get('folders', 'glib_config_include')
        bo.parallell_compiles = int(config.get('other', 'parallell_compiles'))
        pnglib_include = config.get('folders', 'pnglib_include')
    except configparser.NoOptionError as e:
        print("Error in build.cfg:", e)
        exit(1)
    # Verify that the specified paths contain expected includes or folders
    check_folder("wx_root", wx_root, "include/wx")
    check_folder("cairo_include", cairo_include, "cairo.h")
    check_folder("python_include", python_include, "Python.h")
    check_folder("pango_include", pango_include, "pango/pango.h")
    check_folder("pnglib_include", pnglib_include, "png.h")
    check_folder("glib_include", glib_include, "glib.h")
    check_folder("glib_config_include", glib_config_include, "glibconfig.h")
    bo.extra_resource_root = wx_root
    if bo.platform == 'msw':
        bo.makensis_exe = config.get('nsis', 'makensis')
    # On Linux the compiler must be named explicitly and be one we support.
    if bo.platform == 'linux':
        compiler = config.get('other', 'compiler')
        if compiler is None:
            print("Error: Compiler not specified in build.cfg.")
            print("Expected compiler=clang or compiler=gcc under [other].")
            exit(1)
        elif compiler not in ('gcc', 'clang', 'iwyu'):
            print(f'Error: Unsupported compiler specified in build.cfg: "{compiler}"') # noqa: E501
            print('Expected "clang", "gcc" or "iwyu"')
            exit(1)
        bo.compiler = compiler
    elif bo.platform == 'msw':
        bo.compiler = 'msvc'
    # These paths must be filled in on every platform.
    required_path_empty = (wx_root == "" or
                           python_lib == "" or
                           python_include == "" or
                           cairo_include == "" or
                           pango_include == "" or
                           pnglib_include == "")
    if required_path_empty:
        print("Error: Incorrect paths in build.cfg")
        exit(1)
    # cairo_lib may be empty on linux (system-wide install), not elsewhere.
    if cairo_lib == "" and not platform.startswith("linux"):
        print("Error: Incorrect paths in build.cfg")
        exit(1)
    bo.lib_paths = [
        cairo_lib,
        pango_lib,
        python_lib,
        glib_lib]
    # Drop entries left blank in build.cfg.
    bo.lib_paths = [l for l in bo.lib_paths if len(l) != 0]
    if bo.platform == "msw":
        bo.lib_paths.append(join_path(wx_root, 'lib', 'vc_lib'))
    bo.project_root = faint_info.FAINT_ROOT
    bo.system_include_folders = [
        join_path(wx_vc_lib, "mswu"),
        join_path(wx_root, "include"),
        python_include,
        cairo_include,
        pango_include,
        glib_include,
        glib_config_include,
        pnglib_include
    ]
    bo.include_folders = [bo.project_root]
    bo.wx_root = wx_root
    return bo
def read_build_options(platform):
    """Load build options from build.cfg; if the file is missing, write a
    template and exit so the user can fill it in."""
    if os.path.exists("build.cfg"):
        return read_config(platform)
    recreate_config(platform)
    exit(1)
def test_extra_objs(bo):
    """Collect the already-built Faint object files to link into a test
    binary, excluding the application entry-point objects."""
    def is_excluded(name):
        return (name.startswith('app.')
                or name.startswith('py-initialize-ifaint.'))

    suffix = "-debug" if bo.debug_compile else "-release"
    obj_root = join_path(os.getcwd(),
                         faint_info.target.faint.objs_folder_prefix) + suffix
    objs = []
    for item in os.listdir(join_path(os.getcwd(), obj_root)):
        if item.endswith(('.obj', '.o')) and not is_excluded(item):
            objs.append(join_path(obj_root, strip_ext(item)))
    return objs
def get_test_source_files(bo, folder):
    """Gather the source files for a test target: its own folder, its
    generated sources, and the shared test-util helpers."""
    source_root = join_path(bo.project_root, folder)
    tests_root = join_path(bo.project_root, "tests")
    search_dirs = (source_root,
                   join_path(source_root, 'gen'),
                   join_path(tests_root, "test-util"))
    return [join_path(d, f)
            for d in search_dirs
            for f in list_cpp(d)]
def no_source_folders_f(*args, **kwargs):
    """Source-folders callback for targets that build from explicit file
    lists only; accepts anything and always returns an empty list."""
    return []
def build(caption,
          platform,
          cmdline,
          obj_folder_prefix,
          out_name,
          precompile_steps,
          source_files,
          source_folders,
          extra_objs,
          msw_subsystem,
          forced_include_func):
    """Configure a bs.BuildOptions from build.cfg plus the per-target
    callbacks, run the target's precompile steps, and hand the options
    to build_sys. Returns build_sys's result.

    caption -- banner printed before the build
    precompile_steps -- callable(bo), code generation before compiling
    source_files/source_folders/extra_objs -- per-target callbacks
    msw_subsystem -- 'windows' or 'console' (MSW linker subsystem)
    """
    print(caption)
    print("--------------------")
    bo = read_build_options(platform)
    # Per-target object directories, debug and release side by side.
    bo.obj_root_release = join_path(
        os.getcwd(),
        f"{obj_folder_prefix}-release")
    bo.obj_root_debug = join_path(
        os.getcwd(),
        f"{obj_folder_prefix}-debug")
    bo.extra_objs = extra_objs(bo)
    bo.out_name_release = out_name
    bo.out_name_debug = out_name + "d"
    opts, args = cmdline
    bo.debug_compile = opts.debug
    # Code generation must run before the source list is collected,
    # since generated files are part of it.
    precompile_steps(bo)
    bo.source_files = source_files(platform, bo)
    bo.source_folders = source_folders(platform, False)
    bo.forced_include = forced_include_func(bo)
    bo.msw_subsystem = msw_subsystem
    return bs.build(bo, cmdline)
def exit_on_error(function, args, blank_line=True):
    """Run function(*args); terminate the process with its return code
    if that code is nonzero."""
    if blank_line:
        print()
    status = function(*args)
    if status != 0:
        exit(status)
def run_unit_tests(platform, cmdline):
    """Run the compiled C++ unit-test binary; return its exit status."""
    extension = ".exe" if platform == "msw" else ""
    test_root = join_path(faint_info.FAINT_ROOT, "tests")
    cmd = join_path(test_root, "run-unit-tests" + extension) + " --silent"
    result = subprocess.call(cmd,
                             shell=True,
                             cwd=test_root)
    print("* C++ Unit tests OK" if result == 0
          else "* C++ Unit tests failed!")
    return result
def run_py_tests(platform, cmdline):
    """Run the Python test suite quietly from its own directory; return
    0 on success, 1 on failure."""
    sys.path.append(faint_info.FAINT_TESTS_ROOT)
    import run_py_tests as py_tests
    with no_output(), working_dir(faint_info.FAINT_TESTS_ROOT):
        ok = py_tests.run_tests()
    if not ok:
        print("* Error: Python Unit tests failed!")
        return 1
    print('* Python Unit tests OK')
    return 0
def forced_include_func(bo):
    """Path of the header force-included in every compilation unit
    (MSW warning tuning)."""
    return join_path(bo.project_root, "util", "msw_warn.hh")
def build_faint(platform, cmdline):
    """Build the main Faint executable, including all code-generation
    steps (settings, method defs, resources, text expressions, help)."""
    def precompile_steps(bo):
        # Generate setting-handling code based on set_and_get.py
        gencpp.run("../python/generate")
        if not os.path.exists("../help/source/generated"):
            os.mkdir("../help/source/generated")
        bs.gen_method_def.generate_headers(
            faint_info.HEADERS_TO_GENERATE,
            faint_info.GENERATED_METHOD_DEF_PATH,
            faint_info.GENERATED_HELP_PATH)
        bs.gen_resource.run(bo.project_root)
        bs.gen_text_expressions.generate(
            hh_path=join_path(
                bo.project_root,
                "generated", "text-expression-constants.hh"),
            help_path=join_path(
                faint_info.GENERATED_HELP_PATH,
                "text-expressions.txt"))
        # HTML help
        bs.gen_help.run()
    def get_faint_src_files(platform, bo):
        # Every .cpp in the platform's source folders plus the root.
        src_folders = faint_info.get_src_folders(platform)
        src_folders = [join_path(bo.project_root, folder)
                       for folder in src_folders]
        src_folders.append(bo.project_root)
        files = []
        for folder in src_folders:
            files.extend([join_path(folder, f)
                          for f in list_cpp(folder)])
        return files
    def get_faint_extra_objs(bo):
        # Faint itself links no additional object files.
        return []
    return build(
        "Faint",
        platform,
        cmdline,
        "objs",
        "faint",
        precompile_steps,
        get_faint_src_files,
        faint_info.get_src_folders,
        get_faint_extra_objs,
        "windows",
        forced_include_func)
def build_benchmarks(platform, cmdline):
    """Build the benchmark-runner executable."""
    target = faint_info.target.benchmark

    def precompile_steps(bo):
        # Generate the benchmark main() from the benchmark sources.
        bench_root = join_path(bo.project_root, target.source_folder)
        gen_runner.gen_bench_runner(
            root_dir=bench_root,
            out_file=join_path(bench_root, 'gen', 'bench-runner.cpp'))
        bo.create_build_info = False

    return build(
        "Benchmarks",
        platform,
        cmdline,
        target.objs_folder_prefix,
        target.executable,
        precompile_steps,
        lambda _platform, bo: get_test_source_files(bo, target.source_folder),
        no_source_folders_f,
        test_extra_objs,
        "console",
        forced_include_func)
def build_unit_tests(platform, cmdline):
    """Build the C++ unit-test executable."""
    target = faint_info.target.unit_test

    def precompile_steps(bo):
        # Generate the test main() from the unit-test sources.
        tests_root = join_path(bo.project_root, target.source_folder)
        gen_runner.gen_test_runner(
            root_dir=tests_root,
            out_file=join_path(tests_root, 'gen', 'test-runner.cpp'))
        bo.create_build_info = False

    return build(
        "Unit tests",
        platform,
        cmdline,
        target.objs_folder_prefix,
        target.executable,
        precompile_steps,
        lambda _platform, bo: get_test_source_files(bo, target.source_folder),
        no_source_folders_f,
        test_extra_objs,
        "console",
        forced_include_func)
def build_image_tests(platform, cmdline):
    """Build the image-test executable."""
    target = faint_info.target.image_test

    def precompile_steps(bo):
        # Generate the image-test main() from the image-test sources.
        tests_root = join_path(bo.project_root, target.source_folder)
        gen_runner.gen_image_runner(
            root_dir=tests_root,
            out_file=join_path(tests_root, 'gen', 'image-runner.cpp'))
        bo.create_build_info = False

    return build(
        "Image tests",
        platform,
        cmdline,
        target.objs_folder_prefix,
        target.executable,
        precompile_steps,
        lambda _platform, bo: get_test_source_files(bo, target.source_folder),
        no_source_folders_f,
        test_extra_objs,
        "console",
        forced_include_func)
def build_gui_tests(platform, cmdline):
    """Build the GUI-test executable (a windowed application)."""
    target = faint_info.target.gui_test

    def precompile_steps(bo):
        bo.create_build_info = False

    def get_gui_test_source_files(platform_, bo):
        # GUI tests compile their own sources plus the shared test-util helpers.
        folders = (
            join_path(bo.project_root, target.source_folder),
            join_path(join_path(bo.project_root, "tests"), "test-util"),
        )
        sources = []
        for folder in folders:
            sources.extend(join_path(folder, f) for f in list_cpp(folder))
        return sources

    return build(
        "GUI-tests",
        platform,
        cmdline,
        target.objs_folder_prefix,
        target.executable,
        precompile_steps,
        get_gui_test_source_files,
        no_source_folders_f,
        test_extra_objs,
        "windows",
        forced_include_func)
def build_python_extension(platform, cmdline):
    """Build the Faint Python extension module (shared library target).

    :return: whatever `build` returns for the extension target.
    """
    def precompile_steps(bo):
        bo.create_build_info = False
        bo.target_type = bo.Target.shared_python_library
        # Ensure the output folder exists. EAFP instead of the old
        # exists()+mkdir() pair, which raced if the folder appeared
        # between the check and the mkdir call.
        # NOTE(review): the path is relative to the current working
        # directory — presumably the script always runs from the project
        # root; confirm.
        try:
            os.mkdir("../ext/out")
        except FileExistsError:
            pass

    target = faint_info.target.python_extension

    def extension_source_files(platform_, bo):
        src_folder = join_path(bo.project_root, target.source_folder)
        return [join_path(src_folder, f) for f in list_cpp(src_folder)]

    return build(
        "Python extension",
        platform,
        cmdline,
        target.objs_folder_prefix,
        target.out_lib,
        precompile_steps,
        extension_source_files,
        no_source_folders_f,
        test_extra_objs,
        "console",
        forced_include_func)
if __name__ == '__main__':
    # Select the toolchain: anything non-Linux is treated as Microsoft Windows.
    platform = ("linux" if sys.platform.startswith('linux') else "msw")
    cmdline = bs.parse_command_line()
    opts, args = cmdline
    # Build the application first; exit_on_error aborts the whole run on failure.
    exit_on_error(build_faint, (platform, cmdline), blank_line=False)
    if platform == 'msw': # Py-extension build not implemented for Linux yet.
        exit_on_error(build_python_extension, (platform, cmdline))
    if opts.debug:
        print("Fixme: Not building tests in debug.")
    else:
        # Build and run the test/benchmark executables (release builds only).
        exit_on_error(build_unit_tests, (platform, cmdline))
        exit_on_error(build_image_tests, (platform, cmdline))
        exit_on_error(build_benchmarks, (platform, cmdline))
        exit_on_error(build_gui_tests, (platform, cmdline))
        exit_on_error(run_unit_tests, (platform, cmdline))
        if platform == 'msw':
            exit_on_error(run_py_tests, (platform, cmdline))
    # Package an installer only for tagged (known-version) Windows builds.
    if opts.version != bs.unknown_version_str and platform == 'msw':
        bo = read_build_options(platform)
        bs.build_installer(opts.version, bo.makensis_exe)
    exit(0)
| 31.12233
| 109
| 0.630334
| 2,057
| 16,028
| 4.633933
| 0.140496
| 0.030214
| 0.011016
| 0.019618
| 0.374738
| 0.297524
| 0.260596
| 0.222409
| 0.203105
| 0.165653
| 0
| 0.003969
| 0.261106
| 16,028
| 514
| 110
| 31.182879
| 0.800895
| 0.061205
| 0
| 0.292621
| 0
| 0.002545
| 0.126159
| 0.012869
| 0
| 0
| 0
| 0.001946
| 0
| 1
| 0.081425
| false
| 0
| 0.02799
| 0.017812
| 0.170483
| 0.045802
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6671dd4f2c0b71c8a3b385713a43ac751148356
| 2,119
|
py
|
Python
|
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | null | null | null |
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | 1
|
2020-07-19T15:40:25.000Z
|
2020-07-19T15:40:25.000Z
|
printAlternatively.py
|
kamwithak/competitiveProgramming
|
ab4433568081900212a8a987d7bf8cb78d2698d1
|
[
"MIT"
] | null | null | null |
class Solution():
    """Holds two lists and prints their elements in alternating order."""

    def __init__(self, A, B):
        self.A = A
        self.B = B

    def printAlternativelySameSize(self):
        """
        Assumes that len(self.A) == len(self.B) != 0
        Alternatively print each element in the two Lists
        """
        if len(self.A) != len(self.B):
            raise Exception("the two lists must be of same length")
        if len(self.A) == len(self.B) == 0:
            raise Exception("Empty lists")
        # Lengths are equal, so a lockstep walk prints A[0], B[0], A[1], ...
        for left, right in zip(self.A, self.B):
            print(left)
            print(right)

    def printAlternativelyDifferentSize(self):
        """
        Alternatively print each element in the two Lists, regardless of List size
        """
        shared = min(len(self.A), len(self.B))
        # Alternate over the common prefix...
        for left, right in zip(self.A, self.B):
            print(left)
            print(right)
        # ...then flush whichever list still has elements.
        for left in self.A[shared:]:
            print(left)
        for right in self.B[shared:]:
            print(right)
# Demo: prints the two equal-length lists alternately (3 3 2 2 1 1, one per line).
obj = Solution(A=[3,2,1], B=[3,2,1])
obj.printAlternativelySameSize()
"""
Given two arrays, print each element alternatively
For example)
arr1 = [a,b,c,d]
arr2 = [e,f,g,h,i,j,k]
=> a e b f c g d h i j k
"""
class Solution():
def __init__(self, arr1, arr2):
self.arr1 = arr1
self.arr2 = arr2
self.n = len(self.arr1)
self.m = len(self.arr2)
def print_lists(self):
i, j = 0, 0
config = True
while(i < self.n and j < self.m):
if (config):
print(self.arr1[i])
i += 1
config = False
else:
print(self.arr2[j])
j += 1
config = True
while (i < self.n):
print(self.arr1[i])
i += 1
while (j < self.m):
print(self.arr2[j])
j += 1
# Demo: alternates the common prefix, then prints arr2's remaining tail.
obj = Solution(['a', 'b', 'c', 'd'], ['e','f','g','h','i','j','k'])
obj.print_lists()
| 21.40404
| 77
| 0.547428
| 312
| 2,119
| 3.685897
| 0.208333
| 0.085217
| 0.041739
| 0.028696
| 0.578261
| 0.536522
| 0.411304
| 0.337391
| 0.264348
| 0.264348
| 0
| 0.025367
| 0.293063
| 2,119
| 98
| 78
| 21.622449
| 0.742323
| 0.079755
| 0
| 0.546875
| 0
| 0
| 0.034483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078125
| false
| 0
| 0
| 0
| 0.109375
| 0.234375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e66883315cccecf4d95a549214dcc1704e5e4e46
| 429
|
py
|
Python
|
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | 9
|
2020-07-25T12:00:30.000Z
|
2021-07-07T09:30:57.000Z
|
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | null | null | null |
tests/test_exp.py
|
SiddeshSambasivam/MatterIx
|
e9d3bc54c4f5793cc1262c89c7cb9d5a9dd99139
|
[
"MIT"
] | null | null | null |
import unittest
from matterix import Tensor
import numpy as np
class TestTensorExponents(unittest.TestCase):
    """Checks value and gradient of elementwise self-multiplication (x * x)."""

    def test_simple_exp(self):
        base = np.random.randint(0, 10, (10, 10))
        tensor = Tensor(base, requires_grad=True)
        squared = tensor * tensor
        squared.backward(gradient=Tensor.ones_like(squared))
        # Forward: x * x == x ** 2; backward: d(x*x)/dx == 2x.
        assert squared.tolist() == (base ** 2).tolist()
        assert tensor.grad.tolist() == (2.0 * base).tolist()
| 25.235294
| 58
| 0.638695
| 57
| 429
| 4.736842
| 0.578947
| 0.02963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.230769
| 429
| 16
| 59
| 26.8125
| 0.787879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.090909
| false
| 0
| 0.272727
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6692d7fe75e939ec528720c041175b24637e974
| 1,722
|
py
|
Python
|
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
src/tests/test_task_2_4.py
|
Python-course/Python-course
|
59de0ef9928aeaa5dd185ceaafa334eb8e719217
|
[
"MIT"
] | null | null | null |
"""
Тесты для задания 2.4.
"""
from unittest import TestCase, main
from fractions import Fraction
from tasks import task_2_4
class TestFractionFromString(TestCase):
    """Tests for task_2_4.fraction_from_string (task 2.4)."""

    def test_fraction_from_string__CorrectArguments__ShouldReturnCorrectResult(self):
        """Valid mixed-number strings parse to the expected Fraction."""
        cases = [
            ("-2#1/2", Fraction(-5, 2)),
            ("1#1/3", Fraction(4, 3)),
            ("-1#1/6", Fraction(-7, 6)),
            ("0#1/7", Fraction(1, 7)),
            ("-0#1/7", Fraction(-1, 7)),
        ]
        for representation, expected in cases:
            with self.subTest():
                self.assertEqual(task_2_4.fraction_from_string(representation),
                                 expected,
                                 f'representation="{representation}"')

    def test_fraction_from_string__DenominatorIsZero__ShouldRaiseValueError(self):
        """A zero denominator must raise ValueError."""
        with self.assertRaises(ValueError):
            task_2_4.fraction_from_string("1#1/0")
class TestFractionToString(TestCase):
    """Tests for task_2_4.fraction_to_string (task 2.4)."""

    def test_fraction_to_string__CorrectArguments__ShouldReturnCorrectResult(self):
        """Fractions format back to the expected mixed-number strings."""
        cases = [
            (Fraction(-5, 2), "-2#1/2"),
            (Fraction(4, 3), "1#1/3"),
            (Fraction(-7, 6), "-1#1/6"),
            (Fraction(1, 7), "0#1/7"),
            (Fraction(-1, 7), "-0#1/7"),
        ]
        for fraction, expected in cases:
            with self.subTest():
                self.assertEqual(task_2_4.fraction_to_string(fraction),
                                 expected,
                                 f"fraction={fraction}")
if __name__ == "__main__":
main(verbosity=2)
| 28.7
| 103
| 0.577236
| 190
| 1,722
| 5.005263
| 0.3
| 0.016824
| 0.025237
| 0.0347
| 0.466877
| 0.395373
| 0.359621
| 0.359621
| 0.359621
| 0.304942
| 0
| 0.052245
| 0.288618
| 1,722
| 59
| 104
| 29.186441
| 0.724082
| 0.097561
| 0
| 0.0625
| 0
| 0
| 0.081923
| 0.022343
| 0
| 0
| 0
| 0
| 0.09375
| 1
| 0.09375
| false
| 0
| 0.09375
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e6693d31028174fac6a03f7991d1cc9f5830e4f5
| 1,007
|
py
|
Python
|
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
aioweb_auth/helpers.py
|
kreopt/aioweb_auth
|
e6a982296b52fc2068dd09afb0827dab527ef9b7
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp_security import authorized_userid
from aioweb.conf import settings
async def redirect_authenticated(request):
    """Redirect authenticated, non-AJAX requests away from the current page.

    Raises web.HTTPFound to the 'redirect_to' query parameter if given,
    otherwise to settings.AUTH_PRIVATE_URL (default '/'). No-op for
    anonymous users and AJAX requests.
    """
    user_id = await authorized_userid(request)
    if user_id and not request.is_ajax():
        # Falsy query value (missing or empty) falls back to the configured URL.
        destination = (request.query.get('redirect_to')
                       or getattr(settings, 'AUTH_PRIVATE_URL', '/'))
        raise web.HTTPFound(destination)
def auth_error_response(controller, reason, detail=None):
    """Build the failure response for an authentication attempt.

    AJAX clients get a 403 with the reason; browser clients get the error
    flashed under 'AUTH_ERROR' and a redirect to the index page.
    """
    if not controller.request.is_ajax():
        controller.flash['AUTH_ERROR'] = detail if detail else reason
        return web.HTTPFound(controller.path_for('index'))
    return web.HTTPForbidden(reason=reason)
async def auth_success_response(controller):
    """Build the success response for an authentication attempt.

    AJAX clients get a dict with the user id and CSRF token; browser
    clients are redirected via redirect_authenticated (implicit None
    return when no redirect is raised).
    """
    request = controller.request
    if request.is_ajax():
        user_id = await authorized_userid(request)
        return {'id': user_id, 'token': request.csrf_token}
    await redirect_authenticated(request)
| 34.724138
| 70
| 0.725919
| 126
| 1,007
| 5.587302
| 0.380952
| 0.120739
| 0.055398
| 0.059659
| 0.076705
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1857
| 1,007
| 28
| 71
| 35.964286
| 0.858537
| 0
| 0
| 0.090909
| 0
| 0
| 0.049652
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fd9ed01bdcac2a90cc2cff054eefd30d07deb0
| 3,901
|
py
|
Python
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 15
|
2020-03-04T17:30:25.000Z
|
2022-03-09T14:57:26.000Z
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 184
|
2020-03-06T20:55:15.000Z
|
2022-03-15T18:24:57.000Z
|
functions/aou/tests/upload_test_files.py
|
broadinstitute/wfl
|
1e5691100330a9afa0270fb4bab0a7d0a7d3bdc2
|
[
"BSD-3-Clause"
] | 2
|
2020-07-08T19:16:26.000Z
|
2020-07-10T18:47:30.000Z
|
""" Helper script that copies all of the files for an arrays sample into the dev aou input bucket. This will trigger
the submit_aou_workload cloud function for each file. When all files have been uploaded, it will launch an arrays
workflow via the workflow launcher (but only if a workflow with that chipwell barcode & analysis version has not
been run before).
Usage: python upload_test_files.py -b <bucket>
"""
import argparse
import json
import random
import sys
import subprocess
import tempfile
# Source locations (dev test-inputs bucket) for the arrays sample data
# and its chip metadata, copied to the destination bucket by main().
arrays_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/HumanExome-12v1-1_A/"
arrays_metadata_path = "gs://broad-gotc-dev-wfl-ptc-test-inputs/arrays/metadata/HumanExome-12v1-1_A/"
def get_destination_paths(bucket, prefix):
    """Return the GCS destination URIs for the sample upload.

    :param bucket: destination bucket name
    :param prefix: object path prefix inside the bucket
    :return: dict with 'arrays', 'arrays_metadata' and 'ptc' URIs
    """
    base = f"gs://{bucket}/{prefix}"
    return {
        "arrays": base + "/arrays/",
        "arrays_metadata": base + "/arrays/metadata/",
        "ptc": base + "/ptc.json",
    }
def get_ptc_json(bucket, prefix, chip_well_barcode, analysis_version, prod):
    """Build the ptc.json payload describing one arrays workflow notification.

    :param bucket: destination bucket the sample files live in
    :param prefix: object path prefix inside the bucket
    :param chip_well_barcode: chip well barcode of the sample
    :param analysis_version: analysis version number for this run
    :param prod: True targets the aou-prod environment, False aou-dev
    :return: dict ready to be serialized as ptc.json
    """
    chip = f"gs://{bucket}/{prefix}/arrays/HumanExome-12v1-1_A"
    meta = f"gs://{bucket}/{prefix}/arrays/metadata/HumanExome-12v1-1_A"
    executor = ("https://cromwell-aou.gotc-prod.broadinstitute.org" if prod
                else "https://cromwell-gotc-auth.gotc-dev.broadinstitute.org/")
    notification = {
        "analysis_version_number": analysis_version,
        "call_rate_threshold": 0.98,
        "chip_well_barcode": chip_well_barcode,
        "green_idat_cloud_path": chip + "/idats/7991775143_R01C01/7991775143_R01C01_Grn.idat",
        "params_file": chip + "/inputs/7991775143_R01C01/params.txt",
        "red_idat_cloud_path": chip + "/idats/7991775143_R01C01/7991775143_R01C01_Red.idat",
        "reported_gender": "Female",
        "sample_alias": "NA12878",
        "sample_lsid": "broadinstitute.org:bsp.dev.sample:NOTREAL.NA12878",
        "bead_pool_manifest_file": meta + "/HumanExome-12v1-1_A.bpm",
        "cluster_file": meta + "/HumanExomev1_1_CEPH_A.egt",
        "zcall_thresholds_file": meta + "/IBDPRISM_EX.egt.thresholds.txt",
        "gender_cluster_file": meta + "/HumanExomev1_1_gender.egt",
        "extended_chip_manifest_file": meta + "/HumanExome-12v1-1_A.1.3.extended.csv",
    }
    return {
        "executor": executor,
        "environment": "aou-prod" if prod else "aou-dev",
        "uuid": None,
        "notifications": [notification],
    }
def main(bucket, prod):
    """Copy the arrays sample and a freshly generated ptc.json into `bucket`.

    A random analysis version makes each upload a distinct workload so the
    cloud function is triggered anew every run.
    """
    chip_well_barcode = "7991775143_R01C01"
    analysis_version = random.randrange(sys.maxsize)
    prefix = f"chip_name/{chip_well_barcode}/{analysis_version}"
    ptc = get_ptc_json(bucket, prefix, chip_well_barcode, analysis_version, prod)
    destinations = get_destination_paths(bucket, prefix)
    with tempfile.TemporaryDirectory() as tmpdirname:
        with open(f'{tmpdirname}/ptc.json', 'w') as f:
            json.dump(ptc, f)
        # Upload data, metadata, then the ptc.json trigger file last.
        subprocess.run(["gsutil", "cp", "-r", arrays_path, destinations["arrays"]])
        subprocess.run(["gsutil", "cp", "-r", arrays_metadata_path, destinations["arrays_metadata"]])
        subprocess.run(["gsutil", "cp", f"{tmpdirname}/ptc.json", destinations["ptc"]])
if __name__ == '__main__':
    # Command-line entry: choose the destination bucket and environment.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-b",
        "--bucket",
        dest="bucket",
        default="dev-aou-arrays-input",
        help="The upload destination bucket."
    )
    parser.add_argument(
        "-p",
        "--prod",
        action="store_true",
        help="Use infrastructure in broad-aou rather than broad-gotc-dev."
    )
    args = parser.parse_args()
    main(args.bucket, args.prod)
| 47.573171
| 141
| 0.681364
| 512
| 3,901
| 4.982422
| 0.316406
| 0.070561
| 0.070561
| 0.075265
| 0.383379
| 0.332811
| 0.288514
| 0.288514
| 0.288514
| 0.27401
| 0
| 0.046909
| 0.174827
| 3,901
| 81
| 142
| 48.160494
| 0.745573
| 0.10382
| 0
| 0.059701
| 0
| 0.119403
| 0.505734
| 0.348624
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044776
| false
| 0
| 0.089552
| 0.029851
| 0.164179
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
c6fe87b224a7fdc40686930d3055375689c20f4c
| 2,019
|
py
|
Python
|
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
warp_gui.py
|
maciejczechowski/CarND-Advanced-Lane-Lines
|
058a17a2ac1e0ee4c1e8fa2fc5222cb7d2eaa230
|
[
"MIT"
] | null | null | null |
import numpy as np
import cv2
from src import lane_finder as lf
from src import parameters
import argparse
class WarpFinder:
    """Interactive OpenCV window for tuning the bird's-eye warp parameters.

    Shows the warped image with a grid overlay and two trackbars
    ('horizon' and 'x1'); re-renders on each slider change and blocks
    until any key is pressed.
    """

    def __init__(self, image, horizon = 400, x1 = 500):
        # Source image and the two tunable warp parameters.
        self.image1 = image
        self._horizon = horizon
        self._x1 = x1

        # Trackbar callbacks: store the new value, then redraw.
        def onChangeHorizon(pos):
            self._horizon = pos
            self._render()

        def onChangeX1(pos):
            self._x1 = pos
            self._render()

        cv2.namedWindow('result')
        # Slider ranges: horizon up to 720, x1 up to 640.
        cv2.createTrackbar('horizon', 'result', self._horizon, 720, onChangeHorizon)
        cv2.createTrackbar('x1', 'result', self._x1, 640, onChangeX1)

        self._render()

        print("Adjust the parameters as desired. Hit any key to close.")
        cv2.waitKey(0)  # block until any key press
        cv2.destroyWindow('result')

    def draw_grid(self, img, w, h, line_color=(0, 255, 0), thickness=1, type_= cv2.LINE_AA, pxstep=50):
        '''(ndarray, 3-tuple, int, int) -> void
        draw gridlines on img
        line_color:
            BGR representation of colour
        thickness:
            line thickness
        type:
            8, 4 or cv2.LINE_AA
        pxstep:
            grid line frequency in pixels
        '''
        x = pxstep
        y = pxstep
        # Vertical lines every pxstep pixels...
        while x < w:
            cv2.line(img, (x, 0), (x, h), color=line_color, lineType=type_, thickness=thickness)
            x += pxstep
        # ...then horizontal lines.
        while y < h:
            cv2.line(img, (0, y), (w, y), color=line_color, lineType=type_, thickness=thickness)
            y += pxstep

    def _render(self):
        # Warp with the current slider values; the grid overlay helps judge
        # whether the transform produces straight, parallel lane lines.
        warped1 = lf.toBirdsEye(self.image1, self._x1, self._horizon)
        self.draw_grid(warped1, 1280, 720)
        self._result = warped1
        cv2.imshow('result', self._result)
# Command-line entry: show the warp-tuning window for the given image file.
parser = argparse.ArgumentParser(description='Visualizes the warp transform.')
parser.add_argument('filename')
args = parser.parse_args()

image = cv2.imread(args.filename)
params = parameters.LaneFinderParams()
# Constructing WarpFinder opens the interactive window (blocks until keypress).
thresh = WarpFinder(image, params.warp_horizon, params.warp_x1)
| 25.884615
| 103
| 0.602278
| 245
| 2,019
| 4.82449
| 0.4
| 0.037225
| 0.021997
| 0.025381
| 0.07445
| 0.07445
| 0.07445
| 0
| 0
| 0
| 0
| 0.041812
| 0.289252
| 2,019
| 77
| 104
| 26.220779
| 0.781882
| 0.101535
| 0
| 0.069767
| 0
| 0
| 0.077012
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.116279
| 0
| 0.255814
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05007036c73f4b4b153318ac832ce22662ff0e07
| 2,041
|
py
|
Python
|
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
election_data/uc_santa_barbara/2017/src_data/parser/ElectionBallotParser.py
|
dkupsh/stvote
|
dbe906681a171c5654341b93dc0fb5b0208cfd33
|
[
"MIT"
] | null | null | null |
###############
# Ballot Parser for UC Berkeley Results
#
# This ballot parser has been tailored to the ballot
# system used by UCB. If you use another software
# to define ballots, ensure the data returned by the
# ballot parser returns data in the following fashion:
#
# [
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# {
# "ballot_id": "unique_ballot_id",
# "ballot_data": {
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# "race_id": [
# "candidate_id",
# "candidate_id",
# ...
# ],
# ...
# }
# },
# ...
# ]
#
# The race_id value should correspond to the value
# specified in the configuration file.
#
# Each list identified by the race_id should be in
# voting-choice order, where the first candidate
# within the list corresponds to the ballot's first
# choice vote.
#
# The candidate_id should correspond to the value
# returned by the election candidate parser.
#
# Last Modified: April 12, 2016
###############
import json
import uuid
def parse(ballot_file_path, races):
    """Parse a ballot JSON file into the parser output format.

    :param ballot_file_path: path to a JSON file with a top-level "ballots"
        list; each ballot maps race ids to candidate-id lists in
        voting-choice order.
    :param races: iterable of race objects; each race's id() names a key
        expected in every ballot entry.
    :return: list of {"ballot_id": <uuid str>, "ballot_data": {race_id: [...]}}
    :raises KeyError: if a ballot lacks an entry for one of the given races.
    """
    ballots_data = []

    # Open the ballot file; undecodable bytes are ignored rather than fatal.
    with open(ballot_file_path, encoding="UTF-8", errors="ignore") as ballot_file:
        # json.load streams from the file object (was read() + json.loads).
        ballot_file_data = json.load(ballot_file)

    for ballot in ballot_file_data["ballots"]:
        ballots_data.append({
            # Fresh random id per ballot; uniqueness, not traceability.
            "ballot_id": str(uuid.uuid4()),
            "ballot_data": {race.id(): ballot[race.id()] for race in races},
        })

    return ballots_data
| 26.166667
| 82
| 0.526213
| 218
| 2,041
| 4.724771
| 0.366972
| 0.087379
| 0.100971
| 0.066019
| 0.221359
| 0.170874
| 0.170874
| 0.170874
| 0.170874
| 0.170874
| 0
| 0.005966
| 0.342969
| 2,041
| 77
| 83
| 26.506494
| 0.762118
| 0.641352
| 0
| 0
| 0
| 0
| 0.075969
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05018611063b1ec5bb0bc5adba5e6965095d97d4
| 5,971
|
py
|
Python
|
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
deco/__init__.py
|
patdex/deco
|
83141719b3b68fb1e99b43384a25288aea5c3e8c
|
[
"MIT"
] | null | null | null |
import collections
import inspect
import time
import re
# module config:
disable_tracing = False  # globally disables Trace output without removing decorators
indent = True  # when True, log lines carry a tree-style indent prefix

# indentation for log output
_log_indent = dict()  # maps a log method to its current nesting depth
def indent_str(cnt, end=False):
    """Build the tree-style prefix for a log line.

    :param cnt: indentation count (nesting depth)
    :param end: True when closing the current indentation level
    :return: prefix string; empty when indentation is disabled
    """
    if not indent:
        return ''
    marker = '\\ ' if end else '/ '
    return '| ' * cnt + marker
class _MyOrderedDict(collections.OrderedDict):
"""
format representation string vor log output
"""
def __repr__(self):
ret = str()
for key, val in self.items():
ret += '{0}={2}({1}), '.format(key, val, val.__class__.__name__)
return ret[:-2]
class _MyList(list):
"""
format representation string vor log output
"""
def __repr__(self):
ret = str()
for val in self:
ret += '{0}({1}), '.format(val, val.__class__.__name__)
return ret[:-2]
def _get_wrapped_method(func):
"""
get inner method if multiple decorators are used
:param func:
:return:
"""
while hasattr(func, '__wrapped__'):
func = getattr(func, '__wrapped__')
return func
def _wrap(wrapper, func):
"""
save wrapped function if multiple decorators are used
:param func:
:return:
"""
setattr(wrapper, '__wrapped__', func)
def argument_types(func):
    """Map parameter names to their annotated type strings.

    Parses the textual signature, so values are the raw annotation text
    (surrounding whitespace included). Unannotated parameters are omitted.

    :param func:
    :return: dictionary with argument name and annotation text
    """
    signature = inspect.signature(func)
    # NOTE(review): [^)]+ breaks on ')' inside defaults/annotations — rare,
    # preserved from the original implementation.
    match = re.match(r"\(([^)]+)\)", str(signature))
    if match is None:
        # Zero-parameter function: previously this crashed with
        # AttributeError on None.group(1); return an empty mapping instead.
        return dict()
    param_list = match.group(1).split(', ')
    types = dict()
    for param in param_list:
        try:
            # Split on the first ':' only, so annotation text stays intact.
            name, annotation = param.split(':', 1)
            types[name] = annotation.split('=')[0]
        except ValueError:
            # unannotated parameter, e.g. "b=2"
            pass
    return types
def collect_all_arguments_to_dict(func, args, kwargs):
    """
    Merge positional, keyword and default arguments into one ordered
    mapping, as if everything had been passed by keyword.

    :param func:
    :param args:
    :param kwargs:
    :return: dictionary with all method arguments and their values (like kwargs)
    """
    params = inspect.signature(func).parameters
    arg_names = [name for name in params]
    merged = _MyOrderedDict()
    # positional arguments first, in declaration order
    for name, value in zip(arg_names, args):
        merged[name] = value
    # then explicitly passed keyword arguments
    for name in arg_names:
        if name in kwargs:
            merged[name] = kwargs[name]
    # finally fall back to declared defaults for anything still missing
    for name, param in params.items():
        if name in arg_names and name not in merged:
            merged[name] = param.default
    return merged
class Trace:
    """
    Decorator class: logs a call message (name + arguments) before, and a
    return message (value + duration) after, each invocation, using the
    log method supplied at construction.
    """
    def __init__(self, log_method, disable=False):
        """
        :param log_method: logging method
        :param disable: disable logging
        """
        self.log_method = log_method
        self.disabled = disable

    def __call__(self, func):
        """
        :param func: decorated method
        :return: wrapper that logs around func
        """
        def wrapper(*args, **kwargs):
            if self.disabled or disable_tracing:
                # Tracing off: just run the wrapped function.
                # Bug fix: this used to `return func` (the function object
                # itself, uncalled), so disabling tracing broke every
                # decorated call site.
                return func(*args, **kwargs)
            inner_func = _get_wrapped_method(func)
            ind = self._increment_indent()  # indent log message
            # all arguments to OrderedDict
            all_as_kwargs = collect_all_arguments_to_dict(inner_func, args, kwargs)
            self.log_method(indent_str(ind) + self._call_message(inner_func, all_as_kwargs))
            start_time = time.time()
            ret = func(*args, **kwargs)  # run decorated method
            exec_time = time.time() - start_time
            self.log_method(indent_str(ind, True) + self._return_message(inner_func, ret, exec_time))
            self._decrement_indent()  # redo indent log message
            return ret
        _wrap(wrapper, func)
        return wrapper

    @staticmethod
    def _call_message(func, all_as_kwargs):
        """
        format call log message: "name(arg=type(value), ...)"
        :param func:
        :param all_as_kwargs:
        :return:
        """
        message = '{0}({1})'.format(func.__name__, all_as_kwargs)
        return message

    @staticmethod
    def _return_message(func, ret, exec_time):
        """
        format return log message: "type(value) in N.NNNms"
        (tuple returns are expanded element-wise)
        :param func:
        :param ret:
        :return:
        """
        ret_arg_str = str(_MyList(ret)) if isinstance(ret, tuple) else '{1}({0})'.format(ret, ret.__class__.__name__)
        message = '{1} in {2:.3f}ms'.format(func.__name__, ret_arg_str, exec_time * 1000)
        return message

    def _increment_indent(self):
        """Increase and return the nesting depth for this log method."""
        if not indent:
            return ''
        if self.log_method not in _log_indent:
            _log_indent[self.log_method] = 0
        else:
            _log_indent[self.log_method] += 1
        return _log_indent[self.log_method]

    def _decrement_indent(self):
        """Decrease the nesting depth for this log method."""
        if not indent:
            return ''
        _log_indent[self.log_method] -= 1
def cast_std_arguments(func):
    """
    Decorator: cast each argument to its annotated (standard) type before
    calling the wrapped function.

    :param func:
    :return: wrapper that converts arguments, then invokes func
    """
    def wrapper(*args, **kwargs):
        inner_func = _get_wrapped_method(func)
        # all arguments to OrderedDict
        all_as_kwargs = collect_all_arguments_to_dict(inner_func, args, kwargs)
        arg_types = argument_types(inner_func)
        all_as_kwargs_casted = collections.OrderedDict()
        for arg_name, arg_value in all_as_kwargs.items():
            arg_type = arg_types.get(arg_name, None)
            if arg_type:  # only cast when a type annotation exists
                # NOTE(review): eval of annotation text — acceptable only
                # while annotations come from trusted source code.
                try:
                    arg_value = eval('{0}(arg_value)'.format(arg_type))
                except NameError:  # unknown namespace
                    pass
            all_as_kwargs_casted[arg_name] = arg_value
        # run decorated method with casted arguments
        return func(**all_as_kwargs_casted)
    _wrap(wrapper, func)
    return wrapper
| 26.420354
| 117
| 0.601574
| 722
| 5,971
| 4.66759
| 0.18144
| 0.023739
| 0.052226
| 0.01543
| 0.339763
| 0.225519
| 0.135905
| 0.106825
| 0.081899
| 0.081899
| 0
| 0.006177
| 0.295093
| 5,971
| 225
| 118
| 26.537778
| 0.794488
| 0.192598
| 0
| 0.288288
| 0
| 0
| 0.028002
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.144144
| false
| 0.018018
| 0.036036
| 0
| 0.36036
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05067ca48cd1bf1cfe7a6e17e6b2e4d00c579d5b
| 3,780
|
py
|
Python
|
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
app/mysql2json.py
|
ToHanwei/CORD
|
09f75b136431222ec945b2ddd6798ae805ec332e
|
[
"MIT"
] | null | null | null |
#!coding:utf-8
import json
import pymysql
import pandas as pd
class ReadJson():
    """Reads a MySQL table into a pandas DataFrame and converts it to JSON,
    applying a caller-supplied sort order and column filters.
    """

    def __init__(self, host, user, passwd, db, table, sort=None, _filter=None):
        """
        :param host: MySQL host
        :param user: MySQL user
        :param passwd: MySQL password
        :param db: database name
        :param table: table to read
        :param sort: dict with "order" ("ascending"/"descending"/falsy) and "prop"
        :param _filter: dict mapping column name -> [list-of-allowed-values]
        """
        self.host = host
        self.user = user
        self.passwd = passwd
        self.db = db
        self.table = table
        self.sort = sort
        self.filter = _filter
        self.data = ''       # DataFrame once loaded
        self.jsondata = ''   # list of row dicts once converted

    def _filter_data(self, col, inlist):
        """
        table filter function: row passes if its `col` value matches any
        element of `inlist`. Returns [] when `inlist` is empty (no filter).
        """
        bools = []
        for ele in inlist:
            bools.append(self.data[col] == ele)
        bools = [any(elist) for elist in zip(*bools)]
        return bools

    def conecter_to_mysql(self):
        """Open and return a new MySQL connection."""
        connec = pymysql.connect(
            host=self.host,
            user=self.user,
            password=self.passwd,
            database=self.db,
            charset='utf8',
            use_unicode=True
        )
        return connec

    def select_row(self, rowname, colname):
        """Fetch all rows where `colname` equals `rowname`.

        Security fix: the value is now bound as a query parameter instead of
        being concatenated into the SQL string, which allowed SQL injection
        through `rowname`. Table/column names cannot be parameterized and
        are expected to come from trusted configuration, not user input.
        """
        connec = self.conecter_to_mysql()
        Cursor = connec.cursor()
        sql = "SELECT * FROM `" + self.table + "` where " + colname + "=%s"
        Cursor.execute(sql, (str(rowname),))
        row = Cursor.fetchall()
        return row

    def read_receptor(self):
        """Load the receptor table, filter by "cluster", sort, store JSON."""
        connec = self.conecter_to_mysql()
        # prepare sort/filter settings
        sort_order = self.sort["order"]
        sort_prop = self.sort["prop"]
        filters = self.filter['cluster'][0]
        _type = ""
        if sort_order == "ascending":
            _type = True
        elif sort_order == "descending":
            _type = False
        # read MySQL data to DataFrame
        sql = "SELECT * FROM " + "`" + self.table + "`;"
        self.data = pd.read_sql(sql, connec)
        bools = self._filter_data("cluster", filters)
        if bools:
            self.data = self.data[bools]
        if sort_order:
            self.data.sort_values(by=[sort_prop], ascending=_type, inplace=True)
        # convert DataFrame rows to a list of dicts
        self.jsondata = json.loads(self.data.to_json(orient="records"))

    def read_json(self):
        """
        Load the table, apply Order/Family/Genus filters and the sort
        settings, then store the rows as JSON in self.jsondata.
        """
        connec = self.conecter_to_mysql()
        # prepare sort/filter settings
        sort_order = self.sort["order"]
        sort_prop = self.sort["prop"]
        filter_order = self.filter["order"][0]
        filter_Family = self.filter["Family"][0]
        filter_Genus = self.filter["Genus"][0]
        _type = ""
        if sort_order == "ascending":
            _type = True
        elif sort_order == "descending":
            _type = False
        # read MySQL data to DataFrame
        sql = "SELECT * FROM " + self.table + ";"
        self.data = pd.read_sql(sql, connec)
        # filter function: a row must pass every non-empty filter
        order_bools = self._filter_data("Order", filter_order)
        family_bools = self._filter_data("Family", filter_Family)
        genus_bools = self._filter_data("Genus", filter_Genus)
        bools = [elist for elist in (order_bools, family_bools, genus_bools) if elist]
        bools = [all(elist) for elist in zip(*bools)]
        if bools:
            self.data = self.data[bools]
        # sort function
        if sort_order:
            self.data.sort_values(by=[sort_prop], ascending=_type, inplace=True)
        # convert DataFrame to json
        self.jsondata = json.loads(self.data.to_json(orient="records"))

    def build_filter(self, colname):
        """
        Build a filter list for the UI: unique column values as
        {'text': v, 'value': v} dicts, sorted, with None (if present) last.
        """
        elems = list(set(self.data[colname].values))
        if None in elems:
            # sorted() cannot compare None with real values; drop, sort, re-append.
            elems = list(filter(None, elems))
            elems = sorted(elems)
            elems.append(None)
        else:
            elems = sorted(elems)
        outfilter = [{'text': ele, 'value': ele} for ele in elems]
        return outfilter
| 30.983607
| 100
| 0.552381
| 434
| 3,780
| 4.656682
| 0.218894
| 0.05146
| 0.029688
| 0.037605
| 0.399307
| 0.376051
| 0.35329
| 0.325581
| 0.325581
| 0.325581
| 0
| 0.00237
| 0.330159
| 3,780
| 121
| 101
| 31.239669
| 0.795814
| 0.057672
| 0
| 0.348315
| 0
| 0
| 0.053689
| 0
| 0.022472
| 0
| 0
| 0
| 0
| 1
| 0.078652
| false
| 0.033708
| 0.033708
| 0
| 0.168539
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0506e61a9ace0c2d5bc6f23b2cc7e615718656a8
| 3,583
|
py
|
Python
|
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | 12
|
2015-07-12T20:07:10.000Z
|
2022-02-10T05:16:14.000Z
|
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | null | null | null |
dict2xml.py
|
lucasicf/dict2xml
|
7421414c71e1d95a4d60e84f942379edb4df2df5
|
[
"BSD-3-Clause"
] | 7
|
2015-05-21T09:39:52.000Z
|
2021-02-28T22:01:15.000Z
|
# -*- coding: utf-8 -*-
from xml.dom import minidom
import re
# Thrown on any dictionary error
class Dict2XMLException(Exception):
    """Raised when the input dictionary is structurally invalid for XML conversion."""
    pass
def _dict_sort_key(key_value):
key = key_value[0]
match = re.match('(\d+)__.*', key)
return match and int(match.groups()[0]) or key
# Iterate a dict's (key, value) pairs ordered by _dict_sort_key, so keys
# with a numeric 'NN__' prefix come out in their declared order.
# NOTE: uses dict.iteritems — this module targets Python 2.
_iter_dict_sorted = lambda dic: sorted(
    dic.iteritems(), key=(lambda key_value: _dict_sort_key(key_value))
)
def _remove_order_id(key):
match = re.match('\d+__(.*)', key)
return match and match.groups()[0] or key
# Tags passed to _check_errors describing what *value* is being validated.
DATATYPE_ROOT_DICT = 0  # the whole input dictionary
DATATYPE_KEY = 1        # an element/tag name
DATATYPE_ATTR = 2       # a single (attribute, value) pair
DATATYPE_ATTRS = 3      # a dict of attributes
def _check_errors(value, data_type):
    """Validate *value* according to the DATATYPE_* tag.

    Raises Dict2XMLException on any violation; returns None otherwise.
    NOTE(review): relies on ``basestring`` and indexable ``dict.values()``,
    so this module is Python 2 only.
    """
    if data_type == DATATYPE_ROOT_DICT:
        # The root must be a dict with exactly one key whose value is not a list.
        if isinstance(value, dict):
            values = value.values()
            if len(values) != 1:
                raise Dict2XMLException(
                    'Must have exactly one root element in the dictionary.')
            elif isinstance(values[0], list):
                raise Dict2XMLException(
                    'The root element of the dictionary cannot have a list as value.')
        else:
            raise Dict2XMLException('Must pass a dictionary as an argument.')
    elif data_type == DATATYPE_KEY:
        if not isinstance(value, basestring):
            raise Dict2XMLException('A key must be a string.')
    elif data_type == DATATYPE_ATTR:
        # *value* is an (attribute, attribute-value) pair; both must be strings.
        (attr, attrValue) = value
        if not isinstance(attr, basestring):
            raise Dict2XMLException('An attribute\'s key must be a string.')
        if not isinstance(attrValue, basestring):
            raise Dict2XMLException('An attribute\'s value must be a string.')
    elif data_type == DATATYPE_ATTRS:
        if not isinstance(value, dict):
            raise Dict2XMLException('The first element of a tuple must be a dictionary '
                                    'with a set of attributes for the main element.')
# Recursive core function
def _buildXMLTree(rootXMLElement, key, content, document):
    """Append an XML element for (key, content) under *rootXMLElement*.

    *content* may be a plain string, a nested dict, a list (the key is
    replicated once per item), or an (attrs-dict, value) 2-tuple.
    NOTE(review): ``iteritems``/``basestring`` make this Python 2 only.
    """
    _check_errors(key, DATATYPE_KEY)
    keyElement = document.createElement(_remove_order_id(key))
    if isinstance(content, tuple) and len(content) == 2:
        # (attributes, value) form
        (attrs, value) = content
    else:
        (attrs, value) = ({}, content)
    _check_errors(attrs, DATATYPE_ATTRS)
    for (attr, attrValue) in attrs.iteritems():
        _check_errors((attr, attrValue), DATATYPE_ATTR)
        keyElement.setAttribute(attr, '%s' % attrValue)
    if isinstance(value, basestring):
        # Simple text value inside the node
        keyElement.appendChild(document.createTextNode('%s' % value))
        rootXMLElement.appendChild(keyElement)
    elif isinstance(value, dict):
        # Iterating over the children
        for (k, cont) in _iter_dict_sorted(value):
            # Recursively parse the subdictionaries
            _buildXMLTree(keyElement, k, cont, document)
        rootXMLElement.appendChild(keyElement)
    elif isinstance(value, list):
        # Recursively replicate this key element for each value in the list;
        # the keyElement created above is intentionally discarded here.
        for subcontent in value:
            _buildXMLTree(rootXMLElement, key, subcontent, document)
    else:
        raise Dict2XMLException('Invalid value.')
def dict2XML(dic, indent=True, utf8=False):
    """Serialize a single-root dictionary to an XML string.

    NOTE(review): ``dic.items()[0]`` requires Python 2 — dict views are
    not indexable on Python 3.
    """
    document = minidom.Document()
    # Root call of the recursion
    _check_errors(dic, DATATYPE_ROOT_DICT)
    (key, content) = dic.items()[0]
    _buildXMLTree(document, key, content, document)
    # encoding=None makes minidom emit a str without an encoding declaration.
    encoding = utf8 and 'utf-8' or None
    return (indent and document.toprettyxml(indent=' ', encoding=encoding)
            or document.toxml(encoding=encoding))
| 34.786408
| 88
| 0.6542
| 426
| 3,583
| 5.361502
| 0.295775
| 0.077058
| 0.028021
| 0.02627
| 0.180823
| 0.140981
| 0.055166
| 0.055166
| 0
| 0
| 0
| 0.009314
| 0.250907
| 3,583
| 102
| 89
| 35.127451
| 0.841654
| 0.075077
| 0
| 0.094595
| 0
| 0
| 0.10348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067568
| false
| 0.027027
| 0.027027
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05071a1ee7761ffc57199c77291dcea3601a853d
| 1,247
|
py
|
Python
|
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | 1
|
2019-11-23T06:52:58.000Z
|
2019-11-23T06:52:58.000Z
|
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | null | null | null |
06_rotation_transformation.py
|
Mathanraj-Sharma/OpenCV_Sample_Codes
|
a20710fa05d7817b9c4c78acc64b852b0cde7583
|
[
"Apache-2.0"
] | 1
|
2019-11-23T11:18:37.000Z
|
2019-11-23T11:18:37.000Z
|
import cv2
import argparse
import numpy as np
# Parse the image path from the command line (-i) and load the image eagerly.
ap = argparse.ArgumentParser()
ap.add_argument('-i', required = True, help = 'Enter the path of Image')
args = vars(ap.parse_args())
image = cv2.imread(args['i'])  # NOTE(review): None if the path is invalid — not checked
def wheel(image, center):
    """Animate *image* spinning about *center* indefinitely.

    Blocks forever: cycles the rotation angle 1..359 degrees, showing each
    frame in a 'Wheel' window for ~10 ms, then restarts.
    """
    i = 1
    while (True):
        if i > 359:
            # Full turn completed: show the original frame briefly and restart.
            cv2.imshow('Wheel', image)
            cv2.waitKey(1)
            i = 1
        else:
            rotated_image = rotate(image, center, i, 1.0)
            cv2.imshow('Wheel', rotated_image)
            cv2.waitKey(10)
            i += 1
def rotate(image, point, angle, scale):
    """Rotate *image* by *angle* degrees about *point*, scaled by *scale*.

    A scale of 1.0 keeps the original size, 2.0 doubles it, etc.  The
    output keeps the input's width and height, so corners may be clipped.
    """
    # M is the rotation Matrix for derived using angel, Point, and Scale
    height, width = image.shape[0], image.shape[1]
    rotation_matrix = cv2.getRotationMatrix2D(point, angle, scale)
    return cv2.warpAffine(image, rotation_matrix, (width, height))
if __name__ == '__main__':
    # Transform the image about its center through -45 degrees,
    # then hand off to the endless wheel animation.
    center = (image.shape[1]//2, image.shape[0]//2)
    angel = -45  # NOTE(review): 'angel' is a typo for 'angle'
    cv2.imshow('Original Image', image)
    cv2.waitKey(0)
    rotated_image = rotate(image, center, angel, 1.0)
    cv2.imshow('Rotated Image', rotated_image)
    cv2.waitKey(0)
    wheel(image, center)  # never returns
| 22.267857
| 75
| 0.690457
| 194
| 1,247
| 4.35567
| 0.391753
| 0.099408
| 0.071006
| 0.030769
| 0.068639
| 0
| 0
| 0
| 0
| 0
| 0
| 0.040976
| 0.178027
| 1,247
| 56
| 76
| 22.267857
| 0.783415
| 0.238172
| 0
| 0.125
| 0
| 0
| 0.075693
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.09375
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0507429bfe72a62ce8131002bc3538a2af143672
| 3,972
|
py
|
Python
|
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
feichangzun/directGetFlightData.py
|
Octoberr/weizhuangIP
|
d37e82df35d0b8b84bfa38f3a487fd81ab969070
|
[
"Apache-2.0"
] | null | null | null |
import getflightdata
import requests
from bs4 import BeautifulSoup
import random
import json
import pymongo
import datetime
from Utils.config import config
# import config
# Mongo connection settings come from the shared config module.
mongoConf = config['mongo']
# variflight.com flight page URL pieces: base + <flight-no> + suffix + <yyyymmdd>
feichangzun = 'http://www.variflight.com/flight/fnum/'
feichangzunhouzui = '.html?AE71649A58c77&fdate='
def get_headers():
    """Build request headers with a randomly spoofed X-Forwarded-For IP."""
    spoofed_ip = '.'.join(str(random.randint(0, 255)) for _ in range(4))
    return {
        "X-Forwarded-For": spoofed_ip,
        'Host': "www.variflight.com",
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate'}
def getqueryflight(flight, flightdate):
    """Return cached feichangzun records matching flight number and date."""
    client = pymongo.MongoClient(host=mongoConf['host'], port=mongoConf['port'])
    collection = client.swmdb.feichangzun
    criteria = {"Info.fno": flight, "Info.Date": flightdate}
    return [record for record in collection.find(criteria)]
def getDirectFlight(flight, flightdate):
    """Scrape variflight.com for *flight* on *flightdate* ('YYYY-MM-DD').

    Returns the assembled flight dict, or None when no leg data is found.
    Flights not already cached in Mongo are persisted before returning.
    """
    strDate = datetime.datetime.strptime(flightdate, "%Y-%m-%d").strftime("%Y%m%d")
    gt = getflightdata.GETFLIGHTDATA()
    url = feichangzun + flight + feichangzunhouzui + strDate
    flightlist = []
    listHtml = requests.get(url, headers=get_headers())
    listSoup = BeautifulSoup(listHtml.text, 'lxml')
    listUrl = listSoup.find('div', class_='fly_list')
    if listUrl is not None:
        # Collect only the per-leg '/schedule' links from the listing page.
        listhref = listUrl.find('div', class_='li_box').find_all('a')
        for link in listhref:
            if '/schedule' in link.get('href'):
                flightlist.append(link.get('href'))
    flightdictlist = gt.getaflightinfo(flightlist)
    if len(flightdictlist) == 0:
        return None
    flightdict = getFlightJsonData(flightdictlist)
    querdata = getqueryflight(flight, flightdate)
    if len(querdata) == 0:
        # Not cached yet: insert, then strip Mongo's generated _id before
        # returning.  NOTE(review): presumably insertintomongo sets '_id'
        # on the dict; if the del ran unconditionally it would KeyError for
        # already-cached flights — verify against getflightdata.
        gt.insertintomongo(flightdict)
        del(flightdict['_id'])
    # flightdictr = json.dumps(flightdict)
    return flightdict
def getFlightJsonData(flightinfo):
    """Assemble the summary document stored and returned for a flight.

    Parameters
    ----------
    flightinfo : list[dict]
        Per-leg dicts scraped from variflight (keys 'qf', 'dd',
        'qf_simple', ..., 'fno', 'date').

    Returns
    -------
    dict
        ``{'Info': <summary of one leg>, 'List': flightinfo}``.  For a
        single-leg flight the summary comes from leg 0, otherwise leg 1 —
        the original code duplicated the entire field mapping per branch.
    """
    init = 0 if len(flightinfo) == 1 else 1
    leg = flightinfo[init]
    info = {
        'from': leg['qf'],
        'to': leg['dd'],
        'from_simple': leg['qf_simple'],
        'to_simple': leg['dd_simple'],
        'FromTerminal': leg['qfTerminal'],
        'ToTerminal': leg['ddTerminal'],
        'from_city': leg['qf_city'],
        'to_city': leg['dd_city'],
        'from_code': leg['qf_citycode'],
        'to_code': leg['dd_citycode'],
        'fno': leg['fno'],
        'Company': '3U',  # hard-coded carrier code, as in the original
        'Date': leg['date'],
        'zql': "",
    }
    return {'Info': info, 'List': flightinfo}
#
# flight = '3U3048'
# flightdate ='2017-08-02'
#
# jsodater = getDirectFlight(flight, flightdate)
# print(jsodater)
| 34.842105
| 108
| 0.629909
| 447
| 3,972
| 5.52349
| 0.331096
| 0.136087
| 0.051843
| 0.027542
| 0.348319
| 0.348319
| 0.348319
| 0.348319
| 0.348319
| 0.348319
| 0
| 0.023477
| 0.206445
| 3,972
| 113
| 109
| 35.150442
| 0.759835
| 0.039275
| 0
| 0.311111
| 0
| 0.022222
| 0.207304
| 0.023384
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.088889
| 0
| 0.188889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0507ce8c6b29b5cd6c3e947a8e5f6cea05343e0b
| 2,402
|
py
|
Python
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 28
|
2018-08-09T13:10:34.000Z
|
2022-01-07T13:39:31.000Z
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 4
|
2018-08-09T13:18:12.000Z
|
2021-04-06T19:04:54.000Z
|
face/face-30sec.py
|
eric-erki/ai-smarthome
|
ca7316ebe72b0ad26f0b59e3186426633807cac8
|
[
"BSD-2-Clause"
] | 15
|
2018-12-17T09:17:28.000Z
|
2021-03-02T11:25:05.000Z
|
import numpy as np
import cv2
import face_recognition
import time
# Load a sample picture and learn how to recognize it.
me_image = face_recognition.load_image_file("known/joakim.png")
# NOTE(review): indexes [0] — assumes at least one face in the reference photo.
me_face_encoding = face_recognition.face_encodings(me_image)[0]
known_face_encodings = [
    me_face_encoding,
]
known_face_names = [
    "Joakim Eriksson",
]
cap = cv2.VideoCapture(0)  # default webcam
photo_time = 0  # time.time() when a temporary encoding was last stored
while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    face_locations = face_recognition.face_locations(frame)
    face_encodings = face_recognition.face_encodings(frame, face_locations)
    print(face_locations)
    name = "Unknown"
    match = False
    # Loop through each face found in the unknown image
    for (top, right, bottom, left), face_encoding in zip(face_locations, face_encodings):
        # See if the face is a match for the known face(s)
        matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
        # If a match was found in known_face_encodings, just use the first one.
        if True in matches:
            first_match_index = matches.index(True)
            name = known_face_names[first_match_index]
            match = True
        # Crop the detected face and annotate the full frame.
        cut = frame[top:bottom, left:right]
        cv2.rectangle(frame,(left, top), (right, bottom),(0,255,0),3)
        font = cv2.FONT_HERSHEY_SIMPLEX
        cv2.putText(frame, name,(left, top - 5), font, 0.7, (255,255,255),2,cv2.LINE_AA)
        cv2.imshow('cut', cut)
    print("Name: ", name)
    if match == False:
        print("no match")
    # Display the resulting frame
    cv2.imshow('frame', frame)
    # Expire a temporarily stored encoding after 30 seconds.
    if time.time() - photo_time > 30.0:
        print("the photo is old...")
        known_face_encodings = known_face_encodings[0:1]
        known_face_names = known_face_names[0:1]
    key = cv2.waitKey(1) & 0xff
    if key == ord('q'):
        break
    if key == ord('p'):
        # Store the last seen encoding as a temporary second identity.
        # NOTE(review): 'face_encoding' leaks out of the for-loop above;
        # pressing 'p' before any face was ever detected raises NameError.
        if(len(known_face_encodings) < 2):
            print("Storing new encoding")
            photo_time = time.time()
            known_face_encodings = known_face_encodings + [face_encoding]
            known_face_names = known_face_names + ["Newly Photoed"]
    if key == ord('o'):
        if name == "Newly Photoed":
            print("Door will open for you!")
        else:
            print("Door is closed for you!")
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| 32.459459
| 89
| 0.651957
| 332
| 2,402
| 4.521084
| 0.343373
| 0.08994
| 0.095936
| 0.037308
| 0.13058
| 0.085276
| 0
| 0
| 0
| 0
| 0
| 0.023064
| 0.241882
| 2,402
| 73
| 90
| 32.90411
| 0.801208
| 0.130724
| 0
| 0
| 0
| 0
| 0.083654
| 0
| 0
| 0
| 0.001923
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
050a2b44b8dd6b46945c916a81b519efe47b76fb
| 2,473
|
py
|
Python
|
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | 1
|
2017-02-07T19:50:52.000Z
|
2017-02-07T19:50:52.000Z
|
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio-python
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | null | null | null |
solutions/dropbox/compare_functions.py
|
roman-kachanovsky/checkio-python
|
3134cbc04ed56e92006d1e2f09d7365e900953db
|
[
"BSD-3-Clause"
] | null | null | null |
""" --- Compare Functions --- Simple
Two functions f and g are provided as inputs to checkio.
The first function f is the primary function and the second
function g is the backup. Use your coding skills to return
a third function h which returns the same output as f unless
f raises an exception or returns None. In this case h should
return the same output as g. If both f and g raise exceptions
or return None, then h should return None.
As a second output, h should return a status string indicating
whether the function values are the same and if either function
erred. A function errs if it raises an exception or returns
a null value (None).
The status string should be set to: "same" if f and g return
the same output and neither errs, "different" if f and g return
different outputs and neither errs, "f_error" if f errs but not g,
"g_error" if g errs but not f, or "both_error" if both err.
Input: Two functions: f (primary) and g (backup).
Output: A function h which takes arbitrary inputs
and returns a two-tuple.
How it is used: This is an exercise in working with functions
as first class objects.
Precondition: hasattr(f,'__call__');
hasattr(g,'__call__')
"""
def my_solution(f, g):
    """Combine primary *f* and backup *g* into one function.

    Returns ``h(*args, **kwargs) -> (result, status)``: the result comes
    from *f* unless *f* errs (raises or returns None), in which case it
    comes from *g*; if both err the result is None.  *status* is one of
    'same', 'different', 'f_error', 'g_error' or 'both_error'.
    """
    def h(*args, **kwargs):
        f_res, f_err, g_res, g_err = None, False, None, False
        try:
            f_res = f(*args, **kwargs)
            f_err = f_res is None
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed.
        except Exception:
            f_err = True
        try:
            g_res = g(*args, **kwargs)
            g_err = g_res is None
        except Exception:
            g_err = True
        if f_err and g_err:
            return None, 'both_error'
        elif g_err or f_err:
            return (f_res, 'g_error') if g_err else (g_res, 'f_error')
        else:
            return (g_res, 'same') if f_res == g_res else (f_res, 'different')
    return h
def diz_solution(*funcs):
    """Alternative solution generalized over any number of functions.

    Returns ``helper(*args, **kwargs) -> (output, status)`` where *output*
    is the first non-None, non-raising result and *status* follows the
    same contract as ``my_solution`` ('same', 'different', 'f_error',
    'g_error', 'both_error').
    """
    def helper(*args, **kwargs):
        output = None
        status = 'same'
        # enumerate from ord('f') so chr(i) names errors 'f', 'g', ...
        for i, f in enumerate(funcs, ord('f')):
            try:
                result = f(*args, **kwargs)
            # Narrowed from a bare ``except:`` — don't mask SystemExit etc.
            except Exception:
                result = None
            if result is None:
                # Second error upgrades 'f_error'/'g_error' to 'both_error'.
                status = [chr(i), 'both']['error' in status] + '_error'
            elif output is None:
                output = result
            elif result != output:
                status = 'different'
        return output, status
    return helper
| 32.973333
| 78
| 0.595633
| 368
| 2,473
| 3.894022
| 0.277174
| 0.016748
| 0.013957
| 0.020935
| 0.054431
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.327537
| 2,473
| 74
| 79
| 33.418919
| 0.861696
| 0.51112
| 0
| 0.162162
| 0
| 0
| 0.055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
050b0bea353171a3c51a6088825350acb0d9291f
| 3,402
|
py
|
Python
|
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
yawndb/sync.py
|
selectel/python-yawndb
|
6d1c7d4b16a5cb5ef96496a22a3afb0bae7f2bb6
|
[
"MIT"
] | null | null | null |
"""
yawndb.sync
~~~~~~~~~~~
Sync YAWNDB transport. Use standart socket object methods.
"""
import time
import json
import socket
import urllib2
import logging
from collections import deque
from yawndb._base import _YAWNDBBase
_logger = logging.getLogger(__name__)
class YAWNDB(_YAWNDBBase):
    """Sync YAWNDB transport.

    Stores data that could not be sent in a bounded cache and tries to
    resend it on the next :py:meth:`.send_msgs` call, or manually via
    :py:meth:`.send_cached`.

    Reconnects after a lost connection, at most once every 10 seconds.

    NOTE(review): uses ``urllib2``, so this module is Python 2 only.
    """

    def __init__(self, host, tcp_port=2011, json_port=8081, cache_size=100000):
        super(YAWNDB, self).__init__(host, tcp_port, json_port)
        self._socket = None       # live TCP socket, or None while disconnected
        self._disconnected = 0    # time.time() of the last disconnect
        self._data_cache = deque([], cache_size)  # unsent payloads, oldest first

    def slice(self, path, rule, from_t, to_t):
        """Return datapoints for *path*/*rule* between *from_t* and *to_t*."""
        url = 'http://{0}:{1}/paths/{2}/{3}/slice?from={4}&to={5}'.format(
            self._host, self._json_port, path, rule, from_t, to_t)
        return self._request(url)

    def last(self, path, rule, n):
        """Return the last *n* datapoints for *path*/*rule*."""
        url = 'http://{0}:{1}/paths/{2}/{3}/last?n={4}'.format(
            self._host, self._json_port, path, rule, n)
        return self._request(url)

    def _request(self, url):
        """GET *url* via the JSON API; return the 'answer' or [] on any error."""
        try:
            res = urllib2.urlopen(url).read()
        except Exception:
            _logger.exception('JSON API IO error on %s', url)
            return []
        else:
            res = json.loads(res)
            if res['status'] != 'ok':
                _logger.error('JSON API error on %s: %s', url, res)
                return []
            if res['answer'] == 'empty':
                return []
            return res['answer']

    def start(self):
        """Open the TCP connection (2 s timeout); on failure enter stopped state."""
        try:
            self._socket = socket.socket()
            self._socket.settimeout(2)
            self._socket.connect((self._host, self._tcp_port))
        except IOError:
            _logger.error(
                'Couldn\'t to connect to YAWNDB at %s:%s',
                self._host, self._tcp_port)
            self.stop()

    def stop(self):
        """Close the socket (best effort) and record the disconnect time."""
        if self._socket:
            try:
                self._socket.close()
            except IOError:
                pass
        self._disconnected = time.time()
        self._socket = None

    def _send(self, data):
        """Send raw *data*; return True on success, False (after stop()) on failure."""
        if not self._socket:
            return False
        try:
            self._socket.sendall(data)
            return True
        except IOError:
            _logger.error(
                'Couldn\'t send data to YAWNDB at %s:%s',
                self._host, self._tcp_port)
            self.stop()
            self._socket = None
            return False

    def send(self, data):
        """Send *data*, caching it for a later retry if the send fails."""
        if not self._send(data):
            self._data_cache.append(data)

    def send_msgs(self, msgs):
        """Send *msgs* via the base class, then retry any cached payloads."""
        super(YAWNDB, self).send_msgs(msgs)
        self.send_cached()

    def send_cached(self):
        """Try to re-send data that was failed to sent."""
        # Reconnect at most once every 10 seconds after a disconnect.
        if not self._socket and time.time() - self._disconnected > 10:
            self.start()
        while True:
            if not self._socket:
                break
            try:
                data = self._data_cache.popleft()
            except IndexError:
                break
            if not self._send(data):
                # Put the payload back at the front to preserve ordering.
                self._data_cache.appendleft(data)
| 29.076923
| 79
| 0.546149
| 419
| 3,402
| 4.24105
| 0.295943
| 0.06753
| 0.033765
| 0.025324
| 0.261114
| 0.204277
| 0.155881
| 0.115926
| 0.043894
| 0.043894
| 0
| 0.013753
| 0.337449
| 3,402
| 116
| 80
| 29.327586
| 0.774623
| 0.116402
| 0
| 0.352941
| 0
| 0.011765
| 0.059302
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0.011765
| 0.082353
| 0
| 0.317647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
050b23d1c21cc11db93c4c94dba0b845a1f1693e
| 1,209
|
py
|
Python
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 1
|
2019-02-12T13:17:56.000Z
|
2019-02-12T13:17:56.000Z
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 4
|
2016-12-10T20:17:38.000Z
|
2017-06-16T19:02:47.000Z
|
setup.py
|
ofek/depq
|
370e3ad503d3e9cedc3c49dc64add393ba945764
|
[
"MIT"
] | 5
|
2016-12-10T20:13:42.000Z
|
2020-09-28T09:02:10.000Z
|
from setuptools import setup, find_packages
# The long description for PyPI is taken verbatim from the README.
with open('README.rst', 'r') as infile:
    read_me = infile.read()

# Package metadata for depq (double-ended priority queue).
setup(
    packages=find_packages(),
    name='depq',
    version='1.5.5',
    description='Double-ended priority queue',
    long_description=read_me,
    author='Ofek Lev',
    author_email='ofekmeister@gmail.com',
    maintainer='Ofek Lev',
    maintainer_email='ofekmeister@gmail.com',
    url='https://github.com/Ofekmeister/depq',
    download_url='https://github.com/Ofekmeister/depq',
    license='MIT',
    platforms=None,
    keywords=[
        'double ended priority queue',
        'depq',
        'priority queue',
        'data structure',
        'scheduling',
        'heuristic analysis',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
| 28.116279
| 71
| 0.612076
| 124
| 1,209
| 5.903226
| 0.604839
| 0.053279
| 0.102459
| 0.065574
| 0.087432
| 0.087432
| 0
| 0
| 0
| 0
| 0
| 0.010977
| 0.246485
| 1,209
| 42
| 72
| 28.785714
| 0.792536
| 0
| 0
| 0.054054
| 0
| 0
| 0.503722
| 0.034739
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.027027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
050bc5ae6e8eba8aac368023fb49c3014cb5ef03
| 880
|
py
|
Python
|
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
tests/exact_tests/contour_tests/strategies.py
|
lycantropos/rene
|
c73c616f3e360b994e92c950a3616a8ccb1136b9
|
[
"MIT"
] | null | null | null |
from hypothesis import strategies
from rithm import Fraction
from rene import MIN_CONTOUR_VERTICES_COUNT
from rene.exact import (Contour,
Point)
# Hypothesis strategies for exact Contour tests.
integers = strategies.integers()
# Denominators for exact fractions must be non-zero.
non_zero_integers = integers.filter(bool)
# Coordinate scalars: ints, stdlib fractions, rithm Fractions, finite floats.
scalars = (integers | strategies.fractions()
           | strategies.builds(Fraction, integers, non_zero_integers)
           | strategies.floats(allow_infinity=False,
                               allow_nan=False))
points = strategies.builds(Point, scalars, scalars)
# Vertex lists long enough to form a valid contour.
contours_vertices = strategies.lists(points,
                                     unique=True,
                                     min_size=MIN_CONTOUR_VERTICES_COUNT)
# Vertex lists that are too short — presumably used to exercise the
# contour-validation error path.
invalid_count_contours_vertices = strategies.lists(
    points,
    unique=True,
    max_size=MIN_CONTOUR_VERTICES_COUNT - 1
)
contours = strategies.builds(Contour, contours_vertices)
| 36.666667
| 73
| 0.664773
| 90
| 880
| 6.255556
| 0.388889
| 0.053286
| 0.095915
| 0.122558
| 0.262877
| 0.166963
| 0.166963
| 0
| 0
| 0
| 0
| 0.00155
| 0.267045
| 880
| 23
| 74
| 38.26087
| 0.871318
| 0
| 0
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.190476
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05108d99ff3259ead7d1205123464ffd5c4850a2
| 5,504
|
py
|
Python
|
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
makerdao-data/gov-tracker
|
52b7588e5c200b0af5b64a2891b276cbcc149ff1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Public version #
from flask import Flask, request, jsonify
import atexit
from datetime import datetime
import csv
from io import StringIO
from werkzeug.wrappers import Response
from sqlalchemy import func
from deps import get_db
from utils.query import pull_filtered_data
from views.main_view import main_page_view, main_page_data
from views.address_views import address_page_view, address_data_view
from views.yay_views import yay_page_view, yay_data_view
from views.poll_views import poll_page_view, poll_data_view
from views.proxy_views import proxy_page_view, proxy_data_view
from views.protocol_parameters_views import parameters_page_view, parameters_data_view
from connectors.sf import sf, sf_disconnect
from models import ParameterEvent
from utils.query import pull_filtered_data
app = Flask(__name__)
# Preserve key insertion order in jsonify() output instead of sorting keys.
app.config["JSON_SORT_KEYS"] = False
# HTML endpoints -------------------------------------------

@app.route("/")
def main_page():
    """Render the landing page."""
    return main_page_view(sf)

@app.route("/address/<address>")
def address_page(address):
    """Render the page for one address (lower-cased before lookup)."""
    return address_page_view(sf, address.lower())

@app.route("/proxy/<proxy>")
def proxy_page(proxy):
    """Render the page for one proxy (lower-cased before lookup)."""
    return proxy_page_view(sf, proxy.lower())

@app.route("/yay/<yay_id>")
def yay_page(yay_id):
    """Render the page for one yay id."""
    return yay_page_view(sf, yay_id)

@app.route("/poll/<poll_id>")
def poll_page(poll_id):
    """Render the page for one poll id."""
    return poll_page_view(sf, poll_id)

@app.route("/protocol_parameters")
def parameters_page():
    """Render the protocol parameters page."""
    return parameters_page_view(sf)
# DATA endpoints -------------------------------------------

@app.route("/data/main", methods=["GET"])
def get_main_page_data():
    """Return the landing-page dataset as JSON."""
    dataset = main_page_data(sf)
    return jsonify(dataset)

@app.route("/data/address/<address>", methods=["GET"])
def get_address_page_data(address):
    """Return the dataset for one address (lower-cased) as JSON."""
    dataset = address_data_view(sf, address.lower())
    return jsonify(dataset)

@app.route("/data/proxy/<proxy>", methods=["GET"])
def get_proxy_page_data(proxy):
    """Return the dataset for one proxy (lower-cased) as JSON."""
    dataset = proxy_data_view(sf, proxy.lower())
    return jsonify(dataset)

@app.route("/data/yay/<yay>", methods=["GET"])
def get_yay_page_data(yay):
    """Return the dataset for one yay id as JSON."""
    dataset = yay_data_view(sf, yay)
    return jsonify(dataset)

@app.route("/data/poll/<poll>", methods=["GET"])
def get_poll_page_data(poll):
    """Return the dataset for one poll id as JSON."""
    dataset = poll_data_view(sf, poll)
    return jsonify(dataset)
# @app.route("/data/protocol_parameters", methods=["GET"])
# def get_parameters_page_data():
# dataset = parameters_data_view(sf)
# return jsonify(dataset)
@app.route("/data/protocol_parameters/<s>/<e>", methods=["GET"])
def get_parameters_page_data(s, e):
session = next(get_db())
query = pull_filtered_data(request, s, e, session, ParameterEvent)
total_filtered = query.count()
# sorting
order = []
i = 0
while True:
col_index = request.args.get(f'order[{i}][column]')
if col_index is None:
break
col_name = request.args.get(f'columns[{col_index}][data]')
if col_name not in ['block', 'timestamp', 'tx_hash', 'source', 'parameter', 'ilk', 'from_value', 'to_value']:
col_name = 'block'
descending = request.args.get(f'order[{i}][dir]') == 'desc'
col = getattr(ParameterEvent, col_name)
if descending:
col = col.desc()
order.append(col)
i += 1
if order:
query = query.order_by(*order)
# pagination
start = request.args.get('start', type=int)
length = request.args.get('length', type=int)
query = query.offset(start).limit(length)
records_total = session.query(ParameterEvent).count()
# response
return {
'data': [record.to_dict() for record in query],
'recordsFiltered': total_filtered,
'recordsTotal': records_total,
'draw': request.args.get('draw', type=int),
}
@app.route("/data/parameters_history_export/<s>/<e>", methods=["GET"])
def parameters_history_export(s, e):
session = next(get_db())
query = pull_filtered_data(request, s, e, session, ParameterEvent)
def generate():
data = StringIO()
w = csv.writer(data)
# write header
w.writerow(('block', 'timestamp', 'tx_hash', 'source', 'parameter', 'ilk', 'from_value', 'to_value'))
yield data.getvalue()
data.seek(0)
data.truncate(0)
# write each log item
for item in query:
w.writerow(tuple(item.to_list()))
yield data.getvalue()
data.seek(0)
data.truncate(0)
# stream the response as the data is generated
response = Response(generate(), mimetype='text/csv')
# add a filename
response.headers.set("Content-Disposition", "attachment", filename="export.csv")
return response
# cleanup tasks
def cleanup_task():
    """Close the Snowflake connection when the interpreter exits."""
    if not sf.is_closed():
        sf_disconnect(sf)
        print("SF connection closed.")

atexit.register(cleanup_task)

if __name__ == "__main__":
    app.run(debug=False)
| 28.518135
| 117
| 0.677871
| 748
| 5,504
| 4.794118
| 0.270053
| 0.031233
| 0.026771
| 0.031233
| 0.226157
| 0.208589
| 0.187953
| 0.118238
| 0.090351
| 0.0686
| 0
| 0.003112
| 0.182776
| 5,504
| 192
| 118
| 28.666667
| 0.794131
| 0.178234
| 0
| 0.146552
| 0
| 0
| 0.129061
| 0.026925
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12931
| false
| 0
| 0.155172
| 0.051724
| 0.396552
| 0.008621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
051130482cb3691a34b0be84581c86dd2a4ce54f
| 3,280
|
py
|
Python
|
open_spiel/python/mst/run_mst.py
|
BrandonKates/open_spiel
|
f820abe9bdfdbc4bd45c2e933439393d4ad3622a
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/mst/run_mst.py
|
BrandonKates/open_spiel
|
f820abe9bdfdbc4bd45c2e933439393d4ad3622a
|
[
"Apache-2.0"
] | null | null | null |
open_spiel/python/mst/run_mst.py
|
BrandonKates/open_spiel
|
f820abe9bdfdbc4bd45c2e933439393d4ad3622a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python spiel example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
from absl import app
from absl import flags
import numpy as np
from scipy.spatial import distance_matrix
import pyspiel
FLAGS = flags.FLAGS
# Command-line configuration for this example runner.
flags.DEFINE_string("game", "mst", "Name of the game")
flags.DEFINE_integer("num_nodes", None, "Number of nodes")
flags.DEFINE_string("load_state", None,
                    "A file containing a string to load a specific state")
def main(_):
  """Play one random playthrough of the MST game, printing every step.

  With --num_nodes, random 2-D points are sampled and their pairwise
  distances (rounded to 2 decimals) become the edge weights. Without it,
  a 5-node game with all-zero weights is used.
  """
  print("Creating game: " + FLAGS.game)
  if FLAGS.num_nodes is not None:
    num_nodes = FLAGS.num_nodes
    points = np.random.random((num_nodes, 2))
    dist_mat = np.round(distance_matrix(points, points), 2).flatten()
    # Serialize the flattened distance matrix as a comma-separated list.
    generated_weights = ",".join(str(w) for w in dist_mat)
    game = pyspiel.load_game(
        FLAGS.game,
        {"num_nodes": pyspiel.GameParameter(num_nodes),
         "weights": pyspiel.GameParameter(generated_weights)})
  else:
    # BUG FIX: keep a local num_nodes so the (row, col) action decoding
    # below still works when --num_nodes was not supplied (previously
    # FLAGS.num_nodes stayed None and `i // FLAGS.num_nodes` raised).
    num_nodes = 5
    game = pyspiel.load_game(
        FLAGS.game,
        {"num_nodes": pyspiel.GameParameter(num_nodes),
         "weights": pyspiel.GameParameter(
             "0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")})

  # Get a new state.
  if FLAGS.load_state is not None:
    # Load a specific state from a file.
    with open(FLAGS.load_state, encoding="utf-8") as input_file:
      state_string = input_file.read()
    state_string = state_string.rstrip()
    print("Loading state:")
    print(state_string)
    print("")
    state = game.deserialize_state(state_string)
  else:
    state = game.new_initial_state()

  # Print the initial state.
  print(str(state))
  while not state.is_terminal():
    # Actions encode an edge as row * num_nodes + col; display as pairs.
    legal_actions = state.legal_actions(state.current_player())
    print("Legal Actions: ",
          [(i // num_nodes, i % num_nodes) for i in legal_actions])
    # Decision node: sample a random action for the current player.
    action = random.choice(legal_actions)
    action_string = state.action_to_string(state.current_player(), action)
    print("Player ", state.current_player(), ", randomly sampled action: ",
          action_string)
    state.apply_action(action)
    print(str(state))

  # Game is now done. Print utilities for each player.
  returns = state.returns()
  for pid in range(game.num_players()):
    print("Utility for player {} is {}".format(pid, returns[pid]))


if __name__ == "__main__":
  app.run(main)
| 34.166667
| 129
| 0.695122
| 468
| 3,280
| 4.713675
| 0.350427
| 0.021759
| 0.031278
| 0.039891
| 0.085675
| 0.062103
| 0.062103
| 0.062103
| 0.062103
| 0.062103
| 0
| 0.014829
| 0.198171
| 3,280
| 95
| 130
| 34.526316
| 0.823954
| 0.264329
| 0
| 0.071429
| 0
| 0.017857
| 0.128978
| 0.020519
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017857
| false
| 0
| 0.160714
| 0
| 0.178571
| 0.178571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0511cceb2ee442a4c70aeab49d84be0233b7fcac
| 10,952
|
py
|
Python
|
classify.py
|
clulab/incivility
|
82d8e8164b81e9f4d5737520f2cbf308d3fcd033
|
[
"Apache-2.0"
] | 1
|
2020-09-18T12:05:13.000Z
|
2020-09-18T12:05:13.000Z
|
classify.py
|
clulab/incivility
|
82d8e8164b81e9f4d5737520f2cbf308d3fcd033
|
[
"Apache-2.0"
] | null | null | null |
classify.py
|
clulab/incivility
|
82d8e8164b81e9f4d5737520f2cbf308d3fcd033
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import os
import subprocess
from typing import List, Sequence, Text
import textwrap
import numpy as np
import pandas as pd
import sklearn
import tensorflow as tf
import tensorflow_addons as tfa
import transformers
import data
import models
import ga
def train(model_path: Text,
          train_data_paths: Sequence[Text],
          dev_data_paths: Sequence[Text],
          pretrained_model_name: Text,
          label_col: Text,
          n_rows: int,
          learning_rate: float,
          batch_size: int,
          grad_accum_steps: int,
          n_epochs: int,
          qsub: bool,
          time: Text,
          singularity_image: Text,
          use_gpu: bool):
    """Fine-tune a transformer classifier, either locally or via a PBS job.

    With qsub=False the model is trained in-process and checkpointed to
    model_path; with qsub=True a PBS script is written and submitted that
    re-invokes this same CLI inside a Singularity image.

    Args:
        model_path: where model weights (checkpoints) are saved.
        train_data_paths: CSV files with training examples.
        dev_data_paths: CSV files used as validation data.
        pretrained_model_name: huggingface model name for tokenizer + encoder.
        label_col: name of the binary label column in the CSVs.
        n_rows: cap on rows read per CSV; None reads all rows (the qsub
            branch renders None as "all" in the job name).
        learning_rate: Adam learning rate.
        batch_size: training batch size.
        grad_accum_steps: when != 1, a gradient-accumulating Adam is used.
        n_epochs: number of training epochs.
        qsub: if True, submit a PBS job instead of training locally.
        time: PBS walltime limit; only valid (and required) when qsub=True.
            NOTE: this parameter shadows the stdlib `time` module name.
        singularity_image: container image the PBS job runs inside.
        use_gpu: whether the PBS job requests a GPU.

    Raises:
        ValueError: if `time` is given without qsub, or missing with qsub.
    """
    if not qsub:
        # --- Local training path ---
        if time is not None:
            raise ValueError("time limit not supported")
        tokenizer_for = transformers.AutoTokenizer.from_pretrained
        tokenizer = tokenizer_for(pretrained_model_name)
        train_x, train_y = data.read_csvs_to_xy(
            data_paths=train_data_paths,
            n_rows=n_rows,
            tokenizer=tokenizer,
            label_col=label_col)
        dev_x, dev_y = data.read_csvs_to_xy(
            data_paths=dev_data_paths,
            n_rows=n_rows,
            tokenizer=tokenizer,
            label_col=label_col)
        # set class weight inversely proportional to class counts
        counts = np.bincount(train_y)
        class_weight = dict(enumerate(counts.max() / counts))
        # determine optimizer: gradient-accumulating Adam only when needed
        optimizer_kwargs = dict(
            learning_rate=learning_rate, epsilon=1e-08, clipnorm=1.0)
        if grad_accum_steps != 1:
            optimizer_class = ga.AdamGA
            optimizer_kwargs.update(grad_accum_steps=grad_accum_steps)
        else:
            optimizer_class = tf.optimizers.Adam
        model_for = transformers.TFAutoModel.from_pretrained
        model = models.from_transformer(
            transformer=model_for(pretrained_model_name),
            n_outputs=1)
        model.compile(
            optimizer=optimizer_class(**optimizer_kwargs),
            loss=tf.keras.losses.BinaryCrossentropy(),
            metrics=[
                tf.keras.metrics.BinaryAccuracy(),
                tf.keras.metrics.Precision(),
                tf.keras.metrics.Recall(),
                tfa.metrics.F1Score(num_classes=1, threshold=0.5),
            ])
        # Checkpoint only the best model by validation F1.
        model.fit(x=train_x, y=train_y,
                  validation_data=(dev_x, dev_y),
                  epochs=n_epochs,
                  batch_size=batch_size,
                  class_weight=class_weight,
                  callbacks=tf.keras.callbacks.ModelCheckpoint(
                      filepath=model_path,
                      monitor="val_f1_score",
                      mode="max",
                      verbose=1,
                      save_weights_only=True,
                      save_best_only=True))
    else:
        # --- PBS submission path: write a job script and qsub it ---
        if time is None:
            raise ValueError("time limit required for qsub")
        model_prefix, _ = os.path.splitext(model_path)
        n_rows_str = "all" if n_rows is None else n_rows
        # Encode the hyper-parameters into the job/file prefix so runs
        # with different settings don't collide.
        prefix = f"{model_prefix}.{label_col}.{pretrained_model_name}.r{n_rows_str}.b{batch_size}.ga{grad_accum_steps}.lr{learning_rate}"
        pbs_path = f"{prefix}.pbs"

        def format_paths(paths):
            # Quote each path for safe interpolation into the shell script.
            return ' '.join(f'"{p}"' for p in paths)

        with open(pbs_path, "w") as pbs_file:
            pbs_file.write(textwrap.dedent(f"""
                #!/bin/bash
                #PBS -q windfall
                #PBS -l select=1{":ncpus=16:ngpus=1" if use_gpu else ":ncpus=4"}:mem=64gb
                #PBS -N {prefix}
                #PBS -W group_list=nlp
                #PBS -l walltime={time}
                module load singularity
                module load cuda10/10.1
                {"export CUDA_VISIBLE_DEVICES=-1" if not use_gpu else ""}
                cd {os.path.dirname(os.path.realpath(__file__))}
                singularity exec --nv \\
                    {singularity_image} \\
                    python3.7 classify.py \\
                    --pretrained-model-name {pretrained_model_name} \\
                    --label-col {label_col} \\
                    train \\
                    {'' if n_rows is None else f'--n-rows={n_rows}'} \\
                    --n-epochs={n_epochs} \\
                    --batch-size={batch_size} \\
                    --grad-accum-steps={grad_accum_steps} \\
                    --learning-rate={learning_rate} \\
                    {prefix}.model \\
                    --train-data {format_paths(train_data_paths)} \\
                    --dev-data {format_paths(dev_data_paths)}
                """))
        subprocess.run(["qsub", pbs_path])
def test(model_paths: Sequence[Text],
         test_data_paths: Sequence[Text],
         pretrained_model_name: Text,
         label_col: Text,
         n_rows: int,
         batch_size: int,
         verbose: bool):
    """Evaluate one or more fine-tuned models on one or more test CSVs.

    For every (model, dataset) pair, predicts labels, computes
    precision/recall/F1/support for the positive class, and prints one
    aligned table per dataset at the end (plus per-model detail if verbose).

    Args:
        model_paths: saved weight files to evaluate.
        test_data_paths: labeled CSV files to evaluate on.
        pretrained_model_name: huggingface model name for tokenizer + encoder.
        label_col: name of the binary label column.
        n_rows: cap on rows read per CSV; None reads all rows.
        batch_size: prediction batch size.
        verbose: if True, also print each row's prediction score per model.
    """
    # Column width that fits the longest model or dataset path.
    width = max(len(p) for p in model_paths + test_data_paths)
    headers = ["precision", "recall", "f1-score", "support"]
    # f-string builds a format template: left-aligned path + 4 metric columns.
    header_fmt = f'{{:<{width}s}} ' + ' {:>9}' * 4
    row_fmt = f'{{:<{width}s}} ' + ' {:>9.3f}' * 3 + ' {:>9}'
    # load the tokenizer model
    tokenizer_for = transformers.AutoTokenizer.from_pretrained
    tokenizer = tokenizer_for(pretrained_model_name)
    # load the pre-trained transformer model (shared across checkpoints)
    model_for = transformers.TFAutoModel.from_pretrained
    transformer = model_for(pretrained_model_name)
    # One list of formatted result rows per dataset.
    test_data_rows = {p: [] for p in test_data_paths}
    for model_path in model_paths:
        tf.keras.backend.clear_session()
        # load the fine-tuned transformer model
        model = models.from_transformer(transformer=transformer, n_outputs=1)
        model.load_weights(model_path).expect_partial()
        for data_path in test_data_paths:
            # tokenize the test data
            df = data.read_csv(data_path=data_path,
                               label_col=label_col,
                               n_rows=n_rows)
            x, y_ref = data.df_to_xy(df=df,
                                     tokenizer=tokenizer,
                                     label_col=label_col)
            # predict on the test data; threshold scores at 0.5
            y_pred_scores = model.predict(x, batch_size=batch_size)
            y_pred = (y_pred_scores >= 0.5).astype(int).ravel()
            # evaluate predictions on the positive class only (labels=[1])
            stats_arrays = sklearn.metrics.precision_recall_fscore_support(
                y_ref, y_pred, labels=[1])
            stats = [a.item() for a in stats_arrays]
            row = [model_path] + stats
            test_data_rows[data_path].append(row_fmt.format(*row))
            # if requested, print detailed results for this model
            if verbose:
                header = header_fmt.format(data_path, *headers)
                print("=" * len(header))
                print(header)
                print(row_fmt.format(*row))
                print("=" * len(header))
                df.insert(1, "prediction", y_pred_scores)
                print(df)
                print()
    # print results for all models on all datasets
    for data_path, rows in test_data_rows.items():
        print(header_fmt.format(data_path, *headers))
        for row in rows:
            print(row)
        print()
def predict_csv(model_path: Text,
                input_path: Text,
                output_path: Text,
                text_col: Text,
                label_col: Text,
                pretrained_model_name: Text,
                output_scores: bool,
                n_rows: int,
                batch_size: int):
    """Label the rows of a CSV with a fine-tuned classifier and save the result.

    Reads input_path, predicts a binary label for the text_col column of each
    row, stores it under label_col (plus raw scores when output_scores is set),
    and writes the augmented frame to output_path.
    """
    # Tokenizer matching the pretrained transformer.
    tokenizer = transformers.AutoTokenizer.from_pretrained(pretrained_model_name)
    # Rebuild the classifier around the pretrained encoder, then restore the
    # fine-tuned weights from disk.
    transformer = transformers.TFAutoModel.from_pretrained(pretrained_model_name)
    model = models.from_transformer(transformer=transformer, n_outputs=1)
    model.load_weights(model_path).expect_partial()
    # Read the input CSV (tolerating bad bytes) and tokenize its text column.
    with open(input_path, encoding="utf-8", errors="ignore") as input_file:
        df = pd.read_csv(input_file, nrows=n_rows)
    x = data.from_tokenizer(tokenizer, df[text_col])
    # Predict scores; a score of 0.5 or more counts as the positive label.
    scores = model.predict(x, batch_size=batch_size)
    df[label_col] = (scores >= 0.5).astype(int).ravel()
    if output_scores:
        df[f"{label_col}_score"] = scores
    df.to_csv(output_path, encoding='utf-8-sig')
if __name__ == "__main__":
    # Top-level CLI: two shared options, then one sub-command per workflow.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pretrained-model-name", default="roberta-base")
    parser.add_argument("--label-col", default="namecalling")
    subparsers = parser.add_subparsers()

    # "train": fine-tune a model, locally or as a PBS job.
    train_cmd = subparsers.add_parser("train")
    train_cmd.add_argument("model_path")
    train_cmd.add_argument("--train-data", dest="train_data_paths",
                           nargs='+', metavar="PATH", required=True)
    train_cmd.add_argument("--dev-data", dest="dev_data_paths",
                           nargs='+', metavar="PATH", required=True)
    train_cmd.add_argument("--qsub", action="store_true")
    train_cmd.add_argument("--time")
    train_cmd.add_argument("--no-gpu", dest="use_gpu", action="store_false")
    train_cmd.add_argument(
        "--singularity-image",
        default="/xdisk/bethard/hpc-ml_centos7-python3.7-transformers3.2.0.sif")
    train_cmd.add_argument("--n-rows", type=int)
    train_cmd.add_argument("--learning-rate", type=float, default=3e-5)
    train_cmd.add_argument("--batch-size", type=int, default=1)
    train_cmd.add_argument("--grad-accum-steps", type=int, default=1)
    train_cmd.add_argument("--n-epochs", type=int, default=10)
    train_cmd.set_defaults(func=train)

    # "test": evaluate saved models against labeled CSVs.
    test_cmd = subparsers.add_parser("test")
    test_cmd.add_argument("model_paths", nargs="+", metavar="model_path")
    test_cmd.add_argument("--test-data", dest="test_data_paths",
                          nargs='+', metavar="PATH", required=True)
    test_cmd.add_argument("--n-rows", type=int)
    test_cmd.add_argument("--batch-size", type=int, default=1)
    test_cmd.add_argument("--verbose", action="store_true")
    test_cmd.set_defaults(func=test)

    # "predict": label a new CSV with a saved model.
    predict_cmd = subparsers.add_parser("predict")
    predict_cmd.add_argument("model_path")
    predict_cmd.add_argument("input_path")
    predict_cmd.add_argument("output_path")
    predict_cmd.add_argument("--text-col", default="tweet_text")
    predict_cmd.add_argument("--output-scores", action="store_true")
    predict_cmd.add_argument("--n-rows", type=int)
    predict_cmd.add_argument("--batch-size", type=int, default=1)
    predict_cmd.set_defaults(func=predict_csv)

    # Dispatch: the chosen sub-command's function receives the parsed args.
    parsed = vars(parser.parse_args())
    command = parsed.pop("func")
    command(**parsed)
| 38.975089
| 137
| 0.603543
| 1,321
| 10,952
| 4.738077
| 0.207419
| 0.038824
| 0.070618
| 0.042179
| 0.37498
| 0.316984
| 0.269212
| 0.221601
| 0.203866
| 0.184215
| 0
| 0.007553
| 0.286797
| 10,952
| 280
| 138
| 39.114286
| 0.793752
| 0.044375
| 0
| 0.188596
| 0
| 0.008772
| 0.200957
| 0.05177
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0
| 0.061404
| 0.004386
| 0.083333
| 0.039474
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
051260c977d73217e66d8ef66398ae1931f7b899
| 814
|
py
|
Python
|
p2/src/prove.py
|
ruimgf/AIDS
|
72bc808ef5e21113f635f34581d18c0dc2c8c7da
|
[
"MIT"
] | null | null | null |
p2/src/prove.py
|
ruimgf/AIDS
|
72bc808ef5e21113f635f34581d18c0dc2c8c7da
|
[
"MIT"
] | null | null | null |
p2/src/prove.py
|
ruimgf/AIDS
|
72bc808ef5e21113f635f34581d18c0dc2c8c7da
|
[
"MIT"
] | null | null | null |
import sys
from kb import *
# Receives a list of sentences if it is in test mode.
def main(lista=None):
    """Build a knowledge base and run propositional resolution over it.

    Args:
        lista: optional list of sentences (test mode). When None, sentences
            are read from stdin, one Python expression per line; a list
            becomes a set of its elements, any other value a singleton set.

    Returns:
        The result of Kb.pl_resolution() on the collected sentences.
    """
    sentences = []
    if lista is None:
        with sys.stdin as f:  # open stdin as a file
            # Convert each line to a Python object.
            # NOTE(security): eval executes arbitrary code; acceptable only
            # because input is trusted here. ast.literal_eval would be safer
            # if the lines are plain literals.
            for line in f.readlines():
                a = eval(line.rstrip())
                sentences.append(set(a) if isinstance(a, list) else {a})
        if DEBUG:
            print(sentences)
    else:
        # Test mode: each non-None entry becomes one sentence (a set).
        sentences.extend(set(x) for x in lista if x is not None)
    return Kb(sentences).pl_resolution()


if __name__ == '__main__':
    print(main())
| 25.4375
| 69
| 0.5086
| 100
| 814
| 4.05
| 0.51
| 0.111111
| 0.088889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.405405
| 814
| 31
| 70
| 26.258065
| 0.836777
| 0.130221
| 0
| 0.08
| 0
| 0
| 0.011348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.08
| 0
| 0.16
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0514df3dee36ec46f44f8239441b8f0b35d0374b
| 758
|
py
|
Python
|
stub_extractor/util.py
|
srittau/stub-extractor
|
f161c10a2f041a74040a04e00e0b0d33cb94a0fe
|
[
"MIT"
] | null | null | null |
stub_extractor/util.py
|
srittau/stub-extractor
|
f161c10a2f041a74040a04e00e0b0d33cb94a0fe
|
[
"MIT"
] | null | null | null |
stub_extractor/util.py
|
srittau/stub-extractor
|
f161c10a2f041a74040a04e00e0b0d33cb94a0fe
|
[
"MIT"
] | null | null | null |
from typing import Iterator, List, Optional, Sequence, Tuple, TypeVar
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")


def rzip_longest(
    seq1: Sequence[_T1], seq2: Sequence[_T2]
) -> Iterator[Tuple[_T1, Optional[_T2]]]:
    """Make an iterator over tuples, with elements from the input sequences.

    If the second sequence is shorter than the first by N elements,
    the second element of the first N tuples is set to None.

    >>> list(rzip_longest([1, 2, 3], ["a", "b"]))
    [(1, None), (2, 'a'), (3, 'b')]

    Raises:
        ValueError: if seq2 is longer than seq1.
    """
    # (Doctest output fixed: tuple repr uses single-quoted strings, so the
    # previous double-quoted expected output failed under doctest.)
    len_diff = len(seq1) - len(seq2)
    if len_diff < 0:
        raise ValueError("seq2 can't be longer than seq1")
    # Left-pad seq2 with None so both sequences align at the right edge.
    padded_seq2: List[Optional[_T2]] = [None] * len_diff
    padded_seq2.extend(seq2)
    return zip(seq1, padded_seq2)
| 30.32
| 76
| 0.647757
| 114
| 758
| 4.157895
| 0.491228
| 0.044304
| 0.059072
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045075
| 0.209763
| 758
| 24
| 77
| 31.583333
| 0.746244
| 0.353562
| 0
| 0
| 0
| 0
| 0.077754
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05167a6a94f7c83fc6497eed1db4333dd9bd4308
| 12,980
|
py
|
Python
|
weibospider.py
|
Chiang97912/WeiboSpider
|
2c426d2dfa8c6d418b66bd54002c292194872c88
|
[
"MIT"
] | null | null | null |
weibospider.py
|
Chiang97912/WeiboSpider
|
2c426d2dfa8c6d418b66bd54002c292194872c88
|
[
"MIT"
] | null | null | null |
weibospider.py
|
Chiang97912/WeiboSpider
|
2c426d2dfa8c6d418b66bd54002c292194872c88
|
[
"MIT"
] | 1
|
2021-05-07T06:35:22.000Z
|
2021-05-07T06:35:22.000Z
|
# -*- coding: UTF-8 -*-
import os
import json
import time
import rsa
import base64
import urllib
import binascii
import traceback
import requests
import pandas as pd
from lxml import etree
from datetime import datetime
class NoResultException(Exception):
    """Raised when a Weibo search page comes back with no results."""

    def __str__(self):
        # Fixed human-readable message for logging.
        return 'No result'

    def __init__(self):
        super().__init__()
class Config(object):
    """Simple attribute bag: every keyword argument becomes an attribute."""

    def __init__(self, **kwargs):
        # Copy all keywords straight into the instance namespace.
        self.__dict__.update(kwargs)
class WeiboSpider(object):
    """Scrapes Weibo search results for one day's query into an Excel file.

    Logs in through the legacy sina.com.cn SSO endpoint (RSA-encrypted
    password), pages through https://s.weibo.com search results, and follows
    each author's profile page for gender/location/age.

    Fixes vs. the previous revision:
      * base64.encodestring -> base64.encodebytes (encodestring was removed
        in Python 3.9; encodebytes is the same function's modern name).
      * print(traceback.print_exc()) printed a stray "None"; now just
        traceback.print_exc().
      * restored the '&timescope=' query parameter that had been corrupted
        by an HTML-entity mangling ('&times' + 'cope').
    """

    def __init__(self, config):
        # Target date and query come from the shared Config object.
        self.year = config.year
        self.month = config.month
        self.day = config.day
        self.query = config.query
        self.config = config
        self.weibo = list()  # accumulated rows, one dict per post
        self.cookie = self.get_cookie()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:54.0) Gecko/20100101 Firefox/54.0'
        }

    def get_cookie(self):
        """Log in via the SSO endpoint and return the session cookie dict."""
        data = {
            'entry': 'weibo',
            'gateway': '1',
            'from': '',
            'savestate': '7',
            'qrcode_flag': 'false',
            'useticket': '1',
            'pagerefer': 'https://login.sina.com.cn/crossdomain2.php?action=logout&r=https%3A%2F%2Fweibo.com%2Flogout.php%3Fbackurl%3D%252F',
            'wsseretry': 'servertime_error',
            'vsnf': '1',
            'su': '',
            'service': 'miniblog',
            'servertime': '1529058370',
            'nonce': 'CPEDL5',
            'pwencode': 'rsa2',
            'rsakv': '1330428213',
            'sp': '',
            'sr': '1536*864',
            'encoding': 'UTF-8',
            'prelt': '75',
            'url': 'https://weibo.com/ajaxlogin.php?framelogin=1&callback=parent.sinaSSOController.feedBackUrlCallBack',
            'returntype': 'META'
        }
        username = self.config.username
        password = self.config.password
        pre_url = "http://login.sina.com.cn/sso/prelogin.php?entry=weibo&callback=sinaSSOController.preloginCallBack&su=emhlZGFwYXQlNDAxNjMuY29t&rsakt=mod&client=ssologi"
        s = requests.session()
        res = s.get(pre_url)
        # The prelogin response is JSONP; strip the callback wrapper.
        res = res.text.split('(')[-1].split(')')[0]
        pre_json = json.loads(res)
        servertime = pre_json['servertime']
        nonce = pre_json['nonce']
        rsakv = pre_json['rsakv']
        pubkey = pre_json['pubkey']
        # su = base64(url-quoted username); [:-1] drops the trailing newline.
        su = base64.encodebytes(urllib.parse.quote(
            username).encode(encoding="utf-8"))[:-1]
        # Compute sp with the "rsa2" scheme: RSA-encrypt
        # "<servertime>\t<nonce>\n<password>" with the server's public key.
        rsaPubkey = int(pubkey, 16)
        key = rsa.PublicKey(rsaPubkey, 65537)
        message = str(servertime) + '\t' + str(nonce) + '\n' + str(password)
        sp = rsa.encrypt(message.encode(encoding="utf-8"), key)
        sp = binascii.b2a_hex(sp)
        data['servertime'] = servertime
        data['nonce'] = nonce
        data['rsakv'] = rsakv
        data['su'] = su
        data['sp'] = sp
        url = 'http://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.18)&wsseretry=servertime_error'
        res = requests.post(url, data=data)
        cookie = res.cookies.get_dict()
        return cookie

    def set_encoding(self, res):
        '''
        Work around Weibo pages being served in different encodings:
        pick whichever of UTF-8/GBK the page itself declares.
        '''
        code = ['UTF-8', 'GBK']
        for item in code:
            if item in res.text:
                res.encoding = item
                break

    def extract_digit(self, s):
        """Return only the digit characters of s ('' for falsy input)."""
        if s:
            return ''.join([x for x in s if x.isdigit()])
        else:
            return ''

    def get_detail_info(self, url, weibo):
        """Scrape gender/location/age from an author's profile page into weibo."""
        res = requests.get(url, headers=self.headers, cookies=self.cookie)
        res.encoding = 'utf-8'
        html = res.text
        lines = html.splitlines()  # split the document into lines on \n
        weibo['gender'] = ''
        weibo['location'] = ''
        weibo['age'] = ''
        for line in lines:
            # Strip literal escape sequences embedded in the FM.view payloads.
            line = line.replace(r'\t', '')
            line = line.replace(r'\n', '')
            line = line.replace(r'\r', '')
            # Header block: contains the gender icon.
            if line.startswith('<script>FM.view({"ns":"pl.header.head.index","domid":"Pl_Official_Headerv6__1"'):
                n = line.find('html":"')
                if n > 0:
                    line = line[n + 7: -12].replace("\\", "")  # drop all backslashes
                    if not line.find('<div class="search_noresult">') > 0:
                        parser = etree.HTML(line)
                        temp = parser.xpath(
                            '//*[@class="pf_username"]/span/a/i/@class')[0].split(' ')[1]
                        if temp == 'icon_pf_male':
                            weibo['gender'] = '男'
                        elif temp == 'icon_pf_female':
                            weibo['gender'] = '女'
            # User-info block: contains location and birth year.
            if line.startswith('<script>FM.view({"ns":"pl.content.homeFeed.index","domid":"Pl_Core_UserInfo'):
                n = line.find('html":"')
                if n > 0:
                    line = line[n + 7: -12].replace("\\", "")  # drop all backslashes
                    if not line.find('<div class="search_noresult">') > 0:
                        parser = etree.HTML(line)
                        # Walk the info list items until both the place and
                        # constellation entries have been seen (or we run out).
                        t = 1
                        flag1 = False
                        flag2 = False
                        while True:
                            try:
                                icon = parser.xpath(
                                    '//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[1]/em/@class'.format(t))[0].split(' ')[1]
                                if icon == 'ficon_cd_place':
                                    flag1 = True
                                    weibo['location'] = parser.xpath(
                                        '//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[2]'.format(t))[0].xpath('string(.)').strip()
                                elif icon == 'ficon_constellation':
                                    flag2 = True
                                    age_text = parser.xpath(
                                        '//*[@class="WB_innerwrap"]/div/div/ul/li[{}]/span[2]'.format(t))[0].xpath('string(.)').strip()
                                    y = age_text.split('年')[0]
                                    if y.isdigit():
                                        weibo['age'] = datetime.now().year - int(y)
                                    else:
                                        weibo['age'] = ''
                                t += 1
                            except Exception:
                                # Index out of range: no more list items.
                                break
                            if flag1 and flag2:
                                break

    def get_one_page(self, html):
        """Extract every post on one search-result page into self.weibo."""
        selecter = etree.HTML(html)
        k = 1
        while True:
            weibo = dict()
            try:
                div = selecter.xpath('//*[@id="pl_feedlist_index"]/div[2]/div[{}]'.format(k))
                if len(div) == 0:
                    break
                name = selecter.xpath('//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/div[1]/div[2]/a'.format(k))
                weibo['name'] = name[0].text.strip() if len(name) > 0 else ''
                content = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/p[1]'.format(k))
                weibo['content'] = content[0].xpath('string(.)').strip() if len(content) > 0 else ''
                release_time = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/p[@class="from"]/a[1]'.format(k))
                weibo['release_time'] = release_time[0].xpath('string(.)').strip() if len(release_time) > 0 else ''
                transpond = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[2]/a'.format(k))
                transpond = transpond[0].text if len(transpond) > 0 else ''
                transpond = self.extract_digit(transpond)
                if transpond:
                    weibo['transpond_num'] = transpond
                else:
                    weibo['transpond_num'] = 0
                comment = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[3]/a'.format(k))
                comment = comment[0].text if len(comment) > 0 else ''
                comment = self.extract_digit(comment)
                if comment:
                    weibo['comment_num'] = comment
                else:
                    weibo['comment_num'] = 0
                thumbsup = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[2]/ul/li[4]/a/em'.format(k))
                thumbsup = thumbsup[0].text if len(thumbsup) > 0 else ''
                thumbsup = self.extract_digit(thumbsup)
                if thumbsup:
                    weibo['thumbsup_num'] = thumbsup
                else:
                    weibo['thumbsup_num'] = 0
                homepage_url = selecter.xpath(
                    '//*[@id="pl_feedlist_index"]/div[2]/div[{}]/div/div[1]/div[2]/div[1]/div[2]/a[1]/@href'.format(k))
                homepage_url = homepage_url[0] if len(homepage_url) > 0 else ''
                if homepage_url:
                    # Profile links look like //weibo.com/u/<uid>?... or
                    # //weibo.com/<uid>?...; pull the uid out of the path.
                    h = homepage_url[2:].split('/')
                    if h[1] == 'u':
                        weibo['uid'] = h[2].split('?')[0]
                    else:
                        weibo['uid'] = h[1].split('?')[0]
                    homepage_url = 'https:' + homepage_url
                    self.get_detail_info(homepage_url, weibo)
            except Exception:
                traceback.print_exc()
                break
            k += 1
            self.weibo.append(weibo)

    def save(self):
        """Write all collected rows to ./data/<year>年<month>月<day>日.xlsx."""
        # Map internal keys to the Chinese column headers used in the sheet.
        columns_map = {
            'name': '微博名称',
            'location': '微博所在地',
            'gender': '性别',
            'content': '微博内容',
            'transpond_num': '转发量',
            'comment_num': '评论量',
            'thumbsup_num': '点赞量',
            'uid': '用户ID',
            'age': '年龄',
            'release_time': '发布时间'
        }
        df = pd.DataFrame(self.weibo)
        df.rename(columns=columns_map, inplace=True)
        columns = ['微博名称', '用户ID', '性别', '年龄', '微博所在地', '微博内容', '发布时间', '转发量', '评论量', '点赞量']
        df.to_excel('./data/{}年{}月{}日.xlsx'.format(self.year, self.month, self.day), columns=columns)

    def start(self):
        """Fetch up to 50 result pages for the configured day, then save."""
        page_index = 1
        while True:
            url = 'https://s.weibo.com/weibo?q={}&typeall=1&suball=1&timescope=custom:{}-{}-{}-0:{}-{}-{}-23&Refer=g&page={}'.format(
                self.query, self.year, str(self.month).zfill(2), str(self.day).zfill(2), self.year, str(self.month).zfill(2), str(self.day).zfill(2), page_index)
            if page_index == 51:
                break  # Weibo search exposes at most 50 pages.
            try:
                res = requests.get(url, headers=self.headers, cookies=self.cookie)
            except Exception as e:
                print(e)
                page_index += 1
                continue
            self.set_encoding(res)
            html = res.text
            # Redirected to the SSO login page -> cookie expired; re-login.
            if '新浪通行证' in html:
                self.cookie = self.get_cookie()
                res = requests.get(url, headers=self.headers, cookies=self.cookie)
                self.set_encoding(res)
                html = res.text
                print('cookie updated!')
            print('正在抓取{}年{}月{}日 第{}页数据'.format(self.year, self.month, self.day, page_index))
            try:
                self.get_one_page(html)
            except NoResultException as e:
                print(e)
                break
            time.sleep(0.5)  # throttle requests
            page_index += 1
        self.save()
def main():
    """Scrape every day of 2018-2019, skipping days already in the blacklist."""
    blacklist_file = 'blacklist.txt'  # records days that finished successfully
    config = {
        'query': '共享单车',  # search keyword ("shared bicycles")
        'start_month': 1,  # first month to scrape
        'start_day': 1,  # first day to scrape
        'username': 'xxxxxxxxxxxx',  # Weibo account name (placeholder)
        'password': 'xxxxxxxxxxxx',  # Weibo account password (placeholder)
    }
    years = ['2018', '2019']
    config = Config(**config)
    if not os.path.exists(blacklist_file):
        open(blacklist_file, 'w').close()  # create the blacklist if missing
    if not os.path.exists('./data'):
        os.makedirs('./data')
    for year in years:
        for month in range(config.start_month, 13):
            for day in range(config.start_day, 32):
                # Re-read the blacklist each day: it is appended to below.
                with open(blacklist_file) as f:
                    blacklist = [line.strip() for line in f.readlines()]
                if '{}-{}-{}'.format(year, month, day) in blacklist:
                    continue
                config.year = year
                config.month = month
                config.day = day
                ws = WeiboSpider(config)
                ws.start()
                # Mark this day done so an interrupted run can resume.
                with open(blacklist_file, 'a') as f:
                    f.write('{}-{}-{}\n'.format(year, month, day))
    print("数据抓取并保存完成")


if __name__ == '__main__':
    main()
| 39.938462
| 170
| 0.469106
| 1,397
| 12,980
| 4.256263
| 0.248389
| 0.017154
| 0.011773
| 0.022873
| 0.235789
| 0.214598
| 0.207198
| 0.17928
| 0.168517
| 0.162294
| 0
| 0.026612
| 0.377581
| 12,980
| 324
| 171
| 40.061728
| 0.70937
| 0.01849
| 0
| 0.185965
| 0
| 0.049123
| 0.200945
| 0.077323
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042105
| false
| 0.010526
| 0.042105
| 0.003509
| 0.108772
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0516e5d4fd543c80d6f16ba01f4a7586b969a893
| 3,783
|
py
|
Python
|
spoty/commands/get_second_group.py
|
dy-sh/spoty
|
431a392707c8754da713871e0e7747bcc4417274
|
[
"MIT"
] | 2
|
2022-02-01T16:49:32.000Z
|
2022-03-02T18:30:31.000Z
|
spoty/commands/get_second_group.py
|
dy-sh/spoty
|
431a392707c8754da713871e0e7747bcc4417274
|
[
"MIT"
] | null | null | null |
spoty/commands/get_second_group.py
|
dy-sh/spoty
|
431a392707c8754da713871e0e7747bcc4417274
|
[
"MIT"
] | null | null | null |
from spoty.commands.first_list_commands import \
count_command, \
export_command, \
print_command
from spoty.commands.second_list_commands import \
filter_second_group, \
find_duplicates_second_command,\
find_deezer_second_group, \
find_spotify_second_group
from spoty.commands import get_group
from spoty.utils import SpotyContext
import click
# Click group mirroring the first "get" group: same sources, but the tracks
# collected here form the SECOND list on the shared SpotyContext.
@click.group("get")
@click.option('--spotify-playlist', '--sp', multiple=True,
              help='Get tracks from Spotify playlist URI or ID.')
@click.option('--spotify-entire-library', '--s', multiple=True,
              help='Get all tracks from Spotify library (by user URI or ID). To request a list for the current authorized user, use "me" as ID.')
@click.option('--spotify-entire-library-regex', '--sr', nargs=2, multiple=True,
              help='Works the same as --spotify-entire-library, but you can specify regex filter which will be applied to playlists names. This way you can query any playlists by names.')
@click.option('--deezer-playlist', '--dp', multiple=True,
              help='Get tracks from Deezer playlist URI or ID.')
@click.option('--deezer-entire-library', '--d', multiple=True,
              help='Get all tracks from Deezer library (by user URI or ID). To request a list for the current authorized user, use "me" as ID.')
@click.option('--deezer-entire-library-regex', '--dr', nargs=2, multiple=True,
              help='Works the same as --deezer-entire-library, but you can specify regex filter which will be applied to playlists names. This way you can query any playlists by names.')
@click.option('--audio', '--a', multiple=True,
              help='Get audio files located at the specified local path. You can specify the audio file name as well.')
@click.option('--csv', '--c', multiple=True,
              help='Get tracks from csv playlists located at the specified local path. You can specify the scv file name as well.')
@click.option('--m3u8', '--m', multiple=True,
              help='Get tracks from m3u8 playlists located at the specified local path. You can specify the m3u8 file name as well.')
@click.option('--no-recursive', '-r', is_flag=True,
              help='Do not search in subdirectories from the specified path.')
@click.pass_obj
def get_second(context: SpotyContext,
               spotify_playlist,
               spotify_entire_library,
               spotify_entire_library_regex,
               deezer_playlist,
               deezer_entire_library,
               deezer_entire_library_regex,
               audio,
               csv,
               m3u8,
               no_recursive
               ):
    """
    Collect second list of tracks for further actions (see next commands).
    """
    context.summary.append("Collecting second list:")
    # Delegate the actual collection to the wrapper shared with the first
    # "get" group; it appends the gathered tracks onto the context.
    get_group.get_tracks_wrapper(context,
                                 spotify_playlist,
                                 spotify_entire_library,
                                 spotify_entire_library_regex,
                                 deezer_playlist,
                                 deezer_entire_library,
                                 deezer_entire_library_regex,
                                 audio,
                                 csv,
                                 m3u8,
                                 no_recursive,
                                 )


# Sub-commands that may be chained after this second "get" group.
get_second.add_command(filter_second_group.filter_second)
get_second.add_command(count_command.count_tracks)
get_second.add_command(print_command.print_tracks)
get_second.add_command(export_command.export_tracks)
get_second.add_command(find_duplicates_second_command.find_duplicates_second)
get_second.add_command(find_deezer_second_group.find_deezer)
get_second.add_command(find_spotify_second_group.find_spotify)
| 50.44
| 187
| 0.641819
| 469
| 3,783
| 4.991471
| 0.234542
| 0.077745
| 0.061512
| 0.056813
| 0.664246
| 0.58223
| 0.41777
| 0.390431
| 0.390431
| 0.359675
| 0
| 0.004334
| 0.268041
| 3,783
| 75
| 188
| 50.44
| 0.841098
| 0.018504
| 0
| 0.272727
| 0
| 0.090909
| 0.341437
| 0.041869
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015152
| false
| 0.015152
| 0.075758
| 0
| 0.090909
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05195432ec2c13cb2bd586385c70cb0f3fcc21ab
| 19,804
|
py
|
Python
|
jenkins_job_wrecker/modules/triggers.py
|
romanek-adam/jenkins-job-wrecker
|
db9379d852afe8b621c7688d34fd057d916de8f2
|
[
"MIT"
] | 1
|
2020-06-05T06:36:50.000Z
|
2020-06-05T06:36:50.000Z
|
jenkins_job_wrecker/modules/triggers.py
|
romanek-adam/jenkins-job-wrecker
|
db9379d852afe8b621c7688d34fd057d916de8f2
|
[
"MIT"
] | 15
|
2020-05-18T07:37:06.000Z
|
2020-08-24T09:16:08.000Z
|
jenkins_job_wrecker/modules/triggers.py
|
romanek-adam/jenkins-job-wrecker
|
db9379d852afe8b621c7688d34fd057d916de8f2
|
[
"MIT"
] | null | null | null |
# encoding=utf8
import jenkins_job_wrecker.modules.base
from jenkins_job_wrecker.helpers import get_bool, Mapper
class Triggers(jenkins_job_wrecker.modules.base.Base):
    """Translates the <triggers> section of Jenkins job XML into JJB YAML."""

    component = 'triggers'

    def gen_yml(self, yml_parent, data):
        """Dispatch each child trigger element to its registered handler.

        Each handler appends its YAML mapping to `triggers`, which is then
        attached to yml_parent under the 'triggers' key.
        """
        triggers = []
        for child in data:
            # e.g. 'hudson.triggers.SCMTrigger' -> handler name 'scmtrigger'
            object_name = child.tag.split('.')[-1].lower()
            self.registry.dispatch(self.component, object_name, child, triggers)
        yml_parent.append(['triggers', triggers])
def scmtrigger(top, parent):
    """Convert a Jenkins SCMTrigger XML element into a JJB 'pollscm' mapping.

    Appends {'pollscm': {...}} to `parent`; raises NotImplementedError on
    any unrecognized child setting.
    """
    pollscm = {}
    for option in top:
        tag = option.tag
        if tag == 'spec':
            pollscm['cron'] = option.text
        elif tag == 'ignorePostCommitHooks':
            pollscm['ignore-post-commit-hooks'] = option.text == 'true'
        else:
            raise NotImplementedError('cannot handle scm trigger '
                                      'setting %s' % tag)
    parent.append({'pollscm': pollscm})
def timertrigger(top, parent):
    """Convert a Jenkins TimerTrigger: its first child carries the cron spec."""
    spec = top[0]
    parent.append({'timed': spec.text})
def reversebuildtrigger(top, parent):
    """Convert a Jenkins ReverseBuildTrigger into a JJB 'reverse' mapping.

    Only the upstream project list is translated; the threshold and spec
    settings are recognized but currently ignored (TODO upstream).
    """
    reverse = {}
    for option in top:
        tag = option.tag
        if tag == 'upstreamProjects':
            reverse['jobs'] = option.text
        elif tag in ('threshold', 'spec'):
            pass  # TODO: not yet translated
        else:
            raise NotImplementedError('cannot handle reverse trigger '
                                      'setting %s' % tag)
    parent.append({'reverse': reverse})
def __gerrit_process_file_paths(attribute):
    """Translate Gerrit-trigger FilePath XML entries into JJB dicts.

    Each FilePath element becomes {'compare-type': ..., 'pattern': ...};
    any other child element type raises NotImplementedError.
    """
    expected_tag = "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.FilePath"
    file_paths = []
    for entry in attribute:
        if entry.tag != expected_tag:
            raise NotImplementedError("Not implemented file path type: ", entry.tag)
        file_path = {}
        for field in entry:
            if field.tag == "compareType":
                file_path["compare-type"] = field.text
            elif field.tag == "pattern":
                file_path["pattern"] = field.text
        file_paths.append(file_path)
    return file_paths
def __gerrit_process_gerrit_projects(child):
    """Translate a <gerritProjects> node into a list of JJB project dicts.

    Bug fix: branch attributes are now read from the current branch element
    (``branch_type``) instead of always from ``attribute[0]``; previously
    every branch in a multi-branch project was a copy of the first one.
    """
    projects = []
    for gerrit_project in child:
        project = {}
        for attribute in gerrit_project:
            if attribute.tag == "compareType":
                project["project-compare-type"] = attribute.text
            elif attribute.tag == "pattern":
                project["project-pattern"] = attribute.text
            elif attribute.tag == "branches":
                branches = []
                for branch_type in attribute:
                    if branch_type.tag == \
                            "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Branch":
                        branch = {}
                        # Fixed: iterate the branch element itself, not attribute[0].
                        for branch_attribute in branch_type:
                            if branch_attribute.tag == "compareType":
                                branch["branch-compare-type"] = branch_attribute.text
                            elif branch_attribute.tag == "pattern":
                                branch["branch-pattern"] = branch_attribute.text
                            else:
                                raise NotImplementedError("Not implemented branch attribute: ",
                                                          branch_attribute.tag)
                        branches.append(branch)
                    else:
                        raise NotImplementedError("Not implemented branch type: ", branch_type.tag)
                project["branches"] = branches
            elif attribute.tag == "disableStrictForbiddenFileVerification":
                project["disable-strict-forbidden-file-verification"] = get_bool(attribute.text)
            elif attribute.tag == "filePaths":
                project["file-paths"] = __gerrit_process_file_paths(attribute)
            elif attribute.tag == "forbiddenFilePaths":
                project["forbidden-file-paths"] = __gerrit_process_file_paths(attribute)
            elif attribute.tag == "topics":
                topics = []
                for topic in attribute:
                    if topic.tag == \
                            "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Topic":
                        topic_keys = {}
                        for topic_attribute in topic:
                            if topic_attribute.tag == "compareType":
                                topic_keys["compare-type"] = topic_attribute.text
                            elif topic_attribute.tag == "pattern":
                                topic_keys["pattern"] = topic_attribute.text
                            else:
                                raise NotImplementedError("Not implemented topic attribute: ", topic_attribute.tag)
                        topics.append(topic_keys)
                    else:
                        raise NotImplementedError("Not implemented topic type: ", topic.tag)
                project["topics"] = topics
            else:
                raise NotImplementedError("Not implemented attribute: ", attribute.tag)
        projects.append(project)
    return projects
def __gerrit_process_trigger_on_events(child):
    """Translate a <triggerOnEvents> node into JJB 'trigger-on' entries.

    Simple events map to plain strings; comment-added and patchset-created
    events carry nested option dicts. Unknown event tags are silently
    skipped, matching the original behavior.
    """
    prefix = "com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.events."
    simple_events = {
        prefix + "PluginChangeAbandonedEvent": "change-abandoned-event",
        prefix + "PluginChangeMergedEvent": "change-merged-event",
        prefix + "PluginChangeRestoredEvent": "change-restored-event",
        prefix + "PluginDraftPublishedEvent": "draft-published-event",
        prefix + "PluginPrivateStateChangedEvent": "private-state-changed-event",
        prefix + "PluginRefUpdatedEvent": "ref-updated-event",
        prefix + "PluginTopicChangedEvent": "topic-changed-event",
        prefix + "PluginWipStateChangedEvent": "wip-state-changed-event",
    }
    patchset_flags = {
        "excludeDrafts": "exclude-drafts",
        "excludeTrivialRebase": "exclude-trivial-rebase",
        "excludeNoCodeChange": "exclude-no-code-change",
        "excludePrivateState": "exclude-private",
        "excludeWipState": "exclude-wip",
    }
    trigger_on = []
    for event in child:
        if event.tag in simple_events:
            trigger_on.append(simple_events[event.tag])
        elif event.tag == prefix + "PluginCommentAddedEvent":
            details = {}
            for element in event:
                if element.tag == "verdictCategory":
                    details["approval-category"] = element.text
                elif element.tag == "commentAddedTriggerApprovalValue":
                    details["approval-value"] = element.text
            trigger_on.append({"comment-added-event": details})
        elif event.tag == prefix + "PluginCommentAddedContainsEvent":
            trigger_on.append({"comment-added-contains-event":
                               {"comment-contains-value": event[0].text}})
        elif event.tag == prefix + "PluginPatchsetCreatedEvent":
            flags = {}
            for attribute in event:
                if attribute.tag in patchset_flags:
                    flags[patchset_flags[attribute.tag]] = get_bool(attribute.text)
            trigger_on.append({"patchset-created-event": flags})
    return trigger_on
def gerrittrigger(top, parent):
    """Convert a Gerrit Trigger plugin XML node into a JJB 'gerrit' trigger.

    Scalar settings are translated via two Mapper tables; structured
    children (projects, skip-vote, trigger-on events) are handled
    explicitly in the elif chain below.
    """
    # XML tag -> (YAML key, value type) for simple scalar settings.
    mapper = Mapper({
        "silentMode": ("silent", bool),
        "silentStartMode": ("silent-start", bool),
        "escapeQuotes": ("escape-quotes", bool),
        "dependencyJobsNames": ("dependency-jobs", str),
        "nameAndEmailParameterMode": ("name-and-email-parameter-mode", str),
        "commitMessageParameterMode": ("commit-message-parameter-mode", str),
        "changeSubjectParameterMode": ("change-subject-parameter-mode", str),
        "commentTextParameterMode": ("comment-text-parameter-mode", str),
        "buildStartMessage": ("start-message", str),
        "buildFailureMessage": ("failure-message", str),
        "buildSuccessfulMessage": ("successful-message", str),
        "buildUnstableMessage": ("unstable-message", str),
        "buildNotBuiltMessage": ("notbuilt-message", str),
        "buildUnsuccessfulFilepath": ("failure-message-file", str),
        "customUrl": ("custom-url", str),
        "serverName": ("server-name", str),
        "dynamicTriggerConfiguration": ("dynamic-trigger-enabled", bool),
        "triggerConfigURL": ("dynamic-trigger-url", str),
    })
    # Vote-value overrides; if any of these appear, "override-votes" is set below.
    mapper_gerrit_build = Mapper({
        "gerritBuildStartedVerifiedValue": ("gerrit-build-started-verified-value", int),
        "gerritBuildStartedCodeReviewValue": ("gerrit-build-started-codereview-value", int),
        "gerritBuildSuccessfulVerifiedValue": ("gerrit-build-successful-verified-value", int),
        "gerritBuildSuccessfulCodeReviewValue": ("gerrit-build-successful-codereview-value", int),
        "gerritBuildFailedVerifiedValue": ("gerrit-build-failed-verified-value", int),
        "gerritBuildFailedCodeReviewValue": ("gerrit-build-failed-codereview-value", int),
        "gerritBuildUnstableVerifiedValue": ("gerrit-build-unstable-verified-value", int),
        "gerritBuildUnstableCodeReviewValue": ("gerrit-build-unstable-codereview-value", int),
        "gerritBuildNotBuiltVerifiedValue": ("gerrit-build-notbuilt-verified-value", int),
        "gerritBuildNotBuiltCodeReviewValue": ("gerrit-build-notbuilt-codereview-value", int)
    })
    gerrit_trigger = {}
    is_override_votes = False
    for child in top:
        if mapper.map_element(child, gerrit_trigger):
            pass  # Handled by the mapper.
        elif mapper_gerrit_build.map_element(child, gerrit_trigger):
            # Jenkins Job Builder implementation uses "override-votes"
            # key to override default vote values. For detail:
            # https://docs.openstack.org/infra/jenkins-job-builder/triggers.html#triggers.gerrit
            is_override_votes = True
        elif child.tag == "gerritProjects":
            gerrit_trigger["projects"] = __gerrit_process_gerrit_projects(child)
        elif child.tag == "dynamicGerritProjects":
            pass  # No implementation by JJB
        elif child.tag == "spec":
            pass  # Not needed in yml
        elif child.tag == "skipVote":
            # Independent 'if's (not elif): each vote flag is checked per element.
            skip_vote = {}
            for attribute in child:
                if attribute.tag == "onSuccessful":
                    skip_vote["successful"] = get_bool(attribute.text)
                if attribute.tag == "onFailed":
                    skip_vote["failed"] = get_bool(attribute.text)
                if attribute.tag == "onUnstable":
                    skip_vote["unstable"] = get_bool(attribute.text)
                if attribute.tag == "onNotBuilt":
                    skip_vote["notbuilt"] = get_bool(attribute.text)
            gerrit_trigger["skip-vote"] = skip_vote
        elif child.tag == "notificationLevel":
            # An empty element means "use the server's configured default".
            if child.text is None:
                gerrit_trigger["notification-level"] = "SERVER_DEFAULT"
            else:
                gerrit_trigger["notification-level"] = child.text
        elif child.tag == "triggerOnEvents":
            gerrit_trigger["trigger-on"] = __gerrit_process_trigger_on_events(child)
        elif child.tag == "gerritTriggerTimerTask":
            pass  # Unconfigurable Attribute
        elif child.tag == "triggerInformationAction":
            pass  # Unconfigurable Attribute
        else:
            raise NotImplementedError("Not implemented Gerrit Trigger Plugin's attribute: ", child.tag)
    gerrit_trigger["override-votes"] = is_override_votes
    parent.append({'gerrit': gerrit_trigger})
def githubpushtrigger(top, parent):
    """The GitHub push trigger has no settings; emit the bare trigger name."""
    parent.append('github')
def ghprbtrigger(top, parent):
    """Convert a GitHub Pull Request Builder (ghprb) plugin XML node into a
    JJB 'github-pull-request' trigger.

    Multi-line list settings (admin/white/org lists, labels, regions) are
    split on newlines; an empty element becomes an empty list. Unknown
    tags and extensions raise NotImplementedError so missing coverage is
    visible rather than silently dropped.
    """
    ghpr = {}
    for child in top:
        if child.tag == 'spec' or child.tag == 'cron':
            ghpr['cron'] = child.text
        elif child.tag == 'configVersion':
            pass  # Not needed
        elif child.tag == 'adminlist':
            if child.text:
                ghpr['admin-list'] = child.text.strip().split('\n')
            else:
                ghpr['admin-list'] = []
        elif child.tag == 'allowMembersOfWhitelistedOrgsAsAdmin':
            ghpr['allow-whitelist-orgs-as-admins'] = get_bool(child.text)
        elif child.tag == 'whitelist':
            if child.text:
                ghpr['white-list'] = child.text.strip().split('\n')
            else:
                ghpr['white-list'] = []
        elif child.tag == 'orgslist':
            if child.text:
                ghpr['org-list'] = child.text.strip().split('\n')
            else:
                ghpr['org-list'] = []
        elif child.tag == 'buildDescTemplate':
            ghpr['build-desc-template'] = child.text
        elif child.tag == 'triggerPhrase':
            ghpr['trigger-phrase'] = child.text
        elif child.tag == 'onlyTriggerPhrase':
            ghpr['only-trigger-phrase'] = get_bool(child.text)
        elif child.tag == 'useGitHubHooks':
            ghpr['github-hooks'] = get_bool(child.text)
        elif child.tag == 'permitAll':
            ghpr['permit-all'] = get_bool(child.text)
        elif child.tag == 'autoCloseFailedPullRequests':
            ghpr['auto-close-on-fail'] = get_bool(child.text)
        elif child.tag == 'blackListCommitAuthor':
            # NOTE: split on spaces here, unlike the newline-split lists above.
            if child.text:
                ghpr['black-list-commit-author'] = child.text.strip().split(' ')
            else:
                ghpr['black-list-commit-author'] = []
        elif child.tag == 'blackListLabels':
            if child.text:
                ghpr['black-list-labels'] = child.text.strip().split('\n')
            else:
                ghpr['black-list-labels'] = []
        elif child.tag == 'blackListTargetBranches':
            # Each child wraps the branch pattern in its first sub-element.
            ghpr['black-list-target-branches'] = [item[0].text.strip() for item in child if item[0].text is not None]
        elif child.tag == 'displayBuildErrorsOnDownstreamBuilds':
            ghpr['display-build-errors-on-downstream-builds'] = get_bool(child.text)
        elif child.tag == 'excludedRegions':
            if child.text:
                ghpr['excluded-regions'] = child.text.strip().split('\n')
            else:
                ghpr['excluded-regions'] = []
        elif child.tag == 'includedRegions':
            if child.text:
                ghpr['included-regions'] = child.text.strip().split('\n')
            else:
                ghpr['included-regions'] = []
        elif child.tag == 'skipBuildPhrase':
            ghpr['skip-build-phrase'] = child.text
        elif child.tag == 'whiteListLabels':
            if child.text:
                ghpr['white-list-labels'] = child.text.strip().split('\n')
            else:
                ghpr['white-list-labels'] = []
        elif child.tag == 'whiteListTargetBranches':
            ghpr['white-list-target-branches'] = [item[0].text.strip() for item in child if item[0].text is not None]
        elif child.tag == 'gitHubAuthId':
            ghpr['auth-id'] = child.text
        elif child.tag == 'extensions':
            extensions_prefix = "org.jenkinsci.plugins.ghprb.extensions."
            for extension in child:
                if extension.tag == extensions_prefix+"status.GhprbSimpleStatus":
                    for extension_child in extension:
                        if extension_child.tag == "commitStatusContext":
                            ghpr['status-context'] = extension_child.text
                        elif extension_child.tag == "triggeredStatus":
                            ghpr['triggered-status'] = extension_child.text
                        elif extension_child.tag == "startedStatus":
                            ghpr['started-status'] = extension_child.text
                        elif extension_child.tag == "statusUrl":
                            ghpr['status-url'] = extension_child.text
                        elif extension_child.tag == "addTestResults":
                            ghpr['status-add-test-results'] = get_bool(extension_child.text)
                        elif extension_child.tag == "completedStatus":
                            # status[1] holds the build result, status[0] the message.
                            for status in extension_child:
                                if status[1].text == "SUCCESS":
                                    ghpr['success-status'] = status[0].text
                                elif status[1].text == "FAILURE":
                                    ghpr['failure-status'] = status[0].text
                                elif status[1].text == "ERROR":
                                    ghpr['error-status'] = status[0].text
                                else:
                                    raise NotImplementedError("GHPRB status %s is not implemented."
                                                              % status[1].text)
                        else:
                            raise NotImplementedError("GHPRB simple status type %s is not implemented."
                                                      % extension_child.tag)
                elif extension.tag == extensions_prefix+"comments.GhprbBuildStatus":
                    for extension_child in extension:
                        if extension_child.tag == "messages":
                            for message in extension_child:
                                if message[1].text == "SUCCESS":
                                    ghpr['success-comment'] = message[0].text
                                elif message[1].text == "FAILURE":
                                    ghpr['failure-comment'] = message[0].text
                                elif message[1].text == "ERROR":
                                    ghpr['error-comment'] = message[0].text
                                else:
                                    # NOTE(review): this error reports message[0].text (the
                                    # message body) while the branch keys on message[1].text
                                    # (the result) — confirm which was intended.
                                    raise NotImplementedError("GHPRB message %s is not implemented." % message[0].text)
                        else:
                            raise NotImplementedError("GHPRB extension type %s is not implemented."
                                                      % extension_child.tag)
                elif extension.tag == extensions_prefix+"build.GhprbCancelBuildsOnUpdate":
                    ghpr['cancel-builds-on-update'] = True
                elif extension.tag == extensions_prefix+"comments.GhprbCommentFile":
                    ghpr['comment-file'] = extension[0].text
                elif extension.tag == extensions_prefix+"status.GhprbNoCommitStatus":
                    ghpr['no-commit-status'] = True
                else:
                    raise NotImplementedError("GHPRB extension %s is not implemented." % extension.tag)
        else:
            raise NotImplementedError("GHPRB tag %s is not implemented." % child.tag)
    parent.append({'github-pull-request': ghpr})
| 51.572917
| 119
| 0.569683
| 1,821
| 19,804
| 6.075783
| 0.183965
| 0.036876
| 0.035792
| 0.022777
| 0.36325
| 0.262292
| 0.199837
| 0.109635
| 0.063539
| 0.038503
| 0
| 0.00178
| 0.319127
| 19,804
| 383
| 120
| 51.707572
| 0.818748
| 0.017067
| 0
| 0.147727
| 0
| 0
| 0.264549
| 0.132583
| 0
| 0
| 0
| 0.002611
| 0
| 1
| 0.028409
| false
| 0.022727
| 0.005682
| 0
| 0.048295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
051d3484ddd9be778a5ba470d36fedfb5de63393
| 4,097
|
py
|
Python
|
tools/clean-parallel.py
|
ZJaume/clean
|
0c3c6bab8bf173687ec0bba6908097ef7bc38db2
|
[
"MIT"
] | 1
|
2021-06-02T03:08:32.000Z
|
2021-06-02T03:08:32.000Z
|
tools/clean-parallel.py
|
ZJaume/clean
|
0c3c6bab8bf173687ec0bba6908097ef7bc38db2
|
[
"MIT"
] | 1
|
2021-05-30T22:55:44.000Z
|
2021-06-02T08:47:56.000Z
|
tools/clean-parallel.py
|
ZJaume/clean
|
0c3c6bab8bf173687ec0bba6908097ef7bc38db2
|
[
"MIT"
] | 2
|
2021-06-01T19:07:43.000Z
|
2021-06-03T11:03:04.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import regex
import argparse
# The variables below need to be adjusted for a language pair and dataset.
# To add a new language, define the list of alpha characters in the dict below.
MIN_LENGTH = 1 # minimum number of words in a sentence
MAX_LENGTH = 200 # maximum number of words in a sentence
RATIO_LENGTH = 0.3 # maximum length difference between the source and target sentence
RATIO_ALPHA_WORDS = 0.4 # minimum fraction of "real" words in a source sentence
RATIO_ALPHA_CHARS = 0.5 # minimum fraction of alpha characters in a source sentence
CHARS = {
'bg': r'[АаБбВвГгДддЕеЖжЗзИиЙйКкkasЛлМмНнОоПпРрСсТтУуФфХхЦцЧчШшЩщЪъЬьЮюЯя]',
'cs': r'[a-zÁáČčĎďÉéěÍíŇňÓóŘřŠšŤťÚúůÝýŽž]',
'ca': r'[a-zÀàÈèÉéÍíÒòÓóÚúÇç]',
'da': r'[a-zÆæØøÅå]',
'de': r'[a-zÄäÖöÜüß]',
'en': r'[a-z]',
'el': r'[a-zΑαΒβΓγΔδΕεΖζΗηΘθΙιΚκΛλΜμΝνΞξΟοΠπΡρΣσςΤτΥυΦφΧχΨψΩω]',
'es': r'[a-zÁáÉéÍíÓóÚúñÑ]',
'et': r'[a-zÕõÄäÖöÜü]',
'eu': r'[a-zñÑ]',
'fi': r'[a-zÅåÄäÖö]',
'fr': r'[a-zÂâÁáÀàâÇçÉéÈèÊêÓóÒòÔôŒœÜüÛûŸÿ]',
'ga': r'[abcdefghilmnoprstuáéíóúÁÉÍÓÚ]',
'gl': r'[a-zÁáÉéÍíÓóÚúÑñ]',
'hr': r'[abcčČćĆdđĐefghijklmnoprsšŠtuvzžŽ]',
'hu': r'[a-zÁáÉéÍíÓóÖöŐőŰű]',
'is': r'[abdefghijklmnoprstuvxyÁáðÐÉéÍíÓóÚúÝýÞþÆæÖö]',
'it': r'[a-zàÀèÈéÉìÌíÍîÎòÒóÓùÙúÚ]',
'lt': r'[aąbcČčdeĘęĖėfghiĮįyjklmnoprsŠštuŲųŪūvzŽž]',
'lv': r'[aĀābcČčdeĒēfgĢģhiĪījkĶķlĻļmnŅņoprsŠštuŪūvzŽž]',
'mt': r'[abĊċdefĠġghĦħiiejklmnopqrstuvwxŻżz]',
'nb': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'nl': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÚú]',
'no': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'nn': r'[a-zÂâÁáÀàâÉéÈèÊêÓóÒòÔôÜüÆæØøÅå]',
'pl': r'[a-zĄąĆćĘꣳŃńÓóŚśŹźŻż]',
'ro': r'[a-zĂăÂâÎîȘșȚț]',
'sk': r'[a-záäÁÄčČďĎžéÉíÍĺĹľĽňŇóÓôÔŕŔšŠťŤúÚýÝžŽ]',
'sl': r'[abcčČdđĐefghijklmnoprsšŠtuvzžŽ]',
'sv': r'[a-zÅåÄäÖö]',
}
middle_period = regex.compile(r'\w+[\.\?\!] \p{Lu}\w*,? ')
def main():
    """Filter tab-separated bitext on stdin; echo lines whose last two
    columns (source, target) pass clean_parallel, else drop them.

    With --debug, rejected lines are written to stderr prefixed by the
    rejection reason.
    """
    args = parse_user_args()
    for line in sys.stdin:
        columns = line.strip().split('\t')
        if len(columns) < 2:
            continue
        source = columns[-2].strip()
        target = columns[-1].strip()
        reason = clean_parallel(source, target, args.src_lang, args.trg_lang)
        if reason:
            if args.debug:
                sys.stderr.write("{}\t{}".format(reason, line))
            continue
        sys.stdout.write(line)
def clean_parallel(src, trg, src_lang, trg_lang):
    """Return a rejection label for a sentence pair, or None if it is kept.

    Checks run in a fixed order, so the label reflects the first failing
    filter (identity, emptiness, length ratio, length bounds, alphabetic
    ratios, sentence-internal periods, capitalization).
    """
    # Identical pairs are copies, not translations.
    if src.lower() == trg.lower():
        return "IDENTICAL"
    src_words = src.split()
    trg_words = trg.split()
    n_src = len(src_words)
    n_trg = len(trg_words)
    if not n_src or not n_trg:
        return "EMPTY"
    length_ratio = n_src / float(n_trg)
    if length_ratio < RATIO_LENGTH or length_ratio > (1. / RATIO_LENGTH):
        return "RATIO_LENGTH"
    if n_src < MIN_LENGTH or n_trg < MIN_LENGTH:
        return "TOO_SHORT"
    if n_src > MAX_LENGTH or n_trg > MAX_LENGTH:
        return "TOO_LONG"
    # Fraction of source tokens that start with a letter of the language.
    alpha_words = sum(
        1 if re.match(CHARS[src_lang], tok, re.IGNORECASE) else 0
        for tok in src_words)
    if alpha_words / float(n_src) < RATIO_ALPHA_WORDS:
        return "RATIO_ALPHA"
    # Fraction of non-space characters that are alphabetic for the language.
    alpha_chars = len(re.findall(CHARS[src_lang], src, re.IGNORECASE))
    if alpha_chars / float(len(src.replace(' ', ''))) < RATIO_ALPHA_CHARS:
        return "RATIO_CHARS"
    # Both sides must contain the same number of sentence-internal periods.
    if len(middle_period.findall(src)) != len(middle_period.findall(trg)):
        return "MIDDLE_PERIOD"
    if src_lang in CHARS and trg_lang in CHARS:
        # Reject sentences starting with a lowercase letter, unless the
        # second character is ')' (enumeration markers like "a)").
        for sent in (src, trg):
            if sent[0].isalpha() and not sent[0].isupper() \
                    and len(sent) > 1 and sent[1] != ')':
                return "START_CAPITAL"
    return None
def parse_user_args():
    """Define and parse the command-line options (source/target language
    codes and a debug flag)."""
    parser = argparse.ArgumentParser()
    for flags, kwargs in (
            (("-l1", "--src-lang"), dict(default='es')),
            (("-l2", "--trg-lang"), dict(default='en')),
            (("--debug",), dict(action='store_true'))):
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
if __name__ == "__main__":
main()
| 32.515873
| 96
| 0.640469
| 531
| 4,097
| 4.79661
| 0.340866
| 0.017275
| 0.009423
| 0.01492
| 0.018846
| 0.018846
| 0
| 0
| 0
| 0
| 0
| 0.008313
| 0.207225
| 4,097
| 125
| 97
| 32.776
| 0.775862
| 0.10886
| 0
| 0.021277
| 0
| 0
| 0.291323
| 0.18726
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031915
| false
| 0
| 0.042553
| 0
| 0.191489
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0520b1fd12c6c807e99e2585c0ad990c4a9c1185
| 3,001
|
py
|
Python
|
undercrawler/crazy_form_submitter.py
|
abael/ScrapyGenericCrawler
|
9d210fb862a7fddd58c548847d8f5c2d72eae5c1
|
[
"MIT"
] | 88
|
2016-04-07T18:41:19.000Z
|
2022-01-03T12:18:44.000Z
|
undercrawler/crazy_form_submitter.py
|
shekar9160/generic_scraper
|
e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d
|
[
"MIT"
] | 61
|
2016-04-06T18:31:45.000Z
|
2021-07-15T12:10:23.000Z
|
undercrawler/crazy_form_submitter.py
|
shekar9160/generic_scraper
|
e5104dca5a5d9fe4b9ddd085c7b0935a712ea74d
|
[
"MIT"
] | 31
|
2016-04-14T07:49:49.000Z
|
2021-08-08T17:07:36.000Z
|
import logging
import random
import string
from scrapy.http.request.form import _get_inputs as get_form_data
logger = logging.getLogger(__name__)
SEARCH_TERMS = list(string.ascii_lowercase) + list('123456789 *%.?')
def search_form_requests(url, form, meta,
                         search_terms=None, extra_search_terms=None):
    ''' yield kwargs for search requests, using default search terms and
    extra_search_terms, also randomly refining search if there are such
    options in the form.
    '''
    refinement_options = [False]
    if not any(input_type == 'search query'
               for input_type in meta['fields'].values()):
        return
    n_target_inputs = sum(
        input_type == 'search query' or
        _is_refinement_input(input_type, form.inputs[input_name])
        for input_name, input_type in meta['fields'].items())
    # The guard above guarantees at least one 'search query' field.
    assert n_target_inputs >= 1
    # 2 and 4 here are just some values that feel right, need tuning.
    # Bug fix: extend (not append) so each True is a separate refinement
    # attempt; append added the whole list as one (always-truthy) option.
    refinement_options.extend([True] * 2 * min(2, n_target_inputs))
    extra_search_terms = set(extra_search_terms or [])
    main_search_terms = set(
        search_terms if search_terms is not None else SEARCH_TERMS)
    for search_term in (main_search_terms | extra_search_terms):
        for do_random_refinement in refinement_options:
            formdata = _fill_search_form(
                search_term, form, meta, do_random_refinement)
            if formdata is not None:
                priority = -3 if do_random_refinement else -1
                if search_term not in main_search_terms:
                    # Extra terms get a randomized, lower priority so they
                    # don't crowd out the main search terms.
                    min_priority = min(
                        priority, -int(len(extra_search_terms) / 10))
                    priority = random.randint(min_priority, priority)
                logger.debug(
                    'Scheduled search: "%s" at %s with priority %d%s',
                    search_term, url, priority,
                    ' with random refinement' if do_random_refinement else '')
                yield dict(
                    url=url,
                    formdata=formdata,
                    method=form.method,
                    priority=priority,
                )
def _fill_search_form(search_term, form, meta, do_random_refinement=False):
    """Build formdata for one search submission.

    Puts search_term into a randomly chosen search-query field; with
    do_random_refinement, also switches on roughly half of the refinement
    checkboxes.
    """
    extra_fields = {}
    query_fields = []
    for name, field_type in meta['fields'].items():
        element = form.inputs[name]
        if field_type == 'search query':
            query_fields.append(name)
        elif (do_random_refinement
                and _is_refinement_input(field_type, element)
                and element.type == 'checkbox'
                and random.random() > 0.5):
            extra_fields[name] = 'on'
    extra_fields[random.choice(query_fields)] = search_term
    return get_form_data(form, extra_fields, None, None, None)
def _is_refinement_input(input_type, input_el):
    """True for checkbox elements classified as search refinements."""
    if input_type != 'search category / refinement':
        return False
    return getattr(input_el, 'type', None) in ['checkbox']
| 40.554054
| 78
| 0.638121
| 372
| 3,001
| 4.857527
| 0.284946
| 0.091312
| 0.053127
| 0.033204
| 0.186497
| 0.133924
| 0.133924
| 0.097399
| 0.097399
| 0.05534
| 0
| 0.009246
| 0.27924
| 3,001
| 73
| 79
| 41.109589
| 0.826167
| 0.072309
| 0
| 0
| 0
| 0
| 0.068017
| 0
| 0
| 0
| 0
| 0
| 0.017544
| 1
| 0.052632
| false
| 0
| 0.070175
| 0.017544
| 0.175439
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0524ab92ab97c6f8922dd3dd0c03bf3b79b8a0ee
| 921
|
py
|
Python
|
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | null | null | null |
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | 1
|
2020-01-10T01:06:16.000Z
|
2020-01-10T01:06:16.000Z
|
libs/libssh2/libssh2.py
|
simont77/craft-blueprints-kde
|
3c0a40923c7c8e0341ad08afde22f86bb1517ddf
|
[
"BSD-2-Clause"
] | 2
|
2020-01-02T18:22:12.000Z
|
2020-08-05T13:39:21.000Z
|
# -*- coding: utf-8 -*-
import info
class subinfo(info.infoclass):
    # Craft blueprint metadata for libssh2.
    def setTargets( self ):
        """Register the source targets: git master (pinned branch) and the
        1.8.0 release tarball, plus a linkage patch for master."""
        self.svnTargets['master'] = 'https://github.com/libssh2/libssh2.git||libssh2-1.8.0'
        self.targets['1.8.0'] = "https://www.libssh2.org/download/libssh2-1.8.0.tar.gz"
        self.targetInstSrc['1.8.0'] = "libssh2-1.8.0"
        # Patch applied only to the git target; the 1 is the -p strip level.
        self.patchToApply['master'] = ('0001-Ensure-other-libraries-are-told-the-correct-linkage-.patch', 1)
        self.defaultTarget = 'master'
    def setDependencies( self ):
        """Declare build-time and runtime dependencies (zlib, openssl)."""
        self.buildDependencies['virtual/base'] = 'default'
        self.runtimeDependencies['libs/zlib'] = 'default'
        self.runtimeDependencies['libs/openssl'] = 'default'
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
    # CMake-based build of libssh2 with zlib compression enabled.
    def __init__( self, **args ):
        CMakePackageBase.__init__( self )
        # Note the trailing space: defines are concatenated by the base class.
        self.subinfo.options.configure.defines = "-DENABLE_ZLIB_COMPRESSION=ON "
| 38.375
| 108
| 0.667752
| 108
| 921
| 5.601852
| 0.546296
| 0.016529
| 0.024793
| 0.049587
| 0.046281
| 0
| 0
| 0
| 0
| 0
| 0
| 0.03534
| 0.170467
| 921
| 23
| 109
| 40.043478
| 0.756545
| 0.022801
| 0
| 0
| 0
| 0.058824
| 0.326644
| 0.101449
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.117647
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05273ebf4b8d4eb6302f146e1b519e163f850d92
| 5,289
|
py
|
Python
|
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | 3
|
2020-11-24T20:30:59.000Z
|
2021-05-26T02:33:53.000Z
|
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | null | null | null |
tooling/maven.py
|
AntonisGkortzis/Vulnerabilities-in-Reused-Software
|
16b2087cb595b48446dadda8cae75dad6ef1433b
|
[
"MIT"
] | null | null | null |
import os
import re
import logging
import zipfile
logger = logging.getLogger(__name__)
class MvnArtifact:
    """
    Class representing a fully defined maven artifact
    (e.g., <groupId>:<artifactId>:<type>:<version>[:<dep_type>])
    """
    # Groups: groupId, artifactId, type, version, optional (":" + dep_type).
    __elem_re = re.compile(r'^(.+?):(.+?):(.+?):(.+?)((:)(.+))?$')

    def __init__(self, artifact_str):
        elems = MvnArtifact.__elem_re.match(artifact_str).groups()
        self.groupId = elems[0]
        self.artifactId = elems[1]
        self.type = elems[2]
        self.version = elems[3]
        # (e.g., compile, test, provided); None when the segment is absent.
        self.dep_type = elems[6]

    def __str__(self):
        # Bug fix: original read nonexistent self._dict_ (AttributeError);
        # dep_type is always set, so test its value instead of key presence.
        dt = '' if self.dep_type is None else f':{self.dep_type}'
        return f'{self.groupId}:{self.artifactId}:{self.type}:{self.version}{dt}'

    def __eq__(self, other):
        # dep_type is deliberately excluded: two coordinates are the same
        # artifact regardless of dependency scope.
        if isinstance(other, MvnArtifact):
            return self.groupId == other.groupId and self.artifactId == other.artifactId \
                and self.type == other.type and self.version == other.version
        return NotImplemented

    def __hash__(self):
        # Bug fix: hash on a filtered copy; the original deleted dep_type
        # from the live instance's __dict__, destroying its state.
        d = {k: v for k, v in self.__dict__.items() if k != 'dep_type'}
        return hash(tuple(sorted(d.items())))

    def get_class_list(self, m2_home=os.path.expanduser('~/.m2')):
        """Return the fully qualified class names packaged in this artifact.

        Bug fix: removed the hard-coded m2_home override (debug leftover)
        so the caller-supplied repository location is honored; the
        unsupported-extension check now runs before opening the archive.
        """
        art_path = self.get_m2_path(m2_home)
        logger.debug("@@-zip file={}".format(art_path))
        if not art_path.endswith('.war') and not art_path.endswith('.jar'):
            logger.warning(f'Unsupported file type: {os.path.splitext(art_path)[1]}')
            return []
        container = zipfile.ZipFile(art_path)
        # War files nest classes under WEB-INF/classes/; strip that prefix.
        len_preffix = len('WEB-INF/classes/') if art_path.endswith('.war') else 0
        # -6 strips the trailing '.class'.
        return [i[len_preffix:-6].replace(os.path.sep, '.')
                for i in container.namelist() if i.endswith('.class')]

    def get_m2_path(self, m2_home=os.path.expanduser('~/.m2')):
        """Return this artifact's file path inside the local m2 repository.

        Bug fix: removed the hard-coded m2_home override (debug leftover).
        """
        return os.sep.join([m2_home, 'repository',
                            self.groupId.replace('.', os.sep),
                            self.artifactId,
                            self.version,
                            f"{self.artifactId}-{self.version}.{self.type}"])
class ArtifactTree:
    """Dependency tree of maven artifacts, as printed by mvn dependency:tree."""

    def __init__(self, artifact):
        self.artifact = MvnArtifact(artifact)
        self.deps = []

    def __iter__(self):
        # Pre-order traversal: the node itself, then its dependencies.
        yield self
        for d in self.deps:
            for t in d.__iter__():
                yield t

    def print_tree(self, indent=0):
        """Pretty-print the tree, two extra spaces per nesting level."""
        print(' ' * indent, self.artifact)
        for i in self.deps:
            i.print_tree(indent+2)

    def filter_deps(self, filter):
        """Recursively drop dependency nodes for which filter(node) is falsy."""
        self.deps = [i for i in self.deps if filter(i)]
        for i in self.deps:
            i.filter_deps(filter)

    def missing_m2_pkgs(self, m2_home=os.path.expanduser('~/.m2')):
        """Return the nodes whose artifact file is absent from the local repo.

        Bug fix: removed the hard-coded m2_home override (debug leftover)
        so the caller-supplied repository location is honored.
        """
        return [p for p in self if not os.path.exists(p.artifact.get_m2_path(m2_home))]

    @staticmethod
    def parse_tree_str(tree_str):
        """Parse raw 'mvn dependency:tree' text; [7:] strips '[INFO] '."""
        return ArtifactTree.__parse_tree([l[7:].rstrip() for l in tree_str.split('\n')], 0)

    @staticmethod
    def __parse_tree(tree_lst, i):
        # Recursively consume lines nested deeper than the current level
        # as dependencies of the node at index i.
        root_level, root_artifact = ArtifactTree.__parse_item(tree_lst[i])
        t = ArtifactTree(root_artifact)
        while i+1 < len(tree_lst) and root_level < ArtifactTree.__parse_item(tree_lst[i+1])[0]:
            t.deps.append(ArtifactTree.__parse_tree(tree_lst, i+1))
            tree_lst.pop(i+1)
        return t

    @staticmethod
    def __parse_item(item):
        # Split the ASCII-art prefix ('+- ', '|  ', '\- ') from the artifact
        # string; each tree level is 3 characters wide.
        parts = re.match(r'([ \+\-\|\\]*)(.+)', item).groups()
        return int(len(parts[0])/3), parts[1]
def get_compiled_modules(project_trees_file):
    """Parse a maven dependency-tree dump and return the jar/war trees whose
    compile-scope jar/war dependencies are all present in the local repo.

    Returns [] (after logging) when the file cannot be split into trees.
    """
    with open(project_trees_file) as f:
        try:
            str_trees = split_trees([l.rstrip() for l in f.readlines()])
        except Exception:
            # Bug fix: was a bare 'except:', which also swallowed
            # SystemExit/KeyboardInterrupt.
            logger.error(f'File is malformed: {project_trees_file}')
            return []
    trees = []
    for t in str_trees:
        t = ArtifactTree.parse_tree_str('\n'.join(t))
        if t.artifact.type in ['jar', 'war']:
            # Keep only compile-scope jar/war dependencies.
            t.filter_deps(lambda d : d.artifact.dep_type == 'compile' and d.artifact.type in ['jar', 'war'])
            trees.append(t)
    return [t for t in trees if not t.missing_m2_pkgs()]
def filter_mvn_output(mvn_tree_output):
    """Read a captured 'mvn dependency:tree' log file and keep only the
    '[INFO] <coords>' tree lines, stripped of trailing whitespace.

    Bug fix: the original opened the undefined name 'tree_file' (NameError);
    the 'mvn_tree_output' parameter (path to the log file) is now used.
    """
    re_tree_element = re.compile(r'^\[INFO\] (\||\\\-|\+\-| )*([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+?(:[a-zA-Z\-]+)?$')
    with open(mvn_tree_output, 'r') as f:
        lines = f.readlines()
    tree_lines = [l.rstrip() for l in lines if re_tree_element.match(l)]
    return tree_lines
def split_trees(tree_lines):
    """Split '[INFO] ...' dependency-tree lines into one list per root
    artifact: [root_line, dep_line, dep_line, ...].

    Bug fix: the trailing flush is now guarded, so an empty input returns
    [] instead of raising TypeError on 'tree' being None. A dependency
    line appearing before any root still raises (malformed input), which
    the caller treats as a parse failure.
    """
    # Root lines are '[INFO] ' followed directly by maven coordinates
    # (no ASCII-art tree prefix).
    re_artifact = re.compile(r'^\[INFO\] ([a-zA-Z_$][a-zA-Z\d_\-$]*\.)*[a-zA-Z_$][a-zA-Z\d_\-$]*:.+?:([a-zA-Z]+?):.+$')
    trees = []
    tree = None
    for l in tree_lines:
        if re_artifact.match(l):
            if tree:
                trees.append([tree['root']] + tree['deps'])
            tree = {'root': l, 'deps': []}
        else:
            tree['deps'].append(l)
    if tree:
        trees.append([tree['root']] + tree['deps'])
    return trees
| 33.687898
| 156
| 0.560219
| 701
| 5,289
| 4.00428
| 0.203994
| 0.011756
| 0.015675
| 0.008906
| 0.191664
| 0.145351
| 0.089063
| 0.089063
| 0.089063
| 0.089063
| 0
| 0.010455
| 0.276612
| 5,289
| 156
| 157
| 33.903846
| 0.72321
| 0.027037
| 0
| 0.126126
| 0
| 0.018018
| 0.13466
| 0.080991
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153153
| false
| 0
| 0.036036
| 0.009009
| 0.351351
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0527ccd6baf873620f163e0b3ed2a44bfa92eff6
| 1,812
|
py
|
Python
|
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
ptsites/sites/hares.py
|
kbnq/flexget_qbittorrent_mod
|
e52d9726b80aab94cf3d9ee6c382b6721b757d3b
|
[
"MIT"
] | null | null | null |
from ..schema.nexusphp import Attendance
from ..schema.site_base import Work, SignState
from ..utils.net_utils import NetUtils
class MainClass(Attendance):
    # Site profile for the NexusPHP tracker at club.hares.top
    # (attendance-page based sign-in).
    URL = 'https://club.hares.top/'
    # Thresholds the framework uses to evaluate user-class progress.
    USER_CLASSES = {
        'downloaded': [8796093022208],
        'share_ratio': [5.5],
        'days': [364]
    }
    def build_workflow(self, entry, config):
        """Return the sign-in workflow: one GET to /attendance.php whose
        response must match either the fresh sign-in confirmation or the
        already-signed-in message."""
        return [
            Work(
                url='/attendance.php',
                method='get',
                succeed_regex=[
                    '这是您的第 \\d+ 次签到,已连续签到 \\d+ 天,本次签到获得 \\d+ 个奶糖。',
                    '已签到'
                ],
                check_state=('final', SignState.SUCCEED),
                is_base_content=True
            )
        ]
    def build_selector(self):
        """Merge site-specific detail selectors (points, seeding, leeching)
        into the base NexusPHP selector; this site has no H&R section."""
        selector = super(MainClass, self).build_selector()
        NetUtils.dict_merge(selector, {
            'detail_sources': {
                'default': {
                    'do_not_strip': True,
                    'link': '/userdetails.php?id={}',
                    'elements': {
                        'bar': 'ul.list-inline',
                        'table': 'div.layui-col-md10 > table:nth-child(1) > tbody'
                    }
                }
            },
            'details': {
                'points': {
                    'regex': '奶糖.*?([\\d,.]+)',
                    'handle': self.handle_points
                },
                'seeding': {
                    'regex': ('(做种中).*?(\\d+)', 2)
                },
                'leeching': {
                    'regex': ('(下载中).*?\\d+\\D+(\\d+)', 2)
                },
                'hr': None
            }
        })
        return selector
    def handle_points(self, value):
        # A bare '.' means the page rendered no points value; normalize to '0'.
        if value in ['.']:
            return '0'
        else:
            return value
| 29.225806
| 82
| 0.400662
| 148
| 1,812
| 4.790541
| 0.655405
| 0.028209
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024218
| 0.453091
| 1,812
| 61
| 83
| 29.704918
| 0.691221
| 0
| 0
| 0
| 0
| 0
| 0.197572
| 0.024283
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.053571
| 0.017857
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05299930cfe175dfdd505fa507a88544ad0e95c1
| 716
|
py
|
Python
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 1,500
|
2018-06-11T20:36:24.000Z
|
2022-03-31T08:29:01.000Z
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 2,111
|
2018-06-11T04:10:29.000Z
|
2022-03-26T14:41:32.000Z
|
tests/garage/sampler/test_rl2_worker.py
|
blacksph3re/garage
|
b4abe07f0fa9bac2cb70e4a3e315c2e7e5b08507
|
[
"MIT"
] | 309
|
2018-07-24T11:18:48.000Z
|
2022-03-30T16:19:48.000Z
|
from garage.envs import GymEnv
from garage.tf.algos.rl2 import RL2Worker
from tests.fixtures import TfGraphTestCase
from tests.fixtures.envs.dummy import DummyBoxEnv
from tests.fixtures.policies import DummyPolicy
class TestRL2Worker(TfGraphTestCase):
    # Unit test for RL2Worker episode collection.
    def test_rl2_worker(self):
        """Rolling out 5 episodes of max length 100 should yield 500 rewards."""
        env = GymEnv(DummyBoxEnv(obs_dim=(1, )))
        policy = DummyPolicy(env_spec=env.spec)
        worker = RL2Worker(seed=1,
                           max_episode_length=100,
                           worker_number=1,
                           n_episodes_per_trial=5)
        worker.update_agent(policy)
        worker.update_env(env)
        episodes = worker.rollout()
        # 5 trials x 100 steps each.
        assert episodes.rewards.shape[0] == 500
| 32.545455
| 50
| 0.655028
| 83
| 716
| 5.506024
| 0.566265
| 0.059081
| 0.111597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030476
| 0.26676
| 716
| 21
| 51
| 34.095238
| 0.84
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.058824
| false
| 0
| 0.294118
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
052a76693b3fb6c307548d396e0accbc369737c8
| 660
|
py
|
Python
|
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | null | null | null |
dependencies/src/4Suite-XML-1.0.2/test/Xml/Xslt/Borrowed/uo_20001208.py
|
aleasims/Peach
|
bb56841e943d719d5101fee0a503ed34308eda04
|
[
"MIT"
] | 1
|
2020-07-26T03:57:45.000Z
|
2020-07-26T03:57:45.000Z
|
#Uche Ogbuji exercises format-number on Brad Marshall's behalf
from Xml.Xslt import test_harness
sheet_1 = """\
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:template match = "/">
<xsl:value-of select='format-number(10000000000.75 + 10000000000.50, "##.##")'/>
</xsl:template>
</xsl:stylesheet>"""
#"
source_1 = "<spam/>"
expected_1 = """<?xml version="1.0" encoding="UTF-8"?>
20000000001.25"""
def Test(tester):
    """Run the format-number stylesheet against the dummy source document."""
    src_info = test_harness.FileInfo(string=source_1)
    sheet_info = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, src_info, [sheet_info], expected_1)
| 24.444444
| 84
| 0.671212
| 89
| 660
| 4.865169
| 0.573034
| 0.101617
| 0.04157
| 0.115473
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.098214
| 0.151515
| 660
| 26
| 85
| 25.384615
| 0.675
| 0.093939
| 0
| 0
| 0
| 0.125
| 0.518456
| 0.060403
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
052bebc9ce249268deadd50cd183873b6f1a799a
| 2,697
|
py
|
Python
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 6
|
2021-10-08T10:20:37.000Z
|
2022-03-30T08:56:10.000Z
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 2
|
2021-11-11T11:44:29.000Z
|
2022-03-08T06:54:54.000Z
|
tests/test_connection.py
|
daniel-herrero/fastapi-mailman
|
a174d0ec777d3330dc5464f71fafa7829db07bf1
|
[
"MIT"
] | 1
|
2022-03-04T14:43:22.000Z
|
2022-03-04T14:43:22.000Z
|
import typing as t
import pytest as pt
from fastapi_mailman import BadHeaderError, EmailMessage
if t.TYPE_CHECKING:
from fastapi_mailman import Mail
@pt.mark.anyio
async def test_send_message(mail: "Mail"):
    """A sent message lands in the outbox with the configured default sender."""
    # locmem backend stores messages in mail.outbox instead of delivering them.
    mail.backend = "locmem"
    msg = EmailMessage(
        subject="testing",
        to=["to@example.com"],
        body="testing",
    )
    await msg.send()
    assert len(mail.outbox) == 1
    sent_msg = mail.outbox[0]
    assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_message_using_connection(mail: "Mail"):
    """Messages can be sent both via EmailMessage.send and conn.send_messages."""
    async with mail.get_connection() as conn:
        msg = EmailMessage(
            subject="testing",
            to=["to@example.com"],
            body="testing",
            connection=conn,
        )
        await msg.send()
        assert len(mail.outbox) == 1
        sent_msg = mail.outbox[0]
        assert sent_msg.from_email == mail.default_sender
        # Re-sending the same message through the connection appends a copy.
        await conn.send_messages([msg])
        assert len(mail.outbox) == 2
@pt.mark.anyio
async def test_send_single(mail: "Mail"):
    """All fields of a single sent message survive the round trip to the outbox."""
    async with mail.get_connection() as conn:
        msg = EmailMessage(
            subject="testing",
            to=["to@example.com"],
            body="testing",
            connection=conn,
        )
        await msg.send()
        assert len(mail.outbox) == 1
        sent_msg = mail.outbox[0]
        assert sent_msg.subject == "testing"
        assert sent_msg.to == ["to@example.com"]
        assert sent_msg.body == "testing"
        assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_many(mail: "Mail"):
    """send_messages delivers a whole batch in one call."""
    async with mail.get_connection() as conn:
        msgs = []
        for _ in range(10):
            msg = EmailMessage(mailman=mail, subject="testing", to=["to@example.com"], body="testing")
            msgs.append(msg)
        await conn.send_messages(msgs)
        assert len(mail.outbox) == 10
        sent_msg = mail.outbox[0]
        assert sent_msg.from_email == mail.default_sender
@pt.mark.anyio
async def test_send_without_sender(mail: "Mail"):
    """With no default sender configured, the message's from_email stays None."""
    mail.default_sender = None
    msg = EmailMessage(mailman=mail, subject="testing", to=["to@example.com"], body="testing")
    await msg.send()
    assert len(mail.outbox) == 1
    sent_msg = mail.outbox[0]
    assert sent_msg.from_email is None
@pt.mark.anyio
async def test_send_without_to(mail: "Mail"):
    """A message with an empty recipient list reports zero messages sent."""
    msg = EmailMessage(subject="testing", to=[], body="testing")
    assert await msg.send() == 0
@pt.mark.anyio
async def test_bad_header_subject(mail):
    """Newlines in the subject (header-injection attempt) raise BadHeaderError."""
    msg = EmailMessage(subject="testing\n\r", body="testing", to=["to@example.com"])
    with pt.raises(BadHeaderError):
        await msg.send()
| 28.389474
| 102
| 0.629218
| 355
| 2,697
| 4.63662
| 0.180282
| 0.055286
| 0.063183
| 0.068044
| 0.699271
| 0.6452
| 0.631227
| 0.614824
| 0.565006
| 0.540705
| 0
| 0.007353
| 0.243604
| 2,697
| 94
| 103
| 28.691489
| 0.79951
| 0
| 0
| 0.578947
| 0
| 0
| 0.090471
| 0
| 0
| 0
| 0
| 0
| 0.197368
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
052c8a3287a40e2446164e87ba133bbda46f1779
| 294
|
py
|
Python
|
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
Workshops/enBuyukSayi.py
|
brkyydnmz/Python
|
8cde0421edda6ac5b7fd30e8f20ad7cb6e1708b0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Read three integers from the user and report the largest one.
sayi1 = int(input("1. Sayı:"))
sayi2 = int(input("2. Sayı:"))
sayi3 = int(input("3. Sayı:"))
# max() yields the same value as the original if/elif/else comparison chain.
enBuyuk = max(sayi1, sayi2, sayi3)
print("En Büyük Sayı:",enBuyuk)
| 21
| 38
| 0.608844
| 42
| 294
| 4.261905
| 0.47619
| 0.134078
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 0.173469
| 294
| 14
| 39
| 21
| 0.662551
| 0.071429
| 0
| 0
| 0
| 0
| 0.139706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
052d317538142bae7b508c18b4e71450d9b3e276
| 399
|
py
|
Python
|
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
08/seven-segment_part1.py
|
ReinprechtStefan/AdventOfCode2021
|
a2750c5fbcc7fc927d710f4db6926d015a2fb673
|
[
"Apache-2.0"
] | null | null | null |
# AoC 2021 day 8 part 1: count output digits drawn with a unique number of
# segments (digit 1 uses 2 segments, 7 uses 3, 4 uses 4, 8 uses 7).
UNIQUE_SEGMENT_COUNTS = {2, 3, 4, 7}

with open('input.txt') as f:
    counter = sum(
        1
        for line in f
        for segment in line.split(" | ")[1].strip().split(" ")
        if len(segment) in UNIQUE_SEGMENT_COUNTS
    )
print(counter)
| 22.166667
| 53
| 0.491228
| 49
| 399
| 3.959184
| 0.571429
| 0.154639
| 0.175258
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027451
| 0.360902
| 399
| 17
| 54
| 23.470588
| 0.733333
| 0.170426
| 0
| 0
| 0
| 0
| 0.039634
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
052ffb78d4e1a7b366b635d756b5d2bbba48de18
| 7,605
|
py
|
Python
|
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
main/gui.py
|
MBmasher/weighted-object
|
eaaf25338240873b7c4197097b2bb73be256b702
|
[
"MIT"
] | null | null | null |
import Tkinter
import weighted_objects
import tkFileDialog
import time
import ttk
import numpy
import sys
while True:
# Ask user for file dialog.
Tkinter.Tk().withdraw()
osu_file_path = tkFileDialog.askopenfilename(title="Select an osu file", filetypes=(("osu files", "*.osu"),))
# Calculate final nerf.
final_nerf = weighted_objects.calculate_nerf(osu_file_path)
distance_snap_list = weighted_objects.weighted_distance_snap_list
time_list = weighted_objects.time_list
# Separate list into multiple lists when breaks exist.
time_break_separated_list = [[]]
list_number = 0
for i in range(len(time_list) - 1):
if time_list[i + 1] - time_list[i] > 3000:
# Create new list.
list_number += 1
time_break_separated_list.append([])
time_break_separated_list[list_number].append(time_list[i])
# Coordinates to be later used in the canvas.
canvas_distance_snap_list = []
canvas_time_list = []
# Calculating coordinates.
for i in time_list:
canvas_time_list.append(350 * (i - time_list[0]) / (time_list[-1] - time_list[0]))
for i in distance_snap_list:
canvas_distance_snap_list.append(150 - i * 75)
# Creating the GUI.
root = Tkinter.Tk()
root.resizable(width=False, height=False)
root.geometry("400x500")
root.title("Weighted Objects")
# Stuff for the timer.
ms = time_list[0]
note_number = 0
# Function to be used to initialize the timer.
    def first_load():
        """Start the playback timer and kick off the periodic tick loop."""
        # Variable relative_time is the time when the user has clicked the button to start timer.
        global relative_time
        # Offset by the first note time so the timer begins at time_list[0].
        relative_time = int(round(time.time() * 1000)) - time_list[0]
        tick()
# Function to be used to run the timer.
    def tick():
        """Advance the timer; reschedules itself every 30 ms via Tkinter after()."""
        # Variable ms is the time that constantly goes up during the timer.
        global ms
        time_label.after(30, tick)
        ms = int(round(time.time() * 1000)) - relative_time
        time_label["text"] = "Timer: {}ms".format(ms)
        update_labels()
        draw_timer_line()
# Function to be used to update the labels that need constant updates.
    def update_labels():
        """Refresh the note-dependent labels and progress bar for the current ms."""
        global note_number
        # Updates note number depending on where the timer is at.
        for i in range(len(time_list)):
            if ms < time_list[i]:
                note_number = i - 1
                break
        distance_snap_label["text"] = "Weighted: {:.2f}x".format(distance_snap_list[note_number])
        progress_bar["value"] = distance_snap_list[note_number]
        cumulative_label["text"] = "Cumulative Value: {}".format(numpy.cumsum(distance_snap_list)[note_number])
# Function to be used to draw the green line that indicates where the timer is at.
    def draw_timer_line():
        """Move the green timer line to the canvas x position matching ms."""
        if ms < time_list[-1]:
            # Map ms onto the 350px-wide graph, relative to the map's time span.
            draw_x = 350 * (ms - time_list[0]) / (time_list[-1] - time_list[0])
            difficulty_graph.coords(timer_line, draw_x, 0, draw_x, 150)
# Function used to kill the GUI.
    def stop():
        """Close this window; the enclosing while-loop then reopens the file dialog."""
        root.quit()
        root.destroy()
# Function used to kill the program entirely.
    def kill():
        """Terminate the whole program (bound to the window close button)."""
        sys.exit()
Tkinter.Label(root, fg="black",
text="Old Amount of Objects: {}".format(len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="New Calculated Weighted Objects: {:.2f}".format(sum(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Raw Percentage Change: {:.2f}%".format(100 * sum(distance_snap_list)
/ len(distance_snap_list))).pack()
Tkinter.Label(root, fg="black",
text="Calculated Nerf/Buff: {:.2f}%".format(100 * final_nerf)).pack()
Tkinter.Label(root, fg="blue", text="Graph of Distance Snap/Cumulative Sum of Distance Snap against Time").pack()
difficulty_graph = Tkinter.Canvas(root, width=350, height=150)
difficulty_graph.pack()
Tkinter.Label(root, fg="black", text="Red/Blue: Distance Snap").pack()
Tkinter.Label(root, fg="black", text="Yellow: Cumulative Sum of Distance Snap").pack()
# Draw grid lines and fill background
difficulty_graph.create_rectangle(0, 0, 350, 150, fill="#dddddd")
difficulty_graph.create_line(0, 30, 350, 30, fill="#cccccc")
difficulty_graph.create_line(0, 60, 350, 60, fill="#cccccc")
difficulty_graph.create_line(0, 90, 350, 90, fill="#cccccc")
difficulty_graph.create_line(0, 120, 350, 120, fill="#cccccc")
difficulty_graph.create_line(70, 0, 70, 150, fill="#cccccc")
difficulty_graph.create_line(140, 0, 140, 150, fill="#cccccc")
difficulty_graph.create_line(210, 0, 210, 150, fill="#cccccc")
difficulty_graph.create_line(280, 0, 280, 150, fill="#cccccc")
# Draw blue line graph, distance snap.
for i in range(len(distance_snap_list) - 1):
# Don't continue the graph if there is a break.
if time_list[i + 1] - time_list[i] < 3000:
difficulty_graph.create_line(canvas_time_list[i], canvas_distance_snap_list[i],
canvas_time_list[i + 1], canvas_distance_snap_list[i + 1],
fill="#9999ff")
# Draw red line graph, the average thing (what do you call this?).
for n in range(len(time_break_separated_list)):
for x in range(len(time_break_separated_list[n]) - 20):
if n == 0:
i = x
else:
i = x + numpy.cumsum(map(len, time_break_separated_list))[n - 1]
# Don't continue graph if there's a break.
if time_list[i + 11] - time_list[i + 10] < 3000:
difficulty_graph.create_line(canvas_time_list[i + 10],
sum(canvas_distance_snap_list[i:i + 20]) / 20.0,
canvas_time_list[i + 11],
sum(canvas_distance_snap_list[i + 1:i + 21]) / 20.0,
fill="#990000")
# Draw yellow line graph, cumulative distance snap sum.
for i in range(len(distance_snap_list) - 1):
difficulty_graph.create_line(canvas_time_list[i],
150 - (149 * numpy.cumsum(distance_snap_list)[i] / sum(distance_snap_list)),
canvas_time_list[i + 1],
150 - (149 * numpy.cumsum(distance_snap_list)[i + 1] / sum(distance_snap_list)),
fill="#ffff00")
timer_line = difficulty_graph.create_line(0, 0, 0, 150, fill="#77ff77")
time_label = Tkinter.Label(root, fg="black")
time_label.pack()
distance_snap_label = Tkinter.Label(root, fg="black")
distance_snap_label.pack()
cumulative_label = Tkinter.Label(root, fg="black")
cumulative_label.pack()
progress_bar = ttk.Progressbar(root, orient="horizontal", length=200, mode="determinate")
progress_bar.pack()
progress_bar["maximum"] = 2
Tkinter.Button(root, fg="blue", text="Start Realtime!", command=first_load).pack()
Tkinter.Button(root, fg="red", text="Choose another map", command=stop).pack()
# If window is closed, stop the program.
root.protocol("WM_DELETE_WINDOW", kill)
root.mainloop()
| 39.201031
| 118
| 0.598028
| 984
| 7,605
| 4.426829
| 0.214431
| 0.085399
| 0.080808
| 0.068871
| 0.399679
| 0.297062
| 0.205923
| 0.115932
| 0.091139
| 0.033747
| 0
| 0.041096
| 0.289678
| 7,605
| 193
| 119
| 39.404145
| 0.765272
| 0.139645
| 0
| 0.049587
| 0
| 0
| 0.094229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049587
| false
| 0
| 0.057851
| 0
| 0.107438
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05343aca0c5c82c59e3358b3b9d65dce1ef6b0de
| 806
|
py
|
Python
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 9
|
2018-07-08T20:01:33.000Z
|
2022-03-29T11:31:51.000Z
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 1
|
2019-07-10T12:16:53.000Z
|
2019-07-10T12:16:53.000Z
|
pyzfscmds/check.py
|
johnramsden/pyzfscmds
|
b5d430ffd0454bc6b09e256aeea67164714d9809
|
[
"BSD-3-Clause"
] | 5
|
2018-06-04T02:33:43.000Z
|
2020-05-25T22:48:58.000Z
|
"""
Startup checks
"""
import subprocess
import pyzfscmds.system.agnostic as zfssys
def is_root_on_zfs() -> bool:
    """Check if the system is running with its root ("/") dataset on ZFS.

    Returns:
        True when the root filesystem is a mounted ZFS dataset.

    Raises:
        RuntimeError: if the platform is unsupported by pyzfscmds, or the
            root filesystem is not a ZFS dataset.
    """
    system = zfssys.check_valid_system()
    if system is None:
        # Plain string: the original used an f-string with no placeholders.
        raise RuntimeError("System is not yet supported by pyzfscmds\n")

    root_dataset = None
    # Only query the mountpoint when the zfs module is loaded and a pool exists.
    if zfssys.zfs_module_loaded() and zpool_exists():
        root_dataset = zfssys.mountpoint_dataset("/")

    if root_dataset is None:
        raise RuntimeError("System is not booting off a ZFS root dataset\n")

    return True
def zpool_exists() -> bool:
    """Return True when zpools can be queried via the ``zpool`` CLI.

    Runs ``zpool get -H version`` and treats any failure — a non-zero exit
    status or a missing ``zpool`` executable — as "no pool available".
    """
    try:
        subprocess.check_call(["zpool", "get", "-H", "version"],
                              stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except (subprocess.CalledProcessError, FileNotFoundError):
        # Bug fix: on systems without the zpool binary, check_call raises
        # FileNotFoundError, which previously escaped this function and
        # crashed callers instead of answering "no".
        return False
    return True
| 23.705882
| 77
| 0.66005
| 101
| 806
| 5.128713
| 0.504951
| 0.084942
| 0.034749
| 0.088803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.243176
| 806
| 33
| 78
| 24.424242
| 0.84918
| 0.05335
| 0
| 0.105263
| 0
| 0
| 0.141333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.105263
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0536d3d2cb26fae2a4bb43f1a3c0258c006ca24c
| 2,015
|
py
|
Python
|
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | 4
|
2020-09-03T03:39:53.000Z
|
2021-08-25T03:53:41.000Z
|
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | null | null | null |
dist.py
|
dladustn95/Dialogue_generator
|
004fa49e3140e6c7ceb14448604c8aa45966f70d
|
[
"MIT"
] | 1
|
2020-09-04T07:04:50.000Z
|
2020-09-04T07:04:50.000Z
|
import sys
def distinct_1(path):
    """Compute distinct-1: the ratio of unique unigrams to all unigrams.

    Args:
        path: text file with one space-tokenised sentence per line.

    Returns:
        Number of distinct unigrams divided by the total unigram count.
    """
    char_set = set()
    all_unigram_count = 0
    # "with" guarantees the handle is closed even if parsing fails
    # (the original open()/close() pair leaked it on exceptions).
    with open(path, mode="r", encoding="utf8") as in_file:
        for line in in_file.readlines():
            line = line.strip().split(" ")
            for word in line:
                char_set.add(word)
            all_unigram_count += len(line)
    distinct_unigram_count = len(char_set)
    print("distinct_unigram: ", distinct_unigram_count)
    print("all_unigram: ", all_unigram_count)
    print("distinct 1: " + str(distinct_unigram_count / all_unigram_count))
    return distinct_unigram_count / all_unigram_count
# Separator used when joining tokens into n-gram keys; chosen so it cannot
# appear inside a normal token.
sp = "#####"


def distinct_2(path):
    """Compute distinct-2: the ratio of unique bigrams to all bigrams.

    Args:
        path: text file with one space-tokenised sentence per line.

    Returns:
        Number of distinct bigrams divided by the total bigram count.
    """
    bichar_set = set()
    all_bigram_count = 0
    # "with" ensures the file is closed on every exit path.
    with open(path, mode="r", encoding="utf8") as in_file:
        for line in in_file.readlines():
            line = line.strip().split(" ")
            char_len = len(line)
            for idx in range(char_len - 1):
                bichar_set.add(line[idx] + sp + line[idx + 1])
            all_bigram_count += (char_len - 1)
    distinct_bigram_count = len(bichar_set)
    print("distinct_bigram: ", distinct_bigram_count)
    print("all_bigram: ", all_bigram_count)
    print("distinct 2: " + str(distinct_bigram_count / all_bigram_count))
    return distinct_bigram_count / all_bigram_count
def distinct_3(path):
    """Compute distinct-3: the ratio of unique trigrams to all trigrams.

    Uses the module-level separator ``sp`` to build trigram keys.

    Args:
        path: text file with one space-tokenised sentence per line.

    Returns:
        Number of distinct trigrams divided by the total trigram count.
    """
    # Locals renamed from the copy-pasted "bigram" names for clarity.
    trichar_set = set()
    all_trigram_count = 0
    with open(path, mode="r", encoding="utf8") as in_file:
        for line in in_file.readlines():
            line = line.strip().split(" ")
            char_len = len(line)
            for idx in range(char_len - 2):
                trichar_set.add(line[idx] + sp + line[idx + 1] + sp + line[idx + 2])
            all_trigram_count += (char_len - 2)
    distinct_trigram_count = len(trichar_set)
    print("distinct_trigram: ", distinct_trigram_count)
    print("all_trigram: ", all_trigram_count)
    print("distinct 3: " + str(distinct_trigram_count / all_trigram_count))
    return distinct_trigram_count / all_trigram_count
distinct_1(sys.argv[1])
distinct_2(sys.argv[1])
distinct_3(sys.argv[1])
| 34.152542
| 79
| 0.655583
| 279
| 2,015
| 4.444444
| 0.143369
| 0.159677
| 0.112903
| 0.070968
| 0.737097
| 0.602419
| 0.545968
| 0.545968
| 0.446774
| 0.4
| 0
| 0.015843
| 0.216873
| 2,015
| 59
| 80
| 34.152542
| 0.769962
| 0
| 0
| 0.423077
| 0
| 0
| 0.074405
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.019231
| 0
| 0.134615
| 0.173077
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0537e1ab85799850e99a5e3c6bb0f22f481e1ab8
| 5,036
|
py
|
Python
|
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | 4
|
2019-11-23T19:44:21.000Z
|
2020-02-20T16:54:45.000Z
|
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | null | null | null |
Scripts/plot_PolarVortexStrength_PDFs.py
|
zmlabe/StratoVari
|
c5549f54482a2b05e89bded3e3b0b3c9faa686f3
|
[
"MIT"
] | 2
|
2019-06-21T19:27:55.000Z
|
2021-02-12T19:13:22.000Z
|
"""
Calculate PDFs for polar vortex response
Notes
-----
Author : Zachary Labe
Date : 25 June 2019
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import datetime
import read_MonthlyData as MO
import calc_Utilities as UT
import cmocean
import scipy.stats as sts
### Define directories
directorydata = '/seley/zlabe/simu/'
directoryfigure = '/home/zlabe/Desktop/STRATOVARI/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Plotting PDF Polar Vortex Subsamples- %s----' % titletime)
### Alott time series (300 ensemble members)
year1 = 1701
year2 = 2000
years = np.arange(year1,year2+1,1)
###############################################################################
###############################################################################
###############################################################################
### Call arguments
varnames = ['U10']
period = 'JFM' # Enter temporal period (DJF,JFM,JFMA,ND)
simuh = 'Past' # Enter simulation time (Current,Past)
letters = [r'Mean',r'A',r'B',r'C']
###############################################################################
### Map the simulation period name to its file-name abbreviation
if simuh == 'Current':
    simuq = 'Cu'
elif simuh == 'Past':
    simuq = 'Pi'
else:
    # Bug fix: the original only *printed* the ValueError object, so the
    # script carried on with simuq undefined; raise it instead.
    raise ValueError('Wrong simulation selected!')
###############################################################################
###############################################################################
###############################################################################
### Call function for 4d variable data
lat,lon,lev,varfuture = MO.readExperiAll(varnames[0],'Future','surface')
lat,lon,lev,varpast = MO.readExperiAll(varnames[0],simuh,'surface')
### Create 2d array of latitude and longitude
lon2,lat2 = np.meshgrid(lon,lat)
### List of experiments
runs = [varfuture,varpast]
### Separate per monthly periods
# varmo is (experiment, year, lat, lon); each branch averages the months
# belonging to the selected period.
if period == 'DJF':
    varmo = np.empty((len(runs),varpast.shape[0]-1,varpast.shape[2],
                      varpast.shape[3]))
    for i in range(len(runs)):
        varmo[i,:,:,:] = UT.calcDecJanFeb(runs[i],runs[i],lat,
                                          lon,'surface',17)
elif period == 'JFM':
    varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
                      varpast.shape[3]))
    for i in range(len(runs)):
        varmo[i,:,:,:] = np.nanmean(runs[i][:,:3,:,:],axis=1)
elif period == 'JFMA':
    varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
                      varpast.shape[3]))
    for i in range(len(runs)):
        varmo[i,:,:,:] = np.nanmean(runs[i][:,:4,:,:],axis=1)
elif period == 'ND':
    varmo = np.empty((len(runs),varpast.shape[0],varpast.shape[2],
                      varpast.shape[3]))
    for i in range(len(runs)):
        varmo[i,:,:,:] = np.nanmean(runs[i][:,-2:,:,:],axis=1)
else:
    # Bug fix: the ValueError was constructed but never raised, so a bad
    # period value silently left varmo undefined.
    raise ValueError('Wrong period selected! (DJF,JFM,JFMA,ND)')
### Remove missing data
varmo[np.where(varmo < -1e10)] = np.nan
###############################################################################
###############################################################################
###############################################################################
### Slice data for 60N
latq = np.where((lat >= 59.5) & (lat <= 60.5))[0]
latu = lat[latq].squeeze()
varmou = varmo[:,:,latq,:].squeeze()
### Calculate zonal mean
varmoz = np.nanmean(varmou[:,:,:],axis=2)
### Calculate anomalies
anom = varmoz[0,:] - varmoz[1,:]
### Remove nans
mask = ~np.isnan(anom)
anom = anom[mask]
### Fit a distribution
num_bins = np.arange(-50,50,1)
mA,sA = sts.norm.fit(anom[:100])
mB,sB = sts.norm.fit(anom[100:200])
mC,sC = sts.norm.fit(anom[200:])
mm,sm = sts.norm.fit(anom[:])
A = sts.norm.pdf(num_bins,mA,sA)
B = sts.norm.pdf(num_bins,mB,sB)
C = sts.norm.pdf(num_bins,mC,sC)
meann = sts.norm.pdf(num_bins,mm,sm)
plt.figure()
plt.plot(num_bins,A,color='darkblue',linewidth=2.0,label=r'A')
plt.plot(num_bins,B,color='darkgreen',linewidth=2.0,label=r'B')
plt.plot(num_bins,C,color='darkorange',linewidth=2.0,label=r'C')
plt.plot(num_bins,meann,color='k',linewidth=2.0,label=r'Mean',
linestyle='--',dashes=(1,0.3))
l = plt.legend(shadow=False,fontsize=7,loc='upper left',
fancybox=True,frameon=False,ncol=1,bbox_to_anchor=(0.72,1),
labelspacing=0.2,columnspacing=1,handletextpad=0.4)
for text in l.get_texts():
text.set_color('k')
### Statistical tests on distribution
tA,pA = sts.ks_2samp(A,meann)
tB,pB = sts.ks_2samp(B,meann)
tC,pC = sts.ks_2samp(C,meann)
print('\n\nP-value between A and mean --> %s!' % np.round(pA,4))
print('P-value between B and mean --> %s!' % np.round(pB,4))
print('P-value between C and mean --> %s!' % np.round(pC,4))
plt.savefig(directoryfigure + 'PDFs_PolarVortex_%s_%s.png' % \
(period,simuh),dpi=300)
| 34.027027
| 79
| 0.53475
| 642
| 5,036
| 4.158879
| 0.370717
| 0.053933
| 0.017978
| 0.022472
| 0.233333
| 0.138577
| 0.138577
| 0.138577
| 0.126592
| 0.126592
| 0
| 0.026763
| 0.146743
| 5,036
| 147
| 80
| 34.258503
| 0.594601
| 0.112788
| 0
| 0.141304
| 0
| 0
| 0.116022
| 0.015746
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076087
| 0
| 0.076087
| 0.054348
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
053869e3d79166cc0d895c117eef19a63bd977af
| 906
|
py
|
Python
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 23
|
2019-09-30T15:22:58.000Z
|
2021-04-09T10:53:23.000Z
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 1
|
2019-11-24T18:37:56.000Z
|
2019-11-24T18:37:56.000Z
|
test/test_airtunnel/operators/test_sql_helpers.py
|
joerg-schneider/airflow-bootstrap
|
bbed0a2d5addd0dd6221b75c06982f47e0d837d4
|
[
"MIT"
] | 4
|
2020-01-14T03:31:34.000Z
|
2021-05-07T21:34:22.000Z
|
import pytest
from airtunnel.operators.sql import sql_helpers
TEST_SCRIPT = "ddl/test_schema/test_table.sql"
@pytest.mark.parametrize(
    argnames=("sql_path",),
    argvalues=((TEST_SCRIPT,), ("/" + TEST_SCRIPT,), ((TEST_SCRIPT,),)),
)
def test_load_sql_script(sql_path: str):
    """Loading works for a relative path, an absolute path, and an iterable of paths."""
    # load with a single relative path
    s = sql_helpers.load_sql_script(sql_path)
    assert len(s) > 50
def test_split_sql_script():
    """Splitting the loaded DDL script into statements must not raise."""
    sql_helpers.split_sql_script(sql_helpers.load_sql_script(TEST_SCRIPT))
def test_format_sql_script():
    """Interpolating parameters into the SQL script must not raise."""
    sql_helpers.format_sql_script(
        sql_script=sql_helpers.load_sql_script(TEST_SCRIPT),
        sql_params_dict={"idx_name": "i1", "idx_col": "c1"},
    )
def test_prepare_sql_params(fake_airflow_context):
    """SQL params can be computed from an Airflow task context via a callback."""
    sql_helpers.prepare_sql_params(
        compute_sql_params_function=lambda f: {"x": f["task_instance"]},
        airflow_context=fake_airflow_context,
    )
| 26.647059
| 74
| 0.728477
| 129
| 906
| 4.682171
| 0.372093
| 0.134106
| 0.139073
| 0.125828
| 0.319536
| 0.139073
| 0.139073
| 0.139073
| 0.139073
| 0
| 0
| 0.005202
| 0.151214
| 906
| 33
| 75
| 27.454545
| 0.780234
| 0.03532
| 0
| 0
| 0
| 0
| 0.082569
| 0.034404
| 0
| 0
| 0
| 0
| 0.045455
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
053914ae8ca6bed144522d26cba1f2a52c6014f5
| 2,582
|
py
|
Python
|
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
EE475/Ch6P13.py
|
PhoeniXuzoo/NU-Projects
|
a217ad46e6876ceffb3dec1d6e52f775674b2e8b
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
## softmax: 0.1 600
## perceptron: 0.05 550
def readData(csvname):
    """Load a CSV whose last row holds labels y; remaining rows are features x."""
    table = np.loadtxt(csvname, delimiter=',')
    return table[:-1, :], table[-1:, :]
def softmaxCostFunc(x, y, w):
    """Mean softmax (logistic) cost of the linear model w on data x, labels y."""
    margins = -y * np.transpose(np.dot(np.transpose(x), w))
    cost = np.sum(np.log(1 + np.exp(margins)))
    return cost / float(np.size(y))
def gradientDescentOneStepForSoftmax(x, y, w, alpha=0.1):
    """One gradient step on the softmax cost.

    Args:
        x: feature matrix of shape (d, n), one column per sample.
        y: labels of shape (1, n), entries +/-1.
        w: weight column vector of shape (d, 1).
        alpha: learning rate.

    Returns:
        Updated weight vector of shape (d, 1).
    """
    # Generalisation: size the gradient accumulator from x instead of the
    # hard-coded 9 used originally (which only fit the breast-cancer data).
    total = np.zeros([x.shape[0], 1])
    for i in range(np.size(y)):
        power = np.exp(-y[:, i] * np.dot(x[:, i], w))
        term = power / (1 + power)
        total += term * y[:, i] * x[:, [i]]
    w = w + alpha * (1 / np.size(y)) * total
    return w
def perceptronCostFunc(x, y, w):
    """Mean perceptron cost: positive margins summed, divided by sample count."""
    margins = (-y * np.transpose(np.dot(np.transpose(x), w)))[0]
    cost = sum(m for m in margins if m > 0)
    return cost / float(np.size(y))
def gradientDescentOneStepForPerceptron(x, y, w, alpha=0.05):
    """One subgradient-descent step on the perceptron cost.

    Args:
        x: feature matrix of shape (d, n), one column per sample.
        y: labels of shape (1, n), entries +/-1.
        w: weight column vector of shape (d, 1).
        alpha: learning rate.

    Returns:
        Updated weight vector of shape (d, 1).
    """
    # Generalisation: accumulator sized from x.shape[0] rather than the
    # hard-coded 9 of the original (dataset-specific).
    total = np.zeros([x.shape[0], 1])
    for i in range(np.size(y)):
        term = -y[:, i] * np.dot(x[:, [i]].T, w)
        # Subgradient is zero for non-positive margins.
        total += 0 if term <= 0 else -y[:, i] * x[:, [i]]
    w = w - alpha * (1 / np.size(y)) * total
    return w
if __name__ == "__main__":
    csvname = 'breast_cancer_data.csv'
    x, y = readData(csvname)
    # One extra weight for the bias; a row of ones is prepended to x to match.
    w = np.ones([x.shape[0] + 1, 1])
    x = np.insert(x, 0, values=np.ones([1, x.shape[1]]), axis=0)
    # Track (iteration, cost) history for the softmax run.
    xSoftList = [0]
    ySoftList = [softmaxCostFunc(x, y, w)]
    for i in range(600):
        w = gradientDescentOneStepForSoftmax(x, y, w)
        xSoftList.append(i+1)
        ySoftList.append(softmaxCostFunc(x, y, w))
    # Count sign mismatches between predictions and labels.
    yPredic = np.transpose(np.dot(np.transpose(x), w))
    wrong = 0
    for i in range(np.size(yPredic)):
        if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
            wrong += 1
    print("Softmax Wrong Prediction: ", wrong)
    # Restart from fresh weights for the perceptron run.
    w = np.ones([x.shape[0], 1])
    xPerceptronList = [0]
    yPerceptronList = [perceptronCostFunc(x, y, w)]
    for i in range(550):
        w = gradientDescentOneStepForPerceptron(x, y, w)
        xPerceptronList.append(i+1)
        yPerceptronList.append(perceptronCostFunc(x, y, w))
    plt.plot(xSoftList, ySoftList, label="Softmax Cost Function",color="#F08080")
    plt.plot(xPerceptronList, yPerceptronList, label="Perceptro Cost Function")
    plt.legend(loc="upper right")
    plt.show()
    plt.close()
    yPredic = np.transpose(np.dot(np.transpose(x), w))
    wrong = 0
    for i in range(np.size(yPredic)):
        if ((yPredic[0][i] > 0) != (y[0][i] > 0)):
            wrong += 1
    print("Perceptron Wrong Prediction: ", wrong)
| 29.011236
| 82
| 0.573199
| 390
| 2,582
| 3.769231
| 0.210256
| 0.016327
| 0.020408
| 0.052381
| 0.359184
| 0.338776
| 0.326531
| 0.253061
| 0.253061
| 0.212245
| 0
| 0.034712
| 0.241286
| 2,582
| 88
| 83
| 29.340909
| 0.715671
| 0.01433
| 0
| 0.272727
| 0
| 0
| 0.058268
| 0.008661
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075758
| false
| 0
| 0.030303
| 0
| 0.181818
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05399638e32621d9f8eab1ecc185a769af934b80
| 416
|
py
|
Python
|
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
square.py
|
Formalhalt/Phyton-Calculators
|
25f686e45a8333e9a141568c8f695350bde36bc6
|
[
"CC0-1.0"
] | null | null | null |
height = float(input("Enter height of the square: "))
width = float(input("Enter width of the Square: "))
perimeter = (2 * height) + (2 * width)
# Bug fix: area was previously computed as height * height, ignoring the
# width the user just entered; the area is height * width.
area = height * width
print("The perimeter of the square is", perimeter)
print("The area of the square is", area)
close = input("Press X to exit")
# The above line of code keeps the program open for the user to see the outcome of the problem.
| 23.111111
| 96
| 0.663462
| 65
| 416
| 4.246154
| 0.446154
| 0.09058
| 0.15942
| 0.094203
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00627
| 0.233173
| 416
| 17
| 97
| 24.470588
| 0.858934
| 0.223558
| 0
| 0
| 0
| 0
| 0.415282
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0541032df78b9eac36f755de81be4a580d936532
| 5,223
|
py
|
Python
|
src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py
|
derailed-dash/Advent-of-Code
|
12378baf33ef4a59958e84eb60e795b6530c22ba
|
[
"MIT"
] | 9
|
2021-12-31T20:13:03.000Z
|
2022-03-05T07:05:06.000Z
|
src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py
|
derailed-dash/Advent-of-Code
|
12378baf33ef4a59958e84eb60e795b6530c22ba
|
[
"MIT"
] | 1
|
2022-01-25T08:35:04.000Z
|
2022-01-29T00:07:00.000Z
|
src/AoC_2015/d24_sleigh_balance_subset_sum/sleigh_balance.py
|
derailed-dash/Advent-of-Code
|
12378baf33ef4a59958e84eb60e795b6530c22ba
|
[
"MIT"
] | null | null | null |
"""
Author: Darren
Date: 02/05/2021
Solving https://adventofcode.com/2015/day/24
We require three bags of equal weight.
Bag 1 in the passenger compartment, needs to have fewest packages.
Bags 2 and 3 to either side.
Solution:
Use subset sum function to work out which combinations of packages adds up to
total weight / number of bags (compartments).
The faster subsum is about 3x quicker than the version that uses itertools.combinations.
Once we have all combinations for the first bag, sort by the number of packages,
since we want the first bag to have fewest possible packages.
We don't care about what's in bags 2, 3...
I.e. because we know we will have valid combinations of packages that will add up to the same weight
"""
from __future__ import absolute_import
import logging
import os
import time
from math import prod
from itertools import combinations
# pylint: disable=logging-fstring-interpolation
SCRIPT_DIR = os.path.dirname(__file__)
INPUT_FILE = "input/input.txt"
SAMPLE_INPUT_FILE = "input/sample_input.txt"
def main():
    """Read package weights and solve both puzzle parts (3 bags, then 4)."""
    logging.basicConfig(level=logging.DEBUG, format="%(asctime)s:%(levelname)s:\t%(message)s")
    # Swap in SAMPLE_INPUT_FILE here to run against the sample data.
    input_path = os.path.join(SCRIPT_DIR, INPUT_FILE)
    with open(input_path, mode="rt") as f:
        package_weights = [int(line) for line in f.read().splitlines()]
    logging.info(f"Package weights: {package_weights}")

    # Part 1 uses 3 compartments, Part 2 uses 4.
    for bag_count in (3, 4):
        best = distribute_packages(package_weights, bag_count)
        logging.info(f"Solution found with QE {get_quantum_entanglement(best)}")
        logging.info(f"First bag: {best}")
def distribute_packages(package_weights, number_of_bags) -> tuple:
    """Find the optimum combination of packages for the first bag.

    The first bag must weigh total/number_of_bags, contain as few packages as
    possible, and (tie-break) have the lowest quantum entanglement.

    Returns:
        tuple: package weights making up the first bag of the best solution
    """
    logging.info(f"Solving for {number_of_bags} bags")
    total_weight = sum(package_weights)
    target_weight_per_bag = total_weight // number_of_bags
    logging.debug(f"Total packages: {len(package_weights)}, with total weight: {total_weight}")
    logging.debug(f"Target weight per bag: {target_weight_per_bag}")

    # Every combination that could fill bag 1, fewest packages first.
    candidates = sorted(faster_subset_sum(package_weights, target_weight_per_bag), key=len)

    best = tuple()
    for candidate in candidates:
        if best:
            # A previous solution with fewer packages always wins.
            if len(candidate) > len(best):
                continue
            # Same package count: lower quantum entanglement wins.
            if get_quantum_entanglement(candidate) >= get_quantum_entanglement(best):
                continue
        best = candidate
    return best
def get_quantum_entanglement(bag: tuple):
    """Quantum entanglement of a bag = the product of its package weights."""
    return prod(bag)
def faster_subset_sum(items: list, target: int, partial=None, results=None) -> list:
    """Determine all combinations of list items that add up to the target.

    Args:
        items (list): A list of values
        target (int): The total that the values need to add up to
        partial (list, optional): Combination built so far (recursion state).
        results (list, optional): Accumulator for valid combinations (recursion state).

    Returns:
        list: The list of valid combinations

    Bug fix: the previous mutable default arguments (``partial=[], results=[]``)
    were shared across calls, so a second top-level invocation returned results
    accumulated from earlier invocations. Fresh lists are now created per call.
    """
    if partial is None:
        partial = []
    if results is None:
        results = []

    total = sum(partial)

    # A partial that hits the target exactly is a valid combination.
    if total == target:
        results.append(partial)
    # At or past the target, adding further items cannot help - stop recursing.
    if total >= target:
        return []

    for i, item in enumerate(items):
        # Only consider later items, so each combination is generated once
        # rather than in every permutation order.
        faster_subset_sum(items[i + 1:], target, partial + [item], results)
    return results
def simple_subset_sum(items, target: int) -> tuple:
    """Yield each combination of items whose sum equals the target.

    Args:
        items (Sequence): List/set of items
        target (int): The target sum to achieve

    Yields:
        Iterator[tuple]: Combinations that achieve the desired sum, smallest first
    """
    # Brute force: examine every subset of every size, from 0 up to len(items).
    for size in range(len(items) + 1):
        yield from (combo for combo in combinations(items, size)
                    if sum(combo) == target)
# Script entry point: run the solver and report wall-clock duration.
if __name__ == "__main__":
    t1 = time.perf_counter()
    main()
    t2 = time.perf_counter()
    print(f"Execution time: {t2 - t1:0.4f} seconds")
| 34.589404
| 103
| 0.681409
| 728
| 5,223
| 4.736264
| 0.296703
| 0.039443
| 0.020882
| 0.020882
| 0.180104
| 0.146172
| 0.11891
| 0.082367
| 0.059745
| 0.059745
| 0
| 0.008569
| 0.240283
| 5,223
| 150
| 104
| 34.82
| 0.860383
| 0.406663
| 0
| 0.098361
| 0
| 0
| 0.16522
| 0.05741
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081967
| false
| 0
| 0.098361
| 0.016393
| 0.245902
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0541425822ca873cc1104abcaddefbf0b86d3c05
| 8,946
|
py
|
Python
|
artap/algorithm_bayesopt.py
|
tamasorosz/artap
|
e8df160bfc9c378c3fc96b0b86e92d75d89cf26b
|
[
"MIT"
] | 5
|
2021-06-13T17:04:37.000Z
|
2022-03-04T17:16:06.000Z
|
artap/algorithm_bayesopt.py
|
tamasorosz/artap
|
e8df160bfc9c378c3fc96b0b86e92d75d89cf26b
|
[
"MIT"
] | null | null | null |
artap/algorithm_bayesopt.py
|
tamasorosz/artap
|
e8df160bfc9c378c3fc96b0b86e92d75d89cf26b
|
[
"MIT"
] | 8
|
2021-03-11T18:23:47.000Z
|
2022-02-22T11:13:23.000Z
|
from .problem import Problem
from .algorithm import Algorithm
from .config import artap_root
import time
import numpy as np
import os
import sys
sys.path.append(artap_root + os.sep + "lib" + os.sep)
import bayesopt
from multiprocessing import Process, Pipe, Queue, Manager
# from multiprocessing.managers import BaseManager
_l_type = ['L_FIXED', 'L_EMPIRICAL', 'L_DISCRETE', 'L_MCMC', 'L_ERROR']
_sc_type = ['SC_MTL', 'SC_ML', 'SC_MAP', 'SC_LOOCV', 'SC_ERROR']
_surr_name = ["sGaussianProcess", "sGaussianProcessML", "sGaussianProcessNormal", "sStudentTProcessJef", "sStudentTProcessNIG"]
# Python module to get run BayesOpt library in a OO pattern.
# The objective module should inherit this one and override evaluateSample.
class BayesOptContinuous(object):
    """Object-oriented wrapper around the bayesopt library.

    Subclasses override :meth:`evaluateSample` with the objective function.
    For available parameter options see vector.h and vector.cpp in BayesOpt;
    any parameter left unset falls back to the library default.
    """

    def __init__(self, n_dim):
        # bayesopt parameter dictionary (library options)
        self.params = {}
        # search-space dimensionality
        self.n_dim = n_dim
        # per-dimension lower/upper bounds
        self.lb = np.zeros((self.n_dim,))
        self.ub = np.ones((self.n_dim,))

    @property
    def parameters(self):
        return self.params

    @parameters.setter
    def parameters(self, params):
        self.params = params

    @property
    def lower_bound(self):
        return self.lb

    @lower_bound.setter
    def lower_bound(self, lb):
        self.lb = lb

    @property
    def upper_bound(self):
        return self.ub

    @upper_bound.setter
    def upper_bound(self, ub):
        self.ub = ub

    def evaluateSample(self, x_in):
        """Objective function hook; must be overridden by subclasses."""
        raise NotImplementedError("Please Implement this method")

    def optimize(self):
        """Run the optimization; returns (min_value, x_out, error_code)."""
        min_val, x_out, error = bayesopt.optimize(self.evaluateSample, self.n_dim,
                                                  self.lb, self.ub,
                                                  self.params)
        return min_val, x_out, error
class BayesOpt(Algorithm):
    """Base artap wrapper for BayesOpt algorithms; declares the shared options."""

    def __init__(self, problem: Problem, name="BayesOpt"):
        super().__init__(problem, name)
        self.problem = problem
        # (option name, default, extra declare() kwargs, description)
        option_specs = [
            ('l_type', 'L_EMPIRICAL', {'values': _l_type},
             'Type of learning for the kernel params'),
            ('sc_type', 'SC_MAP', {'values': _sc_type},
             'Type of learning for the kernel params'),
            ('n_iterations', 50, {'lower': 1},
             'Maximum BayesOpt evaluations'),
            ('init_method', 1, {},
             'Init method'),  # 1-LHS, 2-Sobol
            ('n_init_samples', 10, {'lower': 1},
             'Number of samples before optimization'),
            ('n_iter_relearn', 10, {'lower': 1},
             'Number of samples before relearn kernel'),
            ('surr_name', 'sGaussianProcessML', {'values': _surr_name},
             'Name of the surrogate function'),
            ('surr_noise', 1e-10, {'lower': 0.0},
             'Variance of observation noise'),
        ]
        for opt_name, default, extra, desc in option_specs:
            self.options.declare(name=opt_name, default=default, desc=desc, **extra)
class BayesOptClassSerial(BayesOptContinuous):
    """In-process BayesOpt problem wrapper for an artap algorithm."""

    def __init__(self, algorithm):
        dims = len(algorithm.problem.parameters)
        super().__init__(dims)
        # owning artap algorithm
        self.algorithm = algorithm
        # Bounds are sized here and filled in later by the algorithm's run().
        self.lb = np.empty((dims,))
        self.ub = np.empty((dims,))
        self.params = {}

    def evaluateSample(self, x):
        # Delegate objective evaluation to the algorithm's evaluator.
        return self.algorithm.evaluator.evaluate_scalar(x)
class BayesOptSerial(BayesOpt):
    """BayesOpt algorithm executed synchronously in the current process."""

    # Options copied verbatim into the bayesopt parameter dict, in order.
    _PARAM_KEYS = ('n_iterations', 'n_init_samples', 'n_iter_relearn',
                   'surr_name', 'surr_noise', 'init_method', 'l_type',
                   'sc_type', 'verbose_level')

    def __init__(self, problem: Problem, name="BayesOpt"):
        super().__init__(problem, name)
        self.bo = BayesOptClassSerial(self)

    def run(self):
        # Copy each parameter's bounds into the optimizer's bound vectors.
        for i, parameter in enumerate(self.problem.parameters):
            bounds = parameter['bounds']
            self.bo.lb[i] = bounds[0]
            self.bo.ub[i] = bounds[1]

        # Forward the declared options to bayesopt.
        for key in self._PARAM_KEYS:
            self.bo.params[key] = self.options[key]

        t_s = time.time()
        self.problem.logger.info("BayesOpt: surr_name{}".format(self.options['surr_name']))
        mvalue, x_out, error = self.bo.optimize()
        t = time.time() - t_s
        self.problem.logger.info("BayesOpt: elapsed time: {} s".format(t))

        # sync changed individual informations
        self.problem.data_store.sync_all()

        if error != 0:
            print('Optimization FAILED.')
            print("Error", error)
            print('-' * 35)
class BayesOptClassParallel(Process, BayesOptContinuous):
    """BayesOpt optimizer running in a child process.

    Each objective evaluation is shipped over a Pipe to the parent process,
    which computes it and sends the result back.
    """

    def __init__(self, pipe, algorithm):
        dims = len(algorithm.problem.parameters)
        Process.__init__(self)
        BayesOptContinuous.__init__(self, dims)
        # owning artap algorithm
        self.algorithm = algorithm
        # Result fields, populated by run().
        self.mvalue = -1.0
        self.x_out = -1.0
        self.error = 0
        self.pipe = pipe
        # Bounds are sized here and filled in by the parent before start().
        self.lb = np.empty((dims,))
        self.ub = np.empty((dims,))
        self.params = {}

    def run(self):
        mvalue, x_out, error = self.optimize()
        # Tell the evaluation worker on the other end of the pipe to stop.
        self.pipe.send('STOP')
        self.mvalue = mvalue
        self.x_out = x_out
        self.error = error
        print("output")
        print(self.mvalue)
        print(self.x_out)
        print(self.error)

    def evaluateSample(self, x):
        # Ask the parent process to evaluate the candidate; block for the result.
        self.pipe.send(x)
        return self.pipe.recv()
class BayesOptParallel(BayesOpt):
    """ BayesOpt algorithms """
    # Runs the optimizer (BayesOptClassParallel) in a child process while this
    # process evaluates objective samples received over a Pipe.

    def __init__(self, problem: Problem, name="BayesOpt"):
        super().__init__(problem, name)
        # pipe_par stays on this (worker) side; pipe_child goes to the optimizer process.
        self.pipe_par, self.pipe_child = Pipe()
        self.bo = BayesOptClassParallel(self.pipe_child, self)

    def worker(self, pipe):
        # Evaluation loop: receive candidate vectors until the optimizer sends 'STOP'.
        x = None
        while True:
            x = pipe.recv()
            if str(x) == 'STOP':
                break
            # NOTE(review): BayesOptClassParallel defines no `job` attribute in this
            # file; the serial variant calls algorithm.evaluator.evaluate_scalar(x)
            # instead - verify this attribute exists before relying on this path.
            result = self.bo.job.evaluate_scalar(x)
            pipe.send(result)

    def run(self):
        # Figure out bounds vectors.
        i = 0
        for parameter in self.problem.parameters:
            bounds = parameter['bounds']
            self.bo.lb[i] = bounds[0]
            self.bo.ub[i] = bounds[1]
            i += 1
        # set bayesopt
        self.bo.params['n_iterations'] = self.options['n_iterations']
        self.bo.params['n_init_samples'] = self.options['n_init_samples']
        self.bo.params['n_iter_relearn'] = self.options['n_iter_relearn']
        self.bo.params['surr_name'] = self.options['surr_name']
        self.bo.params['surr_noise'] = self.options['surr_noise']
        self.bo.params['init_method'] = self.options['init_method']
        self.bo.params['l_type'] = self.options['l_type']
        self.bo.params['sc_type'] = self.options['sc_type']
        self.bo.params['verbose_level'] = self.options['verbose_level']
        # process = Process(target=self.worker, args=(self.pipe_par, self.problem, ))
        # Worker (this side) evaluates samples; self.bo (child) runs the optimizer.
        process = Process(target=self.worker, args=(self.pipe_par, ))
        self.bo.start()
        process.start()
        self.bo.join()
        process.join()
        # NOTE(review): these attributes are set in the child process, so the
        # parent's copies still hold the -1.0/0 defaults here - verify.
        print(self.bo.mvalue)
        print(self.bo.x_out)
        print(self.bo.error)
        print()
        print(self.problem.data_store, len(self.problem.populations[-1].individuals))
        # self.result = self.mvalue
        """
        if self.bo.error != 0:
            print('Optimization FAILED.')
            print("Error", self.bo.error)
            print('-' * 35)
        else:
            print('Optimization Complete, %f seconds' % (clock() - start))
            print("Result", self.bo.x_out, self.bo.mvalue)
            print('-' * 35)
        """
| 31.611307
| 127
| 0.59099
| 1,064
| 8,946
| 4.81109
| 0.198308
| 0.041024
| 0.042196
| 0.034382
| 0.43993
| 0.383473
| 0.368236
| 0.352999
| 0.352999
| 0.316273
| 0
| 0.006405
| 0.284485
| 8,946
| 283
| 128
| 31.611307
| 0.793314
| 0.110999
| 0
| 0.331395
| 0
| 0
| 0.143709
| 0.002914
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0.005814
| 0.052326
| 0.023256
| 0.238372
| 0.069767
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0552a237d536bb49e4a74fe8039eabfd37370524
| 1,596
|
py
|
Python
|
main.py
|
WillyHHsu/rest
|
1adba475579cb2c0f9b8690b7f822c02b483146a
|
[
"MIT"
] | null | null | null |
main.py
|
WillyHHsu/rest
|
1adba475579cb2c0f9b8690b7f822c02b483146a
|
[
"MIT"
] | null | null | null |
main.py
|
WillyHHsu/rest
|
1adba475579cb2c0f9b8690b7f822c02b483146a
|
[
"MIT"
] | null | null | null |
import os
from fastapi import FastAPI
from fastapi_sqlalchemy import DBSessionMiddleware
from fastapi_sqlalchemy import db
from dotenv import load_dotenv
from sqlalchemy import schema
from db import models as db_model
from schemas import models as schema
# Load DB credentials from .env / the environment.
load_dotenv()
POSTGRES_USER = os.getenv('POSTGRES_USER')
POSTGRES_PASSWORD = os.getenv('POSTGRES_PASSWORD')
POSTGRES_DB = os.getenv('POSTGRES_DB')
POSTGRES_URL = os.getenv('POSTGRES_URL')
POSTGRES_PORT = os.getenv('POSTGRES_PORT', 5432)  # default PostgreSQL port
app = FastAPI(
    title="API REST",
    description="Uma API REST by WillyHHsu",
)
# DBSessionMiddleware provides the request-scoped `db.session` used by the routes.
app.add_middleware(
    DBSessionMiddleware,
    db_url=f"postgresql://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_URL}:{POSTGRES_PORT}/{POSTGRES_DB}"
)
@app.get("/users")
def get_users():
    """Return every player stored in the database."""
    return db.session.query(db_model.Player).all()
@app.post("/tournament",
          summary='Cadastra um novo torneio',
          response_model=schema.Tournament)
def new_tournament(tornament_request: schema.Tournament):
    """Persist a new tournament and echo the validated payload back.

    Bug fix: the original built the ORM row positionally
    (``db_model.Tornament(tornament_request)``) and returned
    ``schema.Tournament(**tornament_request)`` - a pydantic model is not a
    mapping, so both calls raised TypeError. The row is now built from the
    model's fields and the already-validated request model is returned.
    """
    db.session.add(db_model.Tornament(**tornament_request.dict()))
    db.session.commit()
    return tornament_request
@app.post("/tournament/{id_tournament}/competitor",
          summary='Cadastra um novo competidor')
def new_tournament(id_tournament):
    """Look up the tournament a competitor is being added to.

    Bug fix: SQLAlchemy's ``Query.filter()`` does not accept keyword
    arguments (it takes SQL expressions); ``filter_by()`` does.
    """
    # NOTE(review): despite the POST summary, this handler only reads, and its
    # name shadows the /tournament handler above at module level - verify intent.
    return db.session.query(db_model.Tournament).filter_by(id_tournament=id_tournament).first()
@app.get("/tournament/{id_tournament}/match",
         summary='Lista as partidas de um torneio')
def list_match(id_tournament):
    """List every match (Game) belonging to the given tournament.

    Bug fix: ``Query.filter()`` rejects keyword arguments; use ``filter_by()``.
    """
    return db.session.query(db_model.Game).filter_by(id_tournament=id_tournament).all()
| 29.555556
| 107
| 0.759398
| 211
| 1,596
| 5.545024
| 0.303318
| 0.082051
| 0.068376
| 0.041026
| 0.135897
| 0.066667
| 0.066667
| 0.066667
| 0
| 0
| 0
| 0.002869
| 0.126566
| 1,596
| 53
| 108
| 30.113208
| 0.836442
| 0
| 0
| 0
| 0
| 0
| 0.226817
| 0.102757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.04878
| 0.195122
| 0.04878
| 0.390244
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05546175c9355e358802def95353b9059d638d79
| 866
|
py
|
Python
|
src/compas_blender/utilities/data.py
|
KEERTHANAUDAY/compas
|
4d1101cf302f95a4472a01a1265cc64eaec6aa4a
|
[
"MIT"
] | null | null | null |
src/compas_blender/utilities/data.py
|
KEERTHANAUDAY/compas
|
4d1101cf302f95a4472a01a1265cc64eaec6aa4a
|
[
"MIT"
] | null | null | null |
src/compas_blender/utilities/data.py
|
KEERTHANAUDAY/compas
|
4d1101cf302f95a4472a01a1265cc64eaec6aa4a
|
[
"MIT"
] | null | null | null |
import bpy
__all__ = [
"delete_all_data",
]
def delete_all_data():
    """Delete all collections, mesh and curve objects, meshes, curves, materials."""
    for collection in bpy.data.collections:
        bpy.data.collections.remove(collection)
    # Only geometry-carrying object types (mesh/curve) are removed.
    for obj in bpy.data.objects:
        if obj.type in ('MESH', 'CURVE'):
            bpy.data.objects.remove(obj)
    # Purge the orphaned datablocks in the same order as before.
    for datablock_set in (bpy.data.meshes, bpy.data.curves, bpy.data.materials):
        for datablock in datablock_set:
            datablock_set.remove(datablock)
# ==============================================================================
# Main
# ==============================================================================

# No demo/self-test; this module is import-only.
if __name__ == '__main__':
    pass
| 27.0625
| 84
| 0.51963
| 93
| 866
| 4.666667
| 0.27957
| 0.177419
| 0.103687
| 0.092166
| 0.105991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202079
| 866
| 31
| 85
| 27.935484
| 0.628075
| 0.274827
| 0
| 0.1
| 0
| 0
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0.05
| 0.05
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
055668b6a61ba32a80522c93f3aa4dbcf035bb7b
| 2,335
|
py
|
Python
|
teams_to_tsv.py
|
FSU-ACM-OSSG/Contest-Server
|
f9aabd9742a6aa78cbefc685fd2760a1f83d7721
|
[
"MIT"
] | 8
|
2019-01-13T21:57:53.000Z
|
2021-11-29T12:32:48.000Z
|
teams_to_tsv.py
|
FSU-ACM-OSSG/Contest-Server
|
f9aabd9742a6aa78cbefc685fd2760a1f83d7721
|
[
"MIT"
] | 73
|
2018-02-13T00:58:39.000Z
|
2022-02-10T11:59:53.000Z
|
teams_to_tsv.py
|
FSU-ACM-OSSG/Contest-Server
|
f9aabd9742a6aa78cbefc685fd2760a1f83d7721
|
[
"MIT"
] | 4
|
2018-02-08T18:56:54.000Z
|
2019-02-13T19:01:53.000Z
|
##############
# team_to_tsv script
# Creates two tsv files for importing into domjudge
# Team info gets stored inside teams.tsv in the following format
# <team_id(int)> <external_id> <category_id> <team_name>
# Account info gets stored inside acccounts.tsv in the following format
# team <team-name> <user-name> <password> <teamid>
#
# Import teams.tsv first, then accounts.tsv
#
# NOTE 1 : Domjudge doesn't insert teams with ID < 1
from app.models.Team import *

# Export every Team into the two DOMjudge import files (formats in the header).
with open("teams.tsv", "w+") as teams_tsv, \
        open("accounts.tsv", "w+") as accounts_tsv:
    # Headers required by domjudge
    teams_tsv.write("teams\t1\n")
    accounts_tsv.write("accounts\t1\n")
    walkin_counter = 1
    for team in Team.objects.all():
        # Only make 100 walk-in accounts
        # NOTE(review): this breaks out of the WHOLE loop once the counter passes
        # 101 (skipping any remaining named teams) and actually permits 101
        # walk-ins, not 100 - confirm the intended limit.
        if walkin_counter > 101:
            break;
        # Accounts that are not in use are assigned to walk-ins
        if team.team_name is None:
            team.team_name = "".join(("Walk-in-", str(walkin_counter)))
            walkin_counter += 1
        # Empty team names are assign a dummy value
        if team.team_name.isspace():
            team.team_name = "UnnamedTeam"
        # Avoiding team number 0, refer to NOTE 1 in the header
        if team.teamID == "acm-0":
            continue
        teams_tsv.write(u"\t".join(
            [team.teamID.strip("acm-"),  # To only get ID number
             team.teamID,  # Set to external ID for exporting
             "2",  # Category ID of Participants Category - See footnote
             team.team_name.strip('\t'),  # So tabs in team_name don't interfere
             '\n']))
        accounts_tsv.write(u"\t".join(
            ["team",
             team.team_name.strip('\t'),  # So tabs in team_name don't interfere
             '{0}-{1}'.format('team', team.teamID.split('-')[1].zfill(3)),
             team.domPass,
             # team.teamID.strip("acm-"), # To only get ID number
             '\n']))
#
# FOOTNOTE: Team Category
#
# This value determines the team_category. Domjudge's defaults are:
# 1 -> System
# 2 -> Self-Registered
# 3 -> Jury
#
# Since System and Jury are meant for admin, we assign teams to being
# "self-registered" because you can't self-register for our contests
# anyway, and this is easier than making you create a new category first.
#
| 36.484375
| 77
| 0.614561
| 330
| 2,335
| 4.272727
| 0.4
| 0.056738
| 0.059574
| 0.028369
| 0.177305
| 0.177305
| 0.116312
| 0.116312
| 0.116312
| 0.066667
| 0
| 0.013411
| 0.265525
| 2,335
| 63
| 78
| 37.063492
| 0.808746
| 0.525054
| 0
| 0.142857
| 0
| 0
| 0.099057
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.035714
| 0.035714
| 0
| 0.035714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
055a29385d9e76d3a424d3a90ed95bbdc4015019
| 4,906
|
py
|
Python
|
cleverapi/clever_api.py
|
oncecreated/cleverapi
|
39b41860604a909d3e5262c1c795c0741570a653
|
[
"MIT"
] | 13
|
2018-06-30T14:16:42.000Z
|
2020-03-04T20:23:47.000Z
|
cleverapi/clever_api.py
|
oncecreated/cleverapi
|
39b41860604a909d3e5262c1c795c0741570a653
|
[
"MIT"
] | 11
|
2018-09-09T09:54:27.000Z
|
2019-04-15T13:40:19.000Z
|
cleverapi/clever_api.py
|
oncecreated/cleverapi
|
39b41860604a909d3e5262c1c795c0741570a653
|
[
"MIT"
] | 14
|
2018-07-24T17:38:56.000Z
|
2020-03-04T20:24:12.000Z
|
import hashlib
import json
import uuid
import requests
import aiohttp
from .exceptions import ApiResponseError
from .action import Action
class BaseCleverApi():
    """Builds request payloads for the VK Clever (streamQuiz) API.

    ``fetch`` here simply echoes ``(method, data)``; subclasses override it
    to actually perform the HTTP call.
    """

    def __init__(self, access_token, version="5.73"):
        self.access_token = access_token
        self.api_version = version
        # 16-hex-char pseudo device identifier, generated once per client.
        self.device_id = uuid.uuid4().hex[:16]
        self.api_host = "api.vk.com"

    def fetch(self, method, data=None):
        if data is None:
            data = {}
        return method, data

    def get_longpoll(self, owner_id, video_id):
        return self.fetch("video.getLongPollServer",
                          {"owner_id": owner_id, "video_id": video_id})

    def get_start_data(self):
        payload = {
            "build_ver": "503028",
            "need_leaderboard": "0",
            "func_v": "6",
            "lang": "ru",
            "https": "1",
        }
        return self.fetch("execute.getStartData", payload)

    def get_user(self):
        return self.fetch("users.get")

    def get_hash(self, additional: list, user_id):
        """Compute the three-part hash sent alongside game actions."""
        def md5_hex(text):
            return hashlib.md5(text.encode()).hexdigest()

        ids_hash = md5_hex("".join(map(str, additional)) + "3aUFMZGRCJ")
        user_hash = md5_hex(str(int(user_id) ^ 202520))
        device_hash = md5_hex(str(self.device_id) + "0MgLscD6R3")
        return "{}#{}#{}".format(ids_hash, user_hash, device_hash)

    def bump(self, lat, lon):
        return self.fetch("execute.bump",
                          {"lat": lat, "lon": lon, "prod": 1, "func_v": 1})

    def send_action(self, *, action_id: Action, user_id):
        secure_hash = self.get_hash([action_id.value], user_id)
        return self.fetch("streamQuiz.trackAction",
                          {"action_id": action_id.value, "hash": secure_hash})

    def send_answer(self, *, coins_answer: bool, game_id, answer_id, question_id, user_id):
        payload = {
            "answer_id": answer_id,
            "question_id": question_id,
            "device_id": self.device_id,
            "hash": self.get_hash([game_id, question_id], user_id),
        }
        if coins_answer:
            payload["coins_answer"] = True
        return self.fetch("streamQuiz.sendAnswer", payload)

    def get_gifts(self):
        return self.fetch("execute.getGifts")

    def purchase_gift(self, gift_id):
        return self.fetch("streamQuiz.purchaseGift", {"gift_id": gift_id})

    def get_daily_rewards(self):
        return self.fetch("streamQuiz.getDailyRewardsData")

    def get_train_questions(self):
        return self.fetch("streamQuiz.getTrainQuestions")

    def use_extra_life(self):
        return self.fetch("streamQuiz.useExtraLife")

    def get_nearby_users(self, lat, lon):
        return self.fetch("execute.getNearbyUsers", {"lat": lat, "lon": lon})

    def comment(self, *, owner_id, video_id, message):
        payload = {
            "owner_id": owner_id,
            "video_id": video_id,
            "message": message,
        }
        return self.fetch("execute.createComment", payload)
class CleverApi(BaseCleverApi):
    """Synchronous client: performs the API calls with a requests.Session."""

    def __init__(self, access_token, version="5.73"):
        super().__init__(access_token, version=version)
        self.session = requests.Session()
        # Impersonate the official mobile client's user agent.
        user_agent = ("Клевер/2.3.3 (Redmi Note 5; "
                      "Android 28; VK SDK 1.6.8; com.vk.quiz)".encode(
                          "utf-8"))
        self.session.headers.update({"User-Agent": user_agent})

    def fetch(self, method, data=None):
        """POST the method call and return the 'response' payload.

        Raises ApiResponseError when the API reports an error.
        """
        if data is None:
            data = {}
        data.update({
            "access_token": self.access_token,
            "v": self.api_version,
            "lang": "ru",
            "https": 1,
        })
        url = f"https://{self.api_host}/method/{method}"
        content = self.session.post(url, data=data).json()
        if content.get("error") is not None:
            raise ApiResponseError(json.dumps(content))
        return content["response"]
class AsyncCleverApi(BaseCleverApi):
    """Asynchronous client: performs the API calls via the injected connector."""

    def __init__(self, access_token, connector, version="5.73"):
        super().__init__(access_token, version=version)
        # Connector supplies the aiohttp session used for requests.
        self.connector = connector

    async def fetch(self, method, data=None):
        """POST the method call and return the 'response' payload.

        Raises ApiResponseError when the API reports an error.
        """
        if data is None:
            data = {}
        data.update({
            "access_token": self.access_token,
            "v": self.api_version,
            "lang": "ru",
            "https": 1,
        })
        url = f"https://{self.api_host}/method/{method}"
        async with self.connector.session.post(url, data=data) as response:
            content = await response.json()
        if content.get("error") is not None:
            raise ApiResponseError(json.dumps(content))
        return content["response"]
| 28.858824
| 91
| 0.584183
| 580
| 4,906
| 4.75
| 0.244828
| 0.047187
| 0.07078
| 0.054446
| 0.422505
| 0.324138
| 0.311434
| 0.291833
| 0.272958
| 0.22069
| 0
| 0.013691
| 0.285365
| 4,906
| 169
| 92
| 29.029586
| 0.772105
| 0
| 0
| 0.319672
| 0
| 0
| 0.148797
| 0.043416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155738
| false
| 0
| 0.057377
| 0.040984
| 0.377049
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
055ac96948dda92e22c15b66cc5f914681a2cae3
| 5,350
|
py
|
Python
|
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | null | null | null |
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | 7
|
2017-01-03T15:34:30.000Z
|
2017-07-13T15:27:08.000Z
|
blagging/views.py
|
androiddrew/blag-fork
|
249144c9a017581a6c5e387f5d86f33421d82ae3
|
[
"MIT"
] | null | null | null |
from datetime import datetime as dt
from flask import render_template, redirect, request, url_for, abort
from flask_login import login_user, logout_user, login_required, current_user, login_url
from . import app, db, login_manager
from .models import Post, Tag, Author, tags as Post_Tag
from .forms import LoginForm, PostForm
# Auth#################
@login_manager.user_loader
def load_user(userid):
    """Flask-Login hook: resolve a session user id to an Author row."""
    return Author.query.get(int(userid))
@app.route('/login', methods=['GET', 'POST'])
def login():
    # Render the login form; on valid credentials, start the session.
    form = LoginForm()
    if form.validate_on_submit():
        user = Author.get_by_username(form.username.data)
        if user is not None and user.check_password(form.password.data):
            login_user(user, form.remember_me.data)
            # SECURITY(review): 'next' is redirected to unvalidated -> open-redirect
            # risk; consider verifying it is a local/relative URL first.
            return redirect(request.args.get('next') or url_for('index'))
    return render_template('login.html', form=form)
@app.route('/logout')
def logout():
    """End the current session and bounce back to the front page."""
    logout_user()
    return redirect(url_for('index'))
# MAIN##############
@app.route('/')
@app.route('/page/<int:page_num>')
def index(page_num=1):
    """Paginated list of published posts, newest first."""
    published = Post.query.filter(Post.published == True)  # noqa: E712 (SQL expression)
    pagination = published.order_by(Post.date.desc()).paginate(
        page=page_num, per_page=app.config['POST_PER_PAGE'], error_out=True)
    return render_template('blog.html', pagination=pagination,
                           authors=Author.query.all())
@app.route('/post/<slug>', methods=['GET', 'POST'])
def post(slug):
    """Show a single published post, looked up by its slugified title."""
    found = (Post.query.filter_by(_display_title=slug)
             .filter(Post.published == True).first_or_404())
    return render_template('post.html', post=found)
@app.route('/tag/<name>')
@app.route('/tag/<name>/<int:page_num>')
def tag(name, page_num=1):
    """Paginated list of published posts carrying the given tag."""
    the_tag = Tag.query.filter_by(name=name).first_or_404()
    tagged = (Post.query.join(Post_Tag).join(Tag)
              .filter(Tag.id == the_tag.id)
              .filter(Post.published == True))
    pagination = (tagged.filter(Post.published == True)
                  .order_by(Post.date.desc())
                  .paginate(page=page_num,
                            per_page=app.config['POST_PER_PAGE'],
                            error_out=True))
    return render_template('tag.html', pagination=pagination, tag=the_tag)
@app.route('/author/<display_name>')
def user(display_name):
    """Public author profile page."""
    author = Author.query.filter_by(display_name=display_name).first_or_404()
    return render_template('author.html', author=author)
@app.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Create a new post from the submitted PostForm."""
    form = PostForm()
    if not form.validate_on_submit():
        return render_template('post_form.html', form=form)
    new_post = Post(author=current_user,
                    title=form.title.data,
                    display_title=form.title.data,
                    short_desc=form.short_desc.data,
                    body=form.body.data,
                    tags=form.tags.data,
                    published=form.published.data)
    # NOTE(review): no_autoflush presumably avoids a premature flush while the
    # tags relationship is attached - confirm against the models.
    with db.session.no_autoflush:
        db.session.add(new_post)
        db.session.commit()
    return redirect(url_for('index'))
@app.route('/edit')
@login_required
def edit():
    """List the current user's posts, newest first, for editing."""
    own_posts = (Post.query.filter(Post.author_id == current_user.id)
                 .order_by(Post.date.desc()).all())
    return render_template('edit_list.html', posts=own_posts)
@app.route('/edit/<int:post_id>', methods=['GET', 'POST'])
@login_required
def edit_post(post_id):
    """Edit an existing post; only its author may do so (403 otherwise)."""
    target = Post.query.get_or_404(post_id)
    if current_user != target.author:
        abort(403)
    form = PostForm(obj=target, post_id=target.id)
    if form.validate_on_submit():
        form.populate_obj(target)
        db.session.commit()
        return redirect(url_for('index'))
    return render_template('post_form.html', form=form)
@app.route('/preview', methods=['GET', 'POST'])
@login_required
def preview_post():
    """Render a live preview of a post from serialized form data (JSON)."""
    fields = request.get_json(force=True)
    form_data = {'date': dt.utcnow(), 'author': current_user}
    for field in fields:
        form_data[field['name']] = field['value']
    # Tags arrive as a single comma-separated string.
    form_data['tags'] = form_data.get('tags').split(',')
    return render_template('post_preview.html', post=form_data)
# MAIN OTHER###########
@app.errorhandler(403)
def forbidden(e):
    """403 handler.

    Renamed from page_not_found: the 404 handler below reused that name,
    shadowing this function at module level (flake8 F811). Flask registers
    handlers via the decorator, so the rename does not affect routing.
    """
    return render_template('403.html'), 403
@app.errorhandler(404)  # bluprintname.app_errorhandler will register for the entire app when using blueprints
def page_not_found(e):
    """404 handler."""
    return render_template('404.html'), 404
@app.errorhandler(500)
def server_error(e):
    """500 handler: log the error, then render the error page."""
    app.logger.error('Server Error: {}'.format(e))
    return render_template('500.html'), 500
@app.context_processor
def inject_tags():
    """Expose the tag list and tag counts to every template."""
    return {'all_tags': Tag.all, 'tags_count': Tag.tag_count}
@app.context_processor
def inject_recent_posts():
    """Expose the recent-posts list to every template."""
    return {'recent_posts': Post.recent}
@app.context_processor
def inject_auth_url():
    """Expose flask_login's login_url helper to every template."""
    return {'auth_url': login_url}
@app.template_filter('strftime')
def _jinja2_filter_datetime(date, fmt=None):
    """Jinja filter: format a date/datetime, defaulting to ISO 'YYYY-MM-DD'."""
    return date.strftime(fmt if fmt is not None else '%Y-%m-%d')
| 33.647799
| 119
| 0.65271
| 715
| 5,350
| 4.688112
| 0.197203
| 0.054296
| 0.071599
| 0.027446
| 0.318914
| 0.246718
| 0.170048
| 0.170048
| 0.148568
| 0.148568
| 0
| 0.010576
| 0.204673
| 5,350
| 158
| 120
| 33.860759
| 0.777203
| 0.049159
| 0
| 0.181034
| 0
| 0
| 0.082687
| 0.009541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155172
| false
| 0.008621
| 0.051724
| 0.034483
| 0.387931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
055cc455230997c5276c879e8d734a4e3c932b7e
| 1,652
|
py
|
Python
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 3
|
2021-10-16T01:28:24.000Z
|
2021-12-07T21:49:54.000Z
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | 12
|
2021-05-09T16:57:18.000Z
|
2021-06-16T19:20:57.000Z
|
g13gui/g13/manager_tests.py
|
jtgans/g13gui
|
aa07ee91b0fd89eb8d9991291e11ca3a97ca11cc
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import unittest
import time
import usb.util
from g13gui.observer.observer import ObserverTestCase
from g13gui.model.prefs import Preferences
from g13gui.g13.manager import DeviceManager
from g13gui.g13.manager import LCD_BUFFER_SIZE
class DeviceManagerTests(ObserverTestCase):
    """Exercises DeviceManager against attached hardware (LEDs, backlight, LCD)."""

    def setUp(self):
        prefs = Preferences()
        self.m = DeviceManager(prefs)
        self.m.start()
        # Poll until the manager reports it has found the device.
        while self.m.state != DeviceManager.State.FOUND:
            time.sleep(1)
        self.assertEqual(self.m.state, DeviceManager.State.FOUND)

    def tearDown(self):
        self.m.shutdown()
        self.m.join()

    def testLeds(self):
        for mode in range(0, 17):
            self.m.setLedsMode(mode)

    def testBacklight(self):
        # Sweep each channel combination through the full 0-255 range, in the
        # same order as before: R, G, B, RG, GB, RB, RGB.
        channel_masks = ((1, 0, 0), (0, 1, 0), (0, 0, 1),
                         (1, 1, 0), (0, 1, 1), (1, 0, 1), (1, 1, 1))
        for r_on, g_on, b_on in channel_masks:
            for i in range(0, 256):
                self.m.setBacklightColor(i * r_on, i * g_on, i * b_on)

    def testLCD(self):
        whiteBuffer = [0x5A] * LCD_BUFFER_SIZE
        blackBuffer = [0xA5] * LCD_BUFFER_SIZE
        for _ in range(1, 10):
            self.m.setLCDBuffer(whiteBuffer)
            time.sleep(0.5)
            self.m.setLCDBuffer(blackBuffer)
            time.sleep(0.5)
# Run the hardware test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 24.656716
| 65
| 0.598668
| 218
| 1,652
| 4.472477
| 0.270642
| 0.082051
| 0.055385
| 0.101538
| 0.427692
| 0.374359
| 0.286154
| 0.286154
| 0.286154
| 0.286154
| 0
| 0.054608
| 0.290557
| 1,652
| 66
| 66
| 25.030303
| 0.777304
| 0.009685
| 0
| 0.195652
| 0
| 0
| 0.004893
| 0
| 0
| 0
| 0.004893
| 0
| 0.021739
| 1
| 0.108696
| false
| 0
| 0.152174
| 0
| 0.282609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
055df8a4d5bc728dd507e18c15a01996fcd7eeb9
| 754
|
py
|
Python
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 2
|
2018-11-12T12:17:27.000Z
|
2019-02-08T15:44:14.000Z
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 3
|
2018-08-03T12:05:20.000Z
|
2018-08-03T12:13:53.000Z
|
mpikat/utils/unix_socket.py
|
ewanbarr/mpikat
|
1c9a7376f9e79dfeec5a151d8f483d6fdf3e7cc9
|
[
"MIT"
] | 4
|
2019-01-21T16:31:34.000Z
|
2019-12-03T09:27:15.000Z
|
import socket
import logging
# Module-level logger for UDS client diagnostics.
log = logging.getLogger('mpikat.utils.unix_socket')
class UDSClient(object):
    """Minimal line-oriented client for a Unix domain stream socket."""

    def __init__(self, socket_name):
        """Connect to the UDS at *socket_name*; sets a 2s default timeout.

        A connection failure is logged but not re-raised, preserving the
        original best-effort behaviour.
        """
        self._socket_name = socket_name
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            self._sock.connect(self._socket_name)
        except Exception:
            log.exception("Unable to connect to Unix domain socket {}".format(
                self._socket_name))
        self._sock.settimeout(2)

    def close(self):
        """Close the underlying socket."""
        self._sock.close()

    def send(self, message):
        """Send *message* with a CRLF terminator appended."""
        message += "\r\n"
        self._sock.sendall(message)

    def recv(self, maxsize=8192, timeout=2):
        """Receive up to *maxsize* bytes, waiting at most *timeout* seconds.

        Bug fix: the *timeout* argument was previously ignored — a
        hard-coded 2 second timeout was always applied.
        """
        self._sock.settimeout(timeout)
        return self._sock.recv(maxsize)
| 26.928571
| 78
| 0.635279
| 93
| 754
| 4.913978
| 0.419355
| 0.122538
| 0.122538
| 0.078775
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012456
| 0.254642
| 754
| 27
| 79
| 27.925926
| 0.800712
| 0
| 0
| 0.095238
| 0
| 0
| 0.092838
| 0.03183
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.095238
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0560aa251cb9f57348aa3861ec51b4ed5e27e782
| 1,021
|
py
|
Python
|
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
mlearn/static/py/funcs.py
|
achandir/django-machine-learning-beta
|
9604953addee0c1bea90d308b4248a69d332f5a8
|
[
"BSD-3-Clause"
] | null | null | null |
from django.core.files.storage import FileSystemStorage
from django.conf import settings
import os
class OverwriteStorage(FileSystemStorage):
    """Storage backend that overwrites colliding files instead of renaming."""

    def get_available_name(self, name, max_length=None):
        """
        Returns a filename that's free on the target storage system, and
        available for new content to be written to.
        """
        # Behave like a plain filesystem: an upload under an existing name
        # replaces the old file rather than getting a suffixed name.
        if self.exists(name):
            existing_path = os.path.join(settings.MEDIA_ROOT, name)
            os.remove(existing_path)
        return name
class StrToList:
    """Helpers for decoding list-like strings stored by the Prepross model."""

    def strtolist(string):
        '''
        Transforms the string stored by Prepross model to list.

        The stored form looks like "['a', 'b']": stripping spaces and
        splitting on the quote character yields the items interleaved with
        bracket/comma tokens, which are then filtered out.
        '''
        # Structural tokens produced by the split that are not real items.
        to_rem = {'[', ']', '[]', ','}
        tokens = string.replace(" ", "").split("'")
        # Replaces a per-token filter wrapped in a pointless bare
        # `try/except: pass` (list/filter cannot raise here) with a
        # single comprehension.
        return [token for token in tokens if token not in to_rem]
| 30.029412
| 84
| 0.539667
| 112
| 1,021
| 4.830357
| 0.616071
| 0.036969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.369246
| 1,021
| 33
| 85
| 30.939394
| 0.840062
| 0.233105
| 0
| 0
| 0
| 0
| 0.010072
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0.047619
| 0.142857
| 0
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
056594b9b59d36dfeef52d15b7455e3dcb8e0bf9
| 1,362
|
py
|
Python
|
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | null | null | null |
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | null | null | null |
federateme.py
|
elitest/federateme.py
|
887d27ddae814d7ed03fd7c993493d927d2492d5
|
[
"Unlicense"
] | 1
|
2021-04-13T20:02:14.000Z
|
2021-04-13T20:02:14.000Z
|
#!/usr/bin/env python3
import boto.utils, json, requests
def detect_ec2():
    """Return True when the instance metadata endpoint is reachable.

    Off AWS the link-local address is unreachable, so the request fails
    and we return False.
    """
    try:
        # 2s timeout: without it, off-AWS hosts hang for the full TCP
        # connect timeout on the unreachable link-local address.
        r = requests.get('http://169.254.169.254/latest/meta-data/ami-id',
                         timeout=2)
        print(r)
        # TODO: probably should check for something in the response here.
        # (Note: IMDSv2-only instances answer 401 without a session token,
        # so a naive status check would misclassify them.)
        return True
    except requests.RequestException:
        # Network failure => not on EC2.
        return False
    except Exception:
        # Narrowed from a bare `except:`; anything else is unexpected but
        # still treated as "not EC2" to preserve best-effort behaviour.
        return False
def gen_link():
    """Build an AWS console federation (signin) URL from instance credentials.

    Uses the instance role credentials exposed by the metadata service to
    obtain a signin token, then assembles the console login URL.
    """
    # Fetch the metadata once instead of three separate times.
    creds = boto.utils.get_instance_metadata()[
        'identity-credentials']['ec2']['security-credentials']['ec2-instance']
    s = json.dumps({'sessionId': creds['AccessKeyId'],
                    'sessionKey': creds['SecretAccessKey'],
                    'sessionToken': creds['Token']})
    r = requests.get("https://signin.aws.amazon.com/federation",
                     params={'Action': 'getSigninToken', 'SessionDuration': 7200, 'Session': s})
    t = r.json()
    rs = requests.Request('GET', 'https://signin.aws.amazon.com/federation',
                          params={'Action': 'login', 'Issuer': 'Internet Widgets Pty.', 'Destination': 'https://console.aws.amazon.com/', 'SigninToken': t['SigninToken']})
    l = rs.prepare()
    return l.url
# Entry point: only attempt to build a federation link when on EC2.
if detect_ec2():
    print(gen_link())
else:
    print("This is not an AWS instance. Please run on an AWS EC2 instance.")
| 41.272727
| 175
| 0.642438
| 162
| 1,362
| 5.339506
| 0.537037
| 0.09711
| 0.041619
| 0.069364
| 0.388439
| 0.388439
| 0.388439
| 0.388439
| 0.388439
| 0.277457
| 0
| 0.023297
| 0.180617
| 1,362
| 32
| 176
| 42.5625
| 0.751792
| 0.058003
| 0
| 0
| 0
| 0
| 0.440281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.272727
| 0.136364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
056746e5dbf852638494e8c736e9cb3208ccd43b
| 1,964
|
py
|
Python
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 11
|
2021-06-02T03:46:52.000Z
|
2021-09-11T22:19:12.000Z
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 9
|
2021-06-14T07:46:20.000Z
|
2021-08-28T22:50:46.000Z
|
recycler.py
|
LAION-AI/crawlingathome
|
43a477777fb403046d67224747cde1dac9f2094a
|
[
"MIT"
] | 7
|
2021-06-01T11:59:36.000Z
|
2022-03-20T13:44:18.000Z
|
import numpy as np
from requests import session
from .core import CPUClient, GPUClient, HybridClient
from .temp import TempCPUWorker
from .errors import *
# Dump a client's attributes into a dictionary so that it can be used remotely.
def dump(c):
    """Serialize client *c* into a plain dict suitable for a later load().

    Raises DumpError when a required attribute (type/url/token/nickname)
    is missing.
    """
    try:
        return {
            "_type": c.type,
            "url": c.url,
            "token": c.token,
            "nickname": c.nickname,
            # Optional attributes default to None when absent
            # (getattr replaces the previous hasattr/else chains).
            "shard": getattr(c, 'shard', None),
            # The id fields are stringified because they may be np.int64,
            # which is not JSON-serializable.
            "start_id": str(c.start_id) if hasattr(c, 'start_id') else None,
            "end_id": str(c.end_id) if hasattr(c, 'end_id') else None,
            "shard_piece": getattr(c, 'shard_piece', None),
            "wat": getattr(c, 'wat', None),
            "shards": getattr(c, 'shards', None)
        }
    except AttributeError as e:
        raise DumpError(f"[crawling@home] unable to dump client: {e}")
# Load an existing client using its attributes. It's best to load using an existing dumpClient(): `loadClient(**dump)`
def load(_type=None, url=None, token=None, nickname=None, shard=None,
         start_id=None, end_id=None, shard_piece=None, wat=None, shards=None):
    """Rebuild a client from attributes previously produced by dump().

    Raises ValueError for an unknown *_type*.
    """
    if _type == "HYBRID":
        c = HybridClient(*[None] * 2, _recycled=True)
    elif _type == "CPU":
        c = CPUClient(*[None] * 2, _recycled=True)
    elif _type == "GPU":
        c = GPUClient(*[None] * 2, _recycled=True)
    elif _type == "FULLWAT":
        c = TempCPUWorker(url, nickname, _recycled=True)
    else:
        raise ValueError(f"Invalid worker type: {_type}")
    c.s = session()
    c.type = _type
    c.url = url
    c.token = token
    c.nickname = nickname
    c.shard = shard
    # Bug fix: guard against None before coercing. dump() emits None when
    # the id attributes were absent, and np.int64(None) raises TypeError.
    c.start_id = start_id if start_id is None or isinstance(start_id, np.int64) else np.int64(start_id)
    c.end_id = end_id if end_id is None or isinstance(end_id, np.int64) else np.int64(end_id)
    c.shard_piece = shard_piece
    c.wat = wat
    c.shards = shards
    return c
| 35.709091
| 118
| 0.614562
| 282
| 1,964
| 4.159574
| 0.280142
| 0.047741
| 0.051151
| 0.043478
| 0.098039
| 0.098039
| 0
| 0
| 0
| 0
| 0
| 0.007591
| 0.26222
| 1,964
| 54
| 119
| 36.37037
| 0.801932
| 0.098778
| 0
| 0
| 0
| 0
| 0.106395
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.111111
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0567803d049b2b08966e5134ef97c6b64fdfc130
| 1,921
|
py
|
Python
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 2
|
2019-06-20T23:32:10.000Z
|
2021-01-24T22:32:07.000Z
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 157
|
2019-04-09T18:40:42.000Z
|
2021-05-06T13:44:33.000Z
|
config.py
|
uncharted-distil/distil-auto-ml
|
244661942cff11617c81830d7f58a9f9b5c9499d
|
[
"Apache-2.0"
] | 1
|
2019-07-12T22:17:46.000Z
|
2019-07-12T22:17:46.000Z
|
import os

# Database connection string; defaults to a local SQLite test DB.
DB_LOCATION = os.getenv("DB_URI", "test.db")
# Debug flag to output more verbose logging
# - defaults to False
# NOTE(review): any non-empty env value (even "false") is truthy here.
DEBUG = os.getenv("DEBUG", False)
# Configurable output directory for saving machine learning model pickles
# - defaults to ../output
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "output")
# Port to make worker service available on
PORT = os.getenv("PORT", "45042")
# Configurable filename for output logs
LOG_FILENAME = os.getenv("LOG_FILENAME", "distil-auto-ml.log")
# User agent to supply to TA3 Systems
SERVER_USER_AGENT = "qntfy_ta2"
# Primitives static file directory
D3MSTATICDIR = os.getenv("D3MSTATICDIR", "/static")
# Enable GPU pipelines - "auto" will try to detect, "true" and "false" will force
GPU = os.getenv("GPU", "auto")
# Batch size to apply to primitives where feasible
REMOTE_SENSING_BATCH_SIZE = int(os.getenv("REMOTE_SENSING_BATCH_SIZE", 128))
# Solution search progress update message interval in seconds
PROGRESS_INTERVAL = float(os.getenv("PROGRESS_INTERVAL", 10.0))
# maximum number of augment columns to support
AUG_MAX_COLS = int(os.getenv("AUG_MAX_COLS", 50))
# maximum number of augment rows to support
AUG_MAX_ROWS = int(os.getenv("AUG_MAX_ROWS", 50000))
# maximum amount of time for hyperparam tuning in seconds
TIME_LIMIT = int(os.getenv("TIME_LIMIT", 600))
# use untuned/internally tuned pipelines (faster) or external tuning (better results)
HYPERPARAMETER_TUNING = os.getenv("HYPERPARAMETER_TUNING", "True") == "True"
# controls parallelism within primitives - defaults to the number of CPUs
N_JOBS = int(os.getenv("N_JOBS", -1))
# enable use of mlp classifier + gradcam visualization
MLP_CLASSIFIER = os.getenv("MLP_CLASSIFIER", "False") == "True"
# whether or not received features for remote sensing are pooled or not
IS_POOLED = os.getenv("POOL_FEATURES", "True") == "True"
# Bug fix: this previously compared against "False" (`== "False"`), which
# INVERTED the flag — setting COMPUTE_CONFIDENCES=True in the environment
# disabled it. Compare against "True" like every other boolean flag above,
# defaulting to "True" so the effective default (enabled) is preserved.
COMPUTE_CONFIDENCES = os.getenv("COMPUTE_CONFIDENCES", "True") == "True"
| 34.303571
| 85
| 0.753774
| 278
| 1,921
| 5.07554
| 0.464029
| 0.096386
| 0.038979
| 0.031184
| 0.024096
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015634
| 0.134305
| 1,921
| 55
| 86
| 34.927273
| 0.832832
| 0.476835
| 0
| 0
| 0
| 0
| 0.296146
| 0.046653
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.052632
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0569e6f550e0e8fb6bd11e2714deff2f7f71997f
| 2,274
|
py
|
Python
|
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | 2
|
2018-01-18T09:16:16.000Z
|
2022-02-12T08:59:23.000Z
|
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | null | null | null |
common/settings.py
|
hehanlin/jobbole
|
46d5fa26cfa1ebd5c6c3621f615ffecbb4152fa9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
# Alias for the process environment mapping.
os_env = os.environ
class Config(object):
    """Project path constants and the main database DSN."""

    # Directory containing this settings module.
    COMMON_PATH = os.path.abspath(os.path.dirname(__file__))
    # One level above COMMON_PATH, i.e. the repository root.
    PROJECT_ROOT = os.path.abspath(os.path.join(COMMON_PATH, os.pardir))
    DATABASE_URL = "postgresql://he:he@localhost:5432/jobbole"
class CeleryConfig(object):
    """Celery configuration: Redis broker/backend, timezone, serialization."""
    BROKER_URL = 'redis://he@127.0.0.1:6379/0'  # Message broker
    CELERY_RESULT_BACKEND = 'redis://he@127.0.0.1:6379/1'  # Result backend
    CELERY_TIMEZONE = 'Asia/Shanghai'  # Timezone; the default would be UTC
    CELERY_ENABLE_UTC = True
    CELERY_TASK_SERIALIZER = 'msgpack'  # Task (de)serialization; options: json yaml msgpack pickle (pickle discouraged)
    CELERY_RESULT_SERIALIZER = 'json'  # Reading results is not performance-critical, so prefer the more readable JSON
    CELERY_TASK_RESULT_EXPIRES = 60 * 60 * 24  # Result TTL; spelled out instead of the magic number 86400 for clarity
    CELERY_IMPORTS = (  # Task modules to import
    )
# logging configuration dict, in logging.config.dictConfig schema.
LoggingConfig = {
    "version": 1,
    # Keep loggers created before this config is applied.
    "disable_existing_loggers": False,
    "formatters": {
        "simple": {
            "format": "%(asctime)s- %(module)s:%(lineno)d [%(levelname)1.1s] %(name)s: %(message)s",
            'datefmt': '%Y/%m/%d %H:%M:%S'
        }
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "level": "DEBUG",
            "formatter": "simple",
            "stream": "ext://sys.stdout"
        },
        # Rotating file for INFO and above (10 MiB per file, 20 backups).
        "info_file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "INFO",
            "formatter": "simple",
            "filename": Config.PROJECT_ROOT + '/jobbole_info.log',
            "maxBytes": 10485760,
            "backupCount": 20,
            "encoding": "utf8"
        },
        # Rotating file for ERROR and above only.
        "error_file_handler": {
            "class": "logging.handlers.RotatingFileHandler",
            "level": "ERROR",
            "formatter": "simple",
            "filename": Config.PROJECT_ROOT + '/jobbole_error.log',
            "maxBytes": 10485760,
            "backupCount": 20,
            "encoding": "utf8"
        }
    },
    "loggers": {
        "my_module": {
            "level": "ERROR",
            "handlers": ["info_file_handler"],
            "propagate": False
        }
    },
    "root": {
        "level": "INFO",
        "handlers": ["console", "info_file_handler", "error_file_handler"]
    }
}
| 30.72973
| 100
| 0.554969
| 226
| 2,274
| 5.40708
| 0.5
| 0.045008
| 0.036825
| 0.02455
| 0.297872
| 0.266776
| 0.266776
| 0
| 0
| 0
| 0
| 0.038841
| 0.286719
| 2,274
| 73
| 101
| 31.150685
| 0.71455
| 0.08883
| 0
| 0.238095
| 0
| 0.015873
| 0.369238
| 0.113052
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.031746
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
056ef751fabceeae1db74a620559c093e5b86dfa
| 10,935
|
py
|
Python
|
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | null | null | null |
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | 1
|
2022-01-26T08:58:34.000Z
|
2022-01-26T08:58:34.000Z
|
load-testing/locustfile.py
|
MaksimAniskov/aws-global-odoo
|
0f225a2f4ede3215264fd3d3912fa7b4e87d4a8f
|
[
"MIT"
] | null | null | null |
from locust import HttpUser, task, between
import re
import random
import json
import os
class OdooUser:
    """Base locust user: logs into Odoo and provides JSON-RPC helpers."""

    # HOST env var overrides the locust-configured target host.
    if os.environ.get('HOST'):
        host = os.environ.get('HOST')
    wait_time = between(20, 40)

    def on_start(self):
        """Log in, extract session info, and warm up the CRM action context."""
        response = self.client.get("/web/login")
        assert response.status_code == 200
        # The login form embeds a CSRF token that must be echoed back.
        csrf_token = re.search(
            r'input type="hidden" name="csrf_token" value="(.+)"', response.text).group(1)
        response = self.client.post(
            "/web/login", data={
                "csrf_token": csrf_token,
                "login": os.environ.get('ODOO_USER_NAME'),
                "password": os.environ.get('ODOO_USER_PASSWORD'),
                "redirect": ""
            })
        assert response.status_code == 200
        response = self.client.get("/web")
        assert response.status_code == 200
        # session_info is embedded as an inline JS assignment on /web.
        # (Changed .groups(1)[0] to the equivalent, clearer .group(1).)
        session_info = re.search(
            r'odoo.session_info\s*=\s*(.+);', response.text).group(1)
        session_info = json.loads(session_info)
        self.thecontext = {
            "uid": session_info['uid'],
            "company_id": session_info['company_id'],
            "allowed_company_ids": [session_info['company_id']],
            "lang": session_info['user_context']['lang'],
            "tz": session_info['user_context']['tz']
        }
        # Bug fix: the URL previously used JS template syntax f'...${...}',
        # which left a literal "$" in the path before the menus cache hash.
        response = self.client.get(
            f'/web/webclient/load_menus/{session_info["cache_hashes"]["load_menus"]}')
        assert response.status_code == 200
        response = json.loads(response.content)
        crm_menu = next(
            filter(lambda item: item['name'] == 'CRM', response['children']))
        # Menu actions look like "ir.actions.act_window,<id>".
        self.crm_action_id = int(crm_menu['action'].split(',')[1])
        self.call_jsonrpc(
            "/web/dataset/call_kw/res.users/systray_get_activities",
            model="res.users",
            method="systray_get_activities",
            kwargs={"context": self.thecontext},
            args=[]
        )
        response = self.client.get(
            "/web/image?model=res.users", params={'field': 'image_128', 'id': self.thecontext['uid']})
        assert response.status_code == 200
        response = self.call_action(
            "/web/action/run", action_id=self.crm_action_id)
        result = json.loads(response.content)['result']
        self.thecontext.update(result['context'])

    def call_jsonrpc(self, url, **params):
        """POST a JSON-RPC 2.0 call and return its 'result' payload."""
        response = self.client.post(
            url,
            json={
                "id": random.randrange(10000000000),
                "params": {**params},
                "jsonrpc": "2.0", "method": "call"
            }
        )
        assert response.status_code == 200
        response = json.loads(response.content)
        assert 'error' not in response
        return response['result']

    def call_action(self, url, action_id):
        """POST a JSON-RPC action invocation and return the raw response."""
        response = self.client.post(
            url,
            json={
                "id": random.randrange(10000000000),
                "params": {
                    "action_id": action_id,
                },
                "jsonrpc": "2.0", "method": "call"
            }
        )
        assert response.status_code == 200
        assert 'error' not in json.loads(response.content)
        return response
class OdooUserCrmKanban(OdooUser, HttpUser):
    """Simulates a user browsing the CRM pipeline kanban view."""

    @task
    def crm_kanban(self):
        """Open the CRM kanban: progress bars, grouped read, per-stage reads."""
        self.call_action("/web/action/run", action_id=self.crm_action_id)
        # Restrict to this user's opportunities.
        domain = [
            "&",
            ["type", "=", "opportunity"],
            ["user_id", "=", self.thecontext['uid']]
        ]
        # Progress-bar counts per stage (colored by activity state).
        self.call_jsonrpc(
            "/web/dataset/call_kw/crm.lead/read_progress_bar",
            model="crm.lead", method="read_progress_bar",
            kwargs={
                "domain": domain,
                "group_by": "stage_id",
                "progress_bar": {
                    "field": "activity_state",
                    "colors": {
                        "planned": "success",
                        "today": "warning",
                        "overdue": "danger"
                    },
                    "sum_field": "expected_revenue",
                    "modifiers": {}
                }
            },
            args=[]
        )
        # Grouped read: one group per pipeline stage.
        result = self.call_jsonrpc(
            "/web/dataset/call_kw/crm.lead/web_read_group",
            model="crm.lead", method="web_read_group",
            kwargs={
                "domain": domain,
                "fields": [
                    "stage_id",
                    "color",
                    "priority",
                    "expected_revenue",
                    "kanban_state",
                    "activity_date_deadline",
                    "user_email",
                    "user_id",
                    "partner_id",
                    "activity_summary",
                    "active",
                    "company_currency",
                    "activity_state",
                    "activity_ids",
                    "name",
                    "tag_ids",
                    "activity_exception_decoration",
                    "activity_exception_icon"
                ],
                "groupby": ["stage_id"],
                "orderby": "",
                "lazy": True
            },
            args=[]
        )
        # Fetch the records of each kanban column (stage) separately,
        # mirroring what the web client does.
        for group in result['groups']:
            result = self.call_jsonrpc(
                "/web/dataset/search_read",
                model="crm.lead",
                domain=[
                    "&", ["stage_id", "=", group['stage_id'][0]],
                    "&", ["type", "=", "opportunity"],
                    ["user_id", "=", self.thecontext['uid']]
                ],
                fields=[
                    "stage_id",
                    "color",
                    "priority",
                    "expected_revenue",
                    "kanban_state",
                    "activity_date_deadline",
                    "user_email",
                    "user_id",
                    "partner_id",
                    "activity_summary",
                    "active",
                    "company_currency",
                    "activity_state",
                    "activity_ids",
                    "name",
                    "tag_ids",
                    "activity_exception_decoration",
                    "activity_exception_icon"
                ],
                limit=80,
                sort="",
                context={
                    "bin_size": True
                }
            )
        # TODO: /web/dataset/call_kw/crm.tag/read
        # TODO: /web/dataset/call_kw/crm.stage/read
class OdooUserCrmLeadCreate(OdooUser, HttpUser):
    """Simulates a user creating a new CRM opportunity for a random partner."""

    @task
    def crm_lead_create(self):
        """Pick a random partner, replay the form onchange, create a lead."""
        partners = self.call_jsonrpc(
            "/web/dataset/call_kw/res.partner/name_search",
            model="res.partner", method="name_search",
            kwargs={
                "name": "",
                "args": ["|", ["company_id", "=", False], ["company_id", "=", 1]],
                "operator": "ilike",
                "limit": 8
            },
            args=[]
        )
        # name_search returns (id, display_name) pairs; keep only the id.
        random_partner_id = random.choice(partners)[0]
        # Replays the web client's onchange call so server-side defaults
        # (address/contact fields) are computed for the chosen partner.
        # The last positional arg maps field -> onchange trigger spec.
        result = self.call_jsonrpc(
            "/web/dataset/call_kw/crm.lead/onchange",
            model="crm.lead", method="onchange",
            kwargs={},
            args=[
                [],
                {
                    "partner_id": random_partner_id,
                    "company_id": self.thecontext['company_id'],
                    "user_id": self.thecontext['uid'],
                    "team_id": self.thecontext['default_team_id'],
                    "name": False,
                    "email_from": False,
                    "phone": False,
                    "expected_revenue": 0,
                    "priority": "0",
                    "company_currency": 1,
                    "type": "opportunity",
                    "partner_name": False,
                    "contact_name": False,
                    "country_id": False,
                    "state_id": False,
                    "city": False,
                    "street": False,
                    "street2": False,
                    "zip": False,
                    "mobile": False,
                    "website": False,
                    "function": False,
                    "title": False
                },
                "partner_id",
                {
                    "partner_id": "1",
                    "name": "",
                    "email_from": "",
                    "phone": "1",
                    "expected_revenue": "",
                    "priority": "",
                    "company_currency": "",
                    "company_id": "1",
                    "user_id": "1",
                    "team_id": "",
                    "type": "1",
                    "partner_name": "",
                    "contact_name": "",
                    "country_id": "1",
                    "state_id": "",
                    "city": "",
                    "street": "",
                    "street2": "",
                    "zip": "1",
                    "mobile": "1",
                    "website": "",
                    "function": "",
                    "title": ""
                }
            ]
        )
        # Merge the onchange-computed values with the chosen partner id.
        partner = result['value']
        partner['id'] = random_partner_id
        result = self.call_jsonrpc(
            "/web/dataset/call_kw/crm.lead/create",
            model="crm.lead", method="create",
            kwargs={},
            args=[{
                "type": "opportunity",
                "expected_revenue": random.randrange(1000, 1000000, 1000),
                "company_id": self.thecontext['company_id'],
                "user_id": self.thecontext['uid'],
                "team_id": self.thecontext['default_team_id'],
                "priority": "0",
                "partner_id": partner['id'],
                "name": partner.get('name', False),
                "email_from": partner.get('email_from', False),
                "phone": partner.get('phone', False),
                "partner_name": partner.get('partner_name', False),
                "contact_name": partner.get('contact_name', False),
                "country_id": partner['country_id'][0],
                "state_id": partner['state_id'][0],
                "city": partner.get('city', False),
                "street": partner.get('street', False),
                "street2": partner.get('street2', False),
                "zip": partner.get('zip', False),
                "function": partner.get('function', False),
                "title": partner.get('title', False)
            }]
        )
        # create() returns the new record id; log every 100th one.
        if result % 100 == 0:
            print('CRM lead id created:', result)
if __name__ == "__main__":
from locust.env import Environment
my_env = Environment(user_classes=[OdooUserCrmKanban])
OdooUserCrmKanban(my_env).run()
| 34.714286
| 102
| 0.438317
| 921
| 10,935
| 4.990228
| 0.205212
| 0.036554
| 0.031332
| 0.02785
| 0.396649
| 0.321149
| 0.304395
| 0.271758
| 0.256963
| 0.248695
| 0
| 0.016062
| 0.424966
| 10,935
| 314
| 103
| 34.824841
| 0.714854
| 0.007407
| 0
| 0.340351
| 0
| 0
| 0.227629
| 0.053636
| 0
| 0
| 0
| 0.003185
| 0.031579
| 1
| 0.017544
| false
| 0.003509
| 0.021053
| 0
| 0.059649
| 0.003509
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05702fee1b4a5bd092fcebf23643ddbeb574cdf2
| 939
|
py
|
Python
|
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | 1
|
2018-02-22T15:38:01.000Z
|
2018-02-22T15:38:01.000Z
|
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | null | null | null |
code/model/testSpeedPolar.py
|
PBarde/IBoatPIE
|
dd8038f981940b732be979b49e9b14102c3d4cca
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 13 18:03:27 2017
@author: paul
"""
from SimulatorTLKT import Boat
from SimulatorTLKT import FIT_VELOCITY
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from math import pi
# Use a larger default font size for all plot text.
matplotlib.rcParams.update({'font.size': 22})
# Points of sail (wind angle relative to boat heading), in degrees.
pOfS=np.arange(0,360,0.5)
# Wind magnitudes to sweep, presumably in m/s (matches the labels below).
wMags=np.arange(0,25,2)
polars=[]
legends=[]
fig=plt.figure()
for mag in wMags:
    pol=[]
    legends.append('Wind mag = '+str(mag) + ' m/s')
    # One boat-speed sample per point of sail at this wind magnitude.
    for p in pOfS :
        pol.append(Boat.getDeterDyn(p,mag,FIT_VELOCITY))
    # Store a copy: pol is rebuilt on every outer iteration.
    polars.append(list(pol))
    # Polar plot expects angles in radians.
    ax=plt.polar(pOfS*pi/180,pol,label=str(mag) + ' m/s')
#plt.legend(legends)
plt.legend(bbox_to_anchor=(1.1,1), loc=2, borderaxespad=0.)
#plt.xlabel('Polar plot of Boat velocity [m/s] wrt. point of sail [deg]',fontsize=22)
#ax.xaxis.set_label_position('top')
fig.savefig('../../../Article/Figures/polar_modified2.pdf', bbox_inches='tight')
| 25.378378
| 85
| 0.690096
| 154
| 939
| 4.155844
| 0.590909
| 0.009375
| 0.071875
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045623
| 0.136315
| 939
| 36
| 86
| 26.083333
| 0.743527
| 0.246006
| 0
| 0
| 0
| 0
| 0.110632
| 0.063218
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05705dae303e8a7ae7b9765283158fc78c1a5987
| 3,387
|
py
|
Python
|
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | 2
|
2021-09-10T12:52:43.000Z
|
2021-09-10T15:38:29.000Z
|
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | null | null | null |
src/mcxlib/usage_examples.py
|
carlashley/meecxprofile
|
1fe776b3f23dd9b224d87dd155cc1681cf13fb5e
|
[
"Apache-2.0"
] | null | null | null |
from pprint import pformat
# Explanatory note displayed ahead of the example MCX dictionary below.
ds_obj_mcx_note = ('The MCX data returned back from \'dscl\' is a string nested in the attribute queried.\n'
                   'Settings can be filtered by using key filters.\n'
                   'Multiple values can be filtered for specific domains by comma seperating the values\n'
                   'Filter syntax examples:\n'
                   ' - \'com.apple.MCX=\' will keep the preference domain \'com.apple.MCX\'.\n'
                   ' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin\' will keep the preference\n'
                   ' domain value from the \'com.apple.MCX\' preference domain _specifically_.\n'
                   ' - \'com.apple.MCX=com.apple.cachedaccounts.CreateAtLogin,com.apple.cachedaccounts.WarnOnCreate\'\n'
                   ' will keep the two values for the \'com.apple.MCX\' preference domain.\n'
                   'Please note that filtering values is only done if the preference domain is also specified\n\n'
                   'In the example dictionary below:\n'
                   ' - \'com.apple.MCX\' is referred to as the \'preference domain\'.\n'
                   ' - \'com.apple.cachedaccounts.CreateAtLogin\' is referred to as the \'preference domain value\'.\n'
                   ' This domain value should be taken from the \'mcx_preference_settings\' dictionary.\n\n')
# Example of the nested structure 'dscl' returns for MCX attributes.
ds_obj_mcx_dict_example = {'com.apple.MCX': {'Forced': [{'mcx_preference_settings': {'com.apple.cachedaccounts.CreateAtLogin': True,
                                                                                     'com.apple.cachedaccounts.CreatePHDAtLogin': False,
                                                                                     'com.apple.cachedaccounts.WarnOnCreate': False}}]},
                           'com.apple.dock': {'Forced': [{'mcx_preference_settings': {'AppItems-Raw': [],
                                                                                      'DocItems-Raw': [],
                                                                                      'contents-immutable': False,
                                                                                      'static-only': False},
                                                          'mcx_union_policy_keys': [{'mcx_input_key_names': ['AppItems-Raw'],
                                                                                     'mcx_output_key_name': 'static-apps',
                                                                                     'mcx_remove_duplicates': True},
                                                                                    {'mcx_input_key_names': ['DocItems-Raw'],
                                                                                     'mcx_output_key_name': 'static-others',
                                                                                     'mcx_remove_duplicates': True},
                                                                                    {'mcx_input_key_names': ['MCXDockSpecialFolders-Raw'],
                                                                                     'mcx_output_key_name': 'MCXDockSpecialFolders',
                                                                                     'mcx_remove_duplicates': True}]}]}}
# Full usage text: the note followed by the pretty-printed example dict.
ds_obj_mcx = f'{ds_obj_mcx_note}{pformat(ds_obj_mcx_dict_example)}'
| 91.540541
| 138
| 0.437851
| 281
| 3,387
| 5.096085
| 0.330961
| 0.089385
| 0.061453
| 0.03352
| 0.282821
| 0.243017
| 0.166201
| 0.120112
| 0
| 0
| 0
| 0
| 0.472394
| 3,387
| 36
| 139
| 94.083333
| 0.801343
| 0
| 0
| 0.0625
| 0
| 0
| 0.379392
| 0.10127
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0571570e4ea6cc0ac98e3e348473a3292c2d2151
| 797
|
py
|
Python
|
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
program_param.py
|
duszek123/Example_Project
|
72e65ce5f31774c250cf388dbfb0a6d2a6b3ffeb
|
[
"MIT"
] | null | null | null |
import torch
import cv2
# Data directory with training and validation pictures.
data_dir = '/home/pawel/Pulpit/picture_data'
# Source video stream device.
camera_source = '/dev/video2'
# Flag; currently unused (always False).
save = False
# Input picture size in pixels (width, height).
input_size = (224,224)
size_pict = input_size[0]
# Number of samples per training batch.
batch_size = 8
# Number of data-loader worker processes.
num_workers = 4
# Number of training epochs.
epoch_num = 2
# Old variable, no longer used.
frame_iterator = 0
# Flag, no longer used.
flag_start = False
# Device used by the project: GPU (CUDA) when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Video stream used by the project; opened eagerly at import time.
video_stream = vid = cv2.VideoCapture(camera_source)
if not video_stream.isOpened():
    raise ValueError("Unable to open video source", camera_source)
| 24.151515
| 71
| 0.756587
| 130
| 797
| 4.515385
| 0.569231
| 0.074957
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022255
| 0.154329
| 797
| 32
| 72
| 24.90625
| 0.848665
| 0.378921
| 0
| 0
| 0
| 0
| 0.161826
| 0.064315
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0572b494de8de54123140e45c9c69a2ed0fbad3b
| 501
|
py
|
Python
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 67
|
2021-12-02T05:53:44.000Z
|
2022-03-31T07:21:26.000Z
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 13
|
2021-12-05T14:23:46.000Z
|
2022-03-25T21:07:20.000Z
|
models/fields/__init__.py
|
hengwei-chan/3D_SBDD
|
eda6d51aaf01ef25581a46920a25161678fab76d
|
[
"MIT"
] | 16
|
2022-01-11T11:48:24.000Z
|
2022-03-27T19:20:58.000Z
|
from .classifier import SpatialClassifier
def get_field(config, num_classes, num_indicators, in_channels):
    """Instantiate the field model named by ``config.name``.

    Only the 'classifier' field type is currently supported; any other
    name raises NotImplementedError.
    """
    if config.name != 'classifier':
        raise NotImplementedError('Unknown field: %s' % config.name)
    return SpatialClassifier(
        num_classes=num_classes,
        num_indicators=num_indicators,
        in_channels=in_channels,
        num_filters=config.num_filters,
        k=config.knn,
        cutoff=config.cutoff,
    )
| 31.3125
| 68
| 0.628743
| 51
| 501
| 5.941176
| 0.490196
| 0.09901
| 0.128713
| 0.151815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.293413
| 501
| 15
| 69
| 33.4
| 0.855932
| 0
| 0
| 0
| 0
| 0
| 0.053892
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
0572d30a3c1b204b7741919022f74dedf09c6c6c
| 1,693
|
py
|
Python
|
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
get_data/__init__.py
|
BrunoASNascimento/inmet_api
|
ec663543b1f6a77900166df2e6bf64d1f26f910d
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
import requests
import pandas as pd
def cleaner_data(data):
    """Parse raw INMET station lines into a typed pandas DataFrame.

    data: list of text lines; index 0 (presumably a header) is skipped.
    NOTE(review): this function only prints ``df.head`` and returns None —
    the parsed frame never reaches the caller.
    """
    columns = ['ESTACAO', 'LATITUDE', 'LONGITUDE', 'ALTITUDE', 'ANO', 'MES', 'DIA', 'HORA', 'TEMP', 'TMAX', 'TMIN', 'UR', 'URMAX', 'URMIN',
               'TD', 'TDMAX', 'TDMIN', 'PRESSAONNM', 'PRESSAONNM_MAX', 'PRESSAONNM_MIN', 'VELVENTO', 'DIR_VENTO', 'RAJADA', 'RADIACAO', 'PRECIPITACAO']
    df = pd.DataFrame(columns=columns)
    for i in range(1, len(data)):
        try:
            # Split each line on the source file's column separator.
            dado = [data[i].split(' ')]
            dado = pd.DataFrame(dado, columns=columns)
            # print(dado)
            df = df.append(dado)
        except:
            # NOTE(review): bare except silently drops malformed lines.
            pass
    str_float = ['LATITUDE', 'LONGITUDE', 'ALTITUDE',
                 'TEMP', 'TMAX', 'TMIN', 'UR', 'URMAX', 'URMIN',
                 'TD', 'TDMAX', 'TDMIN',
                 'PRESSAONNM', 'PRESSAONNM_MAX',
                 'PRESSAONNM_MIN', 'VELVENTO', 'DIR_VENTO',
                 'RAJADA', 'RADIACAO', 'PRECIPITACAO']
    str_int = ['ANO', 'MES', 'DIA', 'HORA']
    # Coerce measurement columns to float and date parts to int64.
    df[str_float] = df[str_float].astype('float')
    df[str_int] = df[str_int].astype('int64')
    print(df.head)
def get_data():
    """Download yesterday's 24 hourly INMET observation files.

    Each hourly file is fetched over HTTP, split into lines, and fed to
    ``cleaner_data`` inside the loop.  Only the *last* hour's raw lines are
    returned.

    NOTE(review): the module-level ``cleaner_data(get_data())`` call therefore
    re-parses hour 23 only, and hours 0-22 are parsed but their results are
    discarded (``cleaner_data`` prints and returns nothing) -- confirm this
    double processing is intended.
    """
    # Use yesterday (UTC) so that a full day of hourly files exists.
    date_now = datetime.utcnow()
    date_delta = date_now - timedelta(days=1)
    date_str = date_delta.strftime("%Y%m%d")
    for hour in range(0, 24):
        print(hour)
        # File name pattern: UND_inmet_YYYYMMDDHH00.txt (HH zero-padded).
        url = ("http://master.iag.usp.br/fig_dados/OBSERVACAO/INMET/UND_inmet_" +
               str(date_str)+str(hour).zfill(2)+"00.txt")
        # print(url)
        response = requests.request("GET", url)
        data = response.text.split('\n')
        print(len(data))
        cleaner_data(data)
    return data
# Script entry: download yesterday's data and parse the last downloaded hour.
cleaner_data(get_data())
| 32.557692
| 151
| 0.559362
| 200
| 1,693
| 4.605
| 0.46
| 0.021716
| 0.032573
| 0.02823
| 0.247557
| 0.247557
| 0.247557
| 0.247557
| 0.247557
| 0.247557
| 0
| 0.008026
| 0.264028
| 1,693
| 51
| 152
| 33.196078
| 0.73114
| 0.012995
| 0
| 0
| 0
| 0
| 0.244005
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.026316
| 0.078947
| 0
| 0.157895
| 0.078947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
057648a66341634f2bd91398e33248914e65d08f
| 435
|
py
|
Python
|
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | 5
|
2020-07-20T11:05:07.000Z
|
2022-03-11T15:51:52.000Z
|
src/pynorare/cli_util.py
|
concepticon/pynorare
|
3cf5ea2d1597c5acc84963f781ff49d96b4d7e02
|
[
"MIT"
] | null | null | null |
from pyconcepticon import Concepticon
from pynorare.dataset import get_dataset_cls
def add_datasets(parser):
    """Register the positional ``dataset`` argument (one or more ids) on *parser*."""
    parser.add_argument(
        'dataset',
        nargs='+',
        type=str,
        help='select your dataset')
def iter_datasets(args):
    """Yield an instantiated dataset object for every id in ``args.dataset``."""
    for ds_id in args.dataset:
        ds_path = args.api.datasets[ds_id].path.parent
        dataset_cls = get_dataset_cls(ds_path)
        yield dataset_cls(repos=args.norarepo, concepticon=Concepticon(args.repos.repos))
| 24.166667
| 81
| 0.691954
| 55
| 435
| 5.345455
| 0.545455
| 0.102041
| 0.088435
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.206897
| 435
| 17
| 82
| 25.588235
| 0.852174
| 0
| 0
| 0
| 0
| 0
| 0.062069
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
057dcb0e3d38cc7460f6b046f1c4949c4d391cb9
| 2,478
|
py
|
Python
|
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | 1
|
2021-12-22T02:45:39.000Z
|
2021-12-22T02:45:39.000Z
|
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | null | null | null |
sktime/transformations/hierarchical/tests/test_aggregate.py
|
biologioholic/sktime
|
9d0391a04b11d22bd783b452f01aa5b4529b41a2
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
"""Tests for hierarchical aggregator."""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["ciaran-g"]
import pytest
from sktime.transformations.hierarchical.aggregate import Aggregator
from sktime.utils._testing.hierarchical import _bottom_hier_datagen
# test for equal output with with named/unnamed indexes
@pytest.mark.parametrize("flatten_single_levels", [True, False])
def test_aggregator_fit_transform_index(flatten_single_levels):
    """Tests fit_transform of aggregator function.

    This test asserts that the output of Aggregator using fit_transform() with a
    named multiindex is equal to an unnamed one. It also tests that
    Aggregator does not change the names of the input index in both cases.
    """
    agg = Aggregator(flatten_single_levels=flatten_single_levels)
    X = _bottom_hier_datagen(
        no_bottom_nodes=3,
        no_levels=1,
    )
    # named indexes
    X_agg = agg.fit_transform(X)
    msg = "Aggregator returns wrong index names."
    assert X_agg.index.names == X.index.names, msg
    # unnamed indexes -- note the rename is in place, so the same X is
    # deliberately reused for the second fit_transform below.
    X.index.rename([None] * X.index.nlevels, inplace=True)
    X_agg_unnamed = agg.fit_transform(X)
    assert X_agg_unnamed.index.names == X.index.names, msg
    msg = "Aggregator returns different output for named and unnamed indexes."
    assert X_agg.equals(X_agg_unnamed), msg
# test that flatten_single_levels works as expected
def test_aggregator_flatten():
    """Check that ``flatten_single_levels`` changes the aggregated hierarchy.

    Uses a fixed 10-node, 4-level hierarchy (random_seed=111) and compares the
    number of unique non-time index levels with and without flattening.
    """
    data = _bottom_hier_datagen(
        no_bottom_nodes=10,
        no_levels=4,
        random_seed=111,
    )
    # (flatten flag, expected unique levels, failure message)
    cases = [
        (
            False,
            21,
            "Aggregator without flattening should have 21 unique levels, "
            "with the time index removed, for random_seed=111.",
        ),
        (
            True,
            17,
            "Aggregator with flattening should have 17 unique levels, "
            "with the time index removed, for random_seed=111.",
        ),
    ]
    for flatten, expected, msg in cases:
        transformed = Aggregator(flatten_single_levels=flatten).fit_transform(data)
        assert len(transformed.droplevel(-1).index.unique()) == expected, msg
| 33.486486
| 80
| 0.717514
| 341
| 2,478
| 5.014663
| 0.343109
| 0.023392
| 0.088889
| 0.050877
| 0.222807
| 0.162573
| 0.111111
| 0.074854
| 0.074854
| 0.074854
| 0
| 0.013561
| 0.196529
| 2,478
| 73
| 81
| 33.945205
| 0.845304
| 0.305892
| 0
| 0.205128
| 0
| 0
| 0.208283
| 0.012605
| 0
| 0
| 0
| 0
| 0.128205
| 1
| 0.051282
| false
| 0
| 0.076923
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
057e82bc7eee8bfd854f64e90c47dfe5089a763d
| 563
|
py
|
Python
|
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | null | null | null |
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | 49
|
2021-03-16T14:58:18.000Z
|
2022-03-14T22:06:36.000Z
|
doni/tests/unit/api/test_availability_window.py
|
ChameleonCloud/doni
|
e280a0fddf4ee7d2abb69ceed49a9728e88cf99b
|
[
"Apache-2.0"
] | null | null | null |
from flask.testing import FlaskClient
from doni.tests.unit import utils
def test_list_availability_windows(
    mocker, user_auth_headers, client: "FlaskClient", database: "utils.DBFixtures"
):
    """List availability windows for a hardware item via the API.

    Verifies the endpoint returns 200 with an empty ``availability`` list for
    freshly created hardware, and that the authorization hook was invoked.
    """
    mock_authorize = mocker.patch("doni.api.availability_window.authorize")
    hw = database.add_hardware()
    res = client.get(
        f"/v1/hardware/{hw['uuid']}/availability", headers=user_auth_headers
    )
    assert res.status_code == 200
    assert res.json == {
        "availability": [],
    }
    # BUG FIX: ``called_once_with`` is not a Mock assertion method; accessing
    # it on a Mock just returns a truthy child mock, so the original
    # ``assert mock_authorize.called_once_with("hardware:get")`` always passed.
    # Assert the mock was actually invoked instead.  (The exact call arguments
    # are not pinned here because they are not visible in this file --
    # tighten to assert_called_once_with(...) once confirmed against the API.)
    assert mock_authorize.called
| 29.631579
| 82
| 0.708703
| 68
| 563
| 5.661765
| 0.617647
| 0.041558
| 0.077922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008584
| 0.172291
| 563
| 18
| 83
| 31.277778
| 0.817597
| 0
| 0
| 0
| 0
| 0
| 0.225577
| 0.134991
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
057fec44c986714a8f02d47b39f9f891463a6252
| 848
|
py
|
Python
|
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
peuler_012_better.py
|
bayramcicek/mini-programs
|
3f876e3274b7beeb5e7413ac9c5275813d9f0d2d
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/python3
import math
class Solution:
    """Divisor-counting helper for the triangle-number search below."""

    @staticmethod
    def number_of_factor(n):
        """Return the number of divisors of ``n``.

        Accepts ints or whole-number floats (the driver passes triangle
        numbers computed with ``/``, which are floats like ``3.0``).

        Fixes vs. original: the parameter was misleadingly named ``self``
        on a @staticmethod (callers pass it positionally); the float-based
        ``math.ceil(math.sqrt(...))`` loop bound is replaced by the exact
        integer ``math.isqrt``, which cannot suffer float rounding for
        large inputs.
        """
        n = int(n)
        if n == 1:
            return 1
        count = 0
        root = math.isqrt(n)
        # Every divisor i <= sqrt(n) pairs with n // i >= sqrt(n).
        for i in range(1, root + 1):
            if n % i == 0:
                count += 2
        # A perfect square counted its root twice.
        if root * root == n:
            count -= 1
        return count
# --- Driver: for each divisor target i = 1..1000, advance through the
# triangle numbers until one whose divisor count exceeds i is found, then
# print it.  ``box`` and ``temp`` persist across outer iterations, so the
# search resumes where it left off rather than restarting from 1.
test = Solution  # alias; the method is static, no instance needed
triangle_arr = [0]  # triangle numbers generated so far (index 0 is a filler)
temp, box, curr_num = 0, 0, 0  # last divisor count, triangle index, triangle value
for i in range(1, 1001):
    while temp <= i:
        box += 1
        # box-th triangle number; true division makes this a float (e.g. 3.0)
        curr_num = (box * (box + 1)) / 2
        temp = test.number_of_factor(curr_num)
        triangle_arr.append(curr_num)
    print(curr_num)
# number_test = int(input())
#
# limit_list = []
# for a in range(number_test):
# limit_list.append(int(input()))
#
# for limit in limit_list:
# print(int(triangle_arr[limit]))
| 20.190476
| 69
| 0.5625
| 122
| 848
| 3.770492
| 0.344262
| 0.076087
| 0.078261
| 0.047826
| 0.13913
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033784
| 0.301887
| 848
| 41
| 70
| 20.682927
| 0.743243
| 0.21934
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.217391
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05803580ad5cf536a86b26fbe2b79573b774b99b
| 9,253
|
py
|
Python
|
swyft/plot/plot.py
|
undark-lab/swyft
|
50aa524e2f3a2b3d1354543178ff72bc7f055a35
|
[
"MIT"
] | 104
|
2020-11-26T09:46:03.000Z
|
2022-03-18T06:22:03.000Z
|
swyft/plot/plot.py
|
cweniger/swyft
|
2c0ed514622a37e8ec4e406b99a8327ecafb7ab4
|
[
"MIT"
] | 83
|
2021-03-02T15:54:26.000Z
|
2022-03-10T08:09:05.000Z
|
swyft/plot/plot.py
|
undark-lab/swyft
|
50aa524e2f3a2b3d1354543178ff72bc7f055a35
|
[
"MIT"
] | 10
|
2021-02-04T14:27:36.000Z
|
2022-03-31T17:39:34.000Z
|
import numpy as np
import pylab as plt
from scipy.integrate import simps
def grid_interpolate_samples(x, y, bins=1000, return_norm=False):
    """Interpolate weighted samples onto a regular grid and normalize.

    Sorts the samples by ``x``, linearly interpolates ``y`` onto ``bins``
    evenly spaced points spanning [x.min(), x.max()], and divides by the
    Simpson-rule integral so the gridded curve integrates to one.

    Returns ``(x_grid, y_grid_normed)``, plus the normalization constant
    when ``return_norm`` is True.
    """
    order = np.argsort(x)
    xs, ys = x[order], y[order]
    grid = np.linspace(xs[0], xs[-1], bins)
    interp = np.interp(grid, xs, ys)
    area = simps(interp, grid)
    normed = interp / area
    if return_norm:
        return grid, normed, area
    return grid, normed
def get_HDI_thresholds(x, cred_level=(0.68268, 0.95450, 0.99730)):
    """Return density thresholds enclosing the requested credibility masses.

    Args:
        x: array of (unnormalized) density values; flattened internally.
        cred_level: credible-interval mass fractions (defaults to the
            1/2/3-sigma levels).

    Returns:
        np.ndarray of one threshold per entry of ``cred_level``: the density
        value at which the cumulative mass of all higher-density cells first
        reaches that fraction of the total mass.

    FIX: the default argument was a mutable list; replaced by an equivalent
    tuple (same values, immutable) to avoid the shared-mutable-default trap.
    """
    x = x.flatten()
    x = np.sort(x)[::-1]  # Sort backwards (highest density first)
    total_mass = x.sum()
    enclosed_mass = np.cumsum(x)
    # Index of the first cell where the enclosed mass reaches fraction f.
    idx = [np.argmax(enclosed_mass >= total_mass * f) for f in cred_level]
    levels = np.array(x[idx])
    return levels
def plot_posterior(
    samples,
    pois,
    weights_key=None,
    ax=plt,
    grid_interpolate=False,
    bins=100,
    color="k",
    contours=True,
    **kwargs
):
    """Plot a 1- or 2-dim marginal posterior for the given parameter(s).

    Args:
        samples: dict-like with keys "v" (sample values), "weights" (dict
            keyed by tuples of parameter indices) and, for the
            grid-interpolate path, "log_priors".
        pois: single parameter index (int) or tuple of one/two indices.
        weights_key: explicit key into ``samples["weights"]``.
            NOTE(review): when a non-None ``weights_key`` is passed, the
            resolution block below is skipped, ``w`` stays None and the
            function returns immediately -- looks unintentional; confirm.
        ax: matplotlib axes (defaults to the pylab module itself).
        grid_interpolate: 1-dim only; grid-interpolate weighted samples
            instead of histogramming.
        bins: histogram bin count.
        color: line/contour color.
        contours: draw HDI contours in the 1-dim case.
        **kwargs: forwarded to ``ax.plot`` (1-dim case only).

    Returns:
        dict of diagnostics (mean, mode, HDI1/2/3, entropy), or None when
        no matching weights entry is found.
    """
    if isinstance(pois, int):
        pois = (pois,)
    w = None
    # FIXME: Clean up ad hoc code
    if weights_key is None:
        weights_key = tuple(sorted(pois))
        try:
            w = samples["weights"][tuple(weights_key)]
        except KeyError:
            # Fall back to any stored key containing the requested poi(s).
            if len(weights_key) == 1:
                for k in samples["weights"].keys():
                    if weights_key[0] in k:
                        weights_key = k
                        break
                w = samples["weights"][tuple(weights_key)]
            elif len(weights_key) == 2:
                for k in samples["weights"].keys():
                    if set(weights_key).issubset(k):
                        weights_key = k
                # NOTE(review): no break above, and ``k`` (not weights_key)
                # is used here -- this picks the *last* matching key and
                # raises NameError if the weights dict is empty; confirm.
                w = samples["weights"][k]
    if w is None:
        return
    if len(pois) == 1:
        x = samples["v"][:, pois[0]]
        if grid_interpolate:
            # Grid interpolate samples
            log_prior = samples["log_priors"][pois[0]]
            w_eff = np.exp(np.log(w) + log_prior)  # p(z|x) = r(x, z) p(z)
            zm, v = grid_interpolate_samples(x, w_eff)
        else:
            # Weighted, density-normalized histogram; zm are bin centers.
            v, e = np.histogram(x, weights=w, bins=bins, density=True)
            zm = (e[1:] + e[:-1]) / 2
        levels = sorted(get_HDI_thresholds(v))
        if contours:
            contour1d(zm, v, levels, ax=ax, color=color)
        ax.plot(zm, v, color=color, **kwargs)
        ax.set_xlim([x.min(), x.max()])
        ax.set_ylim([-v.max() * 0.05, v.max() * 1.1])
        # Diagnostics
        mean = sum(w * x) / sum(w)
        mode = zm[v == v.max()][0]
        # HDI bounds: outermost grid points above each (ascending) threshold.
        int2 = zm[v > levels[2]].min(), zm[v > levels[2]].max()
        int1 = zm[v > levels[1]].min(), zm[v > levels[1]].max()
        int0 = zm[v > levels[0]].min(), zm[v > levels[0]].max()
        entropy = -simps(v * np.log(v), zm)
        return dict(
            mean=mean, mode=mode, HDI1=int2, HDI2=int1, HDI3=int0, entropy=entropy
        )
    elif len(pois) == 2:
        # FIXME: use interpolation when grid_interpolate == True
        x = samples["v"][:, pois[0]]
        y = samples["v"][:, pois[1]]
        counts, xbins, ybins, _ = ax.hist2d(x, y, weights=w, bins=bins, cmap="gray_r")
        levels = sorted(get_HDI_thresholds(counts))
        try:
            ax.contour(
                counts.T,
                extent=[xbins.min(), xbins.max(), ybins.min(), ybins.max()],
                levels=levels,
                linestyles=[":", "--", "-"],
                colors=color,
            )
        except ValueError:
            print("WARNING: 2-dim contours not well-defined.")
        ax.set_xlim([x.min(), x.max()])
        ax.set_ylim([y.min(), y.max()])
        # Marginal means from the 2-dim histogram (bin centers xm/ym).
        xm = (xbins[:-1] + xbins[1:]) / 2
        ym = (ybins[:-1] + ybins[1:]) / 2
        cx = counts.sum(axis=1)
        cy = counts.sum(axis=0)
        mean = (sum(xm * cx) / sum(cx), sum(ym * cy) / sum(cy))
        return dict(mean=mean, mode=None, HDI1=None, HDI2=None, HDI3=None, entropy=None)
def plot_1d(
    samples,
    pois,
    truth=None,
    bins=100,
    figsize=(15, 10),
    color="k",
    labels=None,
    label_args={},
    ncol=None,
    subplots_kwargs={},
    fig=None,
    contours=True,
) -> None:
    """Make beautiful 1-dim posteriors.

    Args:
        samples: Samples from `swyft.Posteriors.sample`
        pois: List of parameters of interest
        truth: Ground truth vector
        bins: Number of bins used for histograms.
        figsize: Size of figure
        color: Color
        labels: Custom labels (default is parameter names)
        label_args: Custom label arguments
        ncol: Number of panel columns
        subplot_kwargs: Subplot kwargs
        fig: Existing figure to draw into (a new one is created when None)
        contours: Draw HDI contours on each panel

    NOTE(review): despite the ``-> None`` annotation this function returns
    ``(fig, diags)``; also ``label_args``/``subplots_kwargs`` are mutable
    default arguments (harmless here as they are only read).
    """
    grid_interpolate = False
    diags = {}
    if ncol is None:
        ncol = len(pois)
    K = len(pois)
    # Enough rows to hold K panels at ncol panels per row.
    nrow = (K - 1) // ncol + 1
    if fig is None:
        fig, axes = plt.subplots(nrow, ncol, figsize=figsize, **subplots_kwargs)
    else:
        axes = fig.get_axes()
    lb = 0.125
    tr = 0.9
    whspace = 0.15
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    if labels is None:
        labels = [samples["parameter_names"][pois[i]] for i in range(K)]
    for k in range(K):
        # plt.subplots returns a scalar, 1-d or 2-d axes array depending on
        # the grid shape; pick the k-th panel accordingly.
        if nrow == 1 and ncol > 1:
            ax = axes[k]
        elif nrow == 1 and ncol == 1:
            ax = axes
        else:
            i, j = k % ncol, k // ncol
            ax = axes[j, i]
        ret = plot_posterior(
            samples,
            pois[k],
            ax=ax,
            grid_interpolate=grid_interpolate,
            color=color,
            bins=bins,
            contours=contours,
        )
        ax.set_xlabel(labels[k], **label_args)
        if truth is not None:
            ax.axvline(truth[pois[k]], ls=":", color="r")
        diags[(pois[k],)] = ret
    return fig, diags
def plot_corner(
    samples,
    pois,
    bins=100,
    truth=None,
    figsize=(10, 10),
    color="k",
    labels=None,
    label_args={},
    contours_1d: bool = True,
    fig=None,
) -> None:
    """Make a beautiful corner plot.

    Args:
        samples: Samples from `swyft.Posteriors.sample`
        pois: List of parameters of interest
        truth: Ground truth vector
        bins: Number of bins used for histograms.
        figsize: Size of figure
        color: Color
        labels: Custom labels (default is parameter names)
        label_args: Custom label arguments
        contours_1d: Plot 1-dim contours
        fig: Figure instance

    NOTE(review): despite the ``-> None`` annotation this function returns
    ``(fig, diagnostics)``; ``label_args`` is a mutable default argument
    (only read here).
    """
    K = len(pois)
    if fig is None:
        fig, axes = plt.subplots(K, K, figsize=figsize)
    else:
        axes = np.array(fig.get_axes()).reshape((K, K))
    lb = 0.125
    tr = 0.9
    whspace = 0.1
    fig.subplots_adjust(
        left=lb, bottom=lb, right=tr, top=tr, wspace=whspace, hspace=whspace
    )
    diagnostics = {}
    if labels is None:
        labels = [samples["parameter_names"][pois[i]] for i in range(K)]
    for i in range(K):
        for j in range(K):
            ax = axes[i, j]
            # Switch off upper left triangle
            if i < j:
                ax.set_yticklabels([])
                ax.set_xticklabels([])
                ax.set_xticks([])
                ax.set_yticks([])
                ax.set_frame_on(False)
                continue
            # Formatting labels: ticks only on the outer edge of the grid.
            if j > 0 or i == 0:
                ax.set_yticklabels([])
                # ax.set_yticks([])
            if i < K - 1:
                ax.set_xticklabels([])
                # ax.set_xticks([])
            if i == K - 1:
                ax.set_xlabel(labels[j], **label_args)
            if j == 0 and i > 0:
                ax.set_ylabel(labels[i], **label_args)
            # Set limits
            # ax.set_xlim(x_lims[j])
            # if i != j:
            #    ax.set_ylim(y_lims[i])
            # 2-dim plots (lower triangle)
            if j < i:
                ret = plot_posterior(
                    samples, [pois[j], pois[i]], ax=ax, color=color, bins=bins
                )
                if truth is not None:
                    ax.axvline(truth[pois[j]], color="r")
                    ax.axhline(truth[pois[i]], color="r")
                diagnostics[(pois[j], pois[i])] = ret
            # 1-dim marginals on the diagonal
            if j == i:
                ret = plot_posterior(
                    samples,
                    pois[i],
                    ax=ax,
                    color=color,
                    bins=bins,
                    contours=contours_1d,
                )
                if truth is not None:
                    ax.axvline(truth[pois[i]], ls=":", color="r")
                diagnostics[(pois[i],)] = ret
    return fig, diagnostics
def contour1d(z, v, levels, ax=plt, linestyles=None, color=None, **kwargs):
    """Shade the 1-dim highest-density regions defined by ``levels``.

    For each of the first three thresholds, fills (translucently) the
    regions of ``z`` where the density ``v`` exceeds that threshold, with
    a band extending well beyond the plotted y-range.  ``linestyles`` and
    ``**kwargs`` are currently unused; they are kept for API compatibility.
    """
    band_lo = -1.0 * v.max()
    band_hi = 5.0 * v.max()
    for idx in (0, 1, 2):
        ax.fill_between(z, band_lo, band_hi, where=v > levels[idx], color=color, alpha=0.1)
# Module is import-only; nothing to run when executed as a script.
if __name__ == "__main__":
    pass
| 29.848387
| 88
| 0.514428
| 1,232
| 9,253
| 3.770292
| 0.196429
| 0.019376
| 0.013563
| 0.020667
| 0.391173
| 0.345748
| 0.295156
| 0.257912
| 0.213563
| 0.185145
| 0
| 0.025182
| 0.347671
| 9,253
| 309
| 89
| 29.944984
| 0.744367
| 0.152167
| 0
| 0.328947
| 0
| 0
| 0.018922
| 0
| 0
| 0
| 0
| 0.003236
| 0
| 1
| 0.026316
| false
| 0.004386
| 0.013158
| 0
| 0.074561
| 0.004386
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05819bbe1c0902e6600dadc33453e92046d7a1ff
| 3,038
|
py
|
Python
|
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | 1
|
2021-07-13T18:43:59.000Z
|
2021-07-13T18:43:59.000Z
|
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | null | null | null |
control-gastos/python/main.py
|
manuelduarte077/Ejercicios-con-Python-NodeJS
|
d7b26fdeeb1640272847274b99b2f607145d58a4
|
[
"MIT"
] | null | null | null |
import os
from tabulate import tabulate
import requests
def iniciar():
    """Run the interactive console menu (Spanish UI) until the user exits."""
    os.system('cls')
    # Each menu choice maps to its handler; '6' (exit) is handled separately.
    acciones = {
        '1': nuevo_movimiento,
        '2': mostrar_movimientos,
        '3': buscar_movimiento,
        '4': modificar_movimiento,
        '5': eliminar_movimiento,
    }
    menu = (
        'Seleccione una opción: ',
        '\t1. Registrar movimiento',
        '\t2. Ver todos los movimientos',
        '\t3. Buscar un movimiento',
        '\t4. Modificar un movimiento',
        '\t5. Eliminar un movimiento',
        '\t6. Salir',
    )
    while True:
        for linea in menu:
            print(linea)
        opcion = input('Ingrese una opción: ')
        if opcion == '6':
            break
        accion = acciones.get(opcion)
        if accion is not None:
            accion()
        else:
            print('Escoja una opción correcta')
def nuevo_movimiento():
    """Prompt for a new movement (type, amount, date) and POST it to the API.

    Prints the raw response body returned by the server.
    """
    tipo = input('Ingrese el tipo de movimiento \n- Ingreso\n- Gasto\n')
    cantidad = input('Ingrese la cantidad: ')
    fecha = input('Ingrese la fecha: ')
    data = {'tipo': tipo, 'cantidad': cantidad, 'fecha': fecha}
    respuesta = requests.post(
        url='http://localhost:3000/movimientos/registrar', data=data)
    print(respuesta.text)
def mostrar_movimientos():
    """Fetch every movement from the API and print it as a formatted table."""
    response = requests.get(url='http://localhost:3000/movimientos/todos')
    # One table row per JSON object, keeping the values in key order.
    datos = [[valor for valor in registro.values()] for registro in response.json()]
    headers = ['ID', 'TIPO DE MOVIMIENTO', 'CANTIDAD', 'FECHA']
    tabla = tabulate(datos, headers, tablefmt='fancy_grid')
    print(tabla)
def buscar_movimiento():
    """Ask for a movement id, fetch it from the API and print it as a table."""
    id = input('Ingrese el id del movimiento a buscar: ')
    response = requests.get(url='http://localhost:3000/movimientos/buscar/'+id)
    # One table row per JSON object, keeping the values in key order.
    datos = [[valor for valor in registro.values()] for registro in response.json()]
    headers = ['ID', 'TIPO DE MOVIMIENTO', 'CANTIDAD', 'FECHA']
    tabla = tabulate(datos, headers, tablefmt='fancy_grid')
    print(tabla)
def modificar_movimiento():
    """Ask for a movement id and a field to change, then POST the update.

    NOTE(review): if the user enters anything other than '1'/'2'/'3', the raw
    input is sent as the field name with an empty ``nuevo_valor`` -- confirm
    the API rejects that gracefully.
    """
    id = input('Ingrese el id del movimiento a modificar: ')
    campo = input(
        'Ingrese el campo a modificar:\n1. Tipo\n2. Cantidad\n3. Fecha')
    nuevo_valor = ''
    # Map the numeric menu choice to the API field name and prompt for its value.
    if(campo == '1'):
        campo = 'tipo'
        nuevo_valor = input('Ingrese el tipo de movimiento: ')
    elif(campo == '2'):
        campo = 'cantidad'
        nuevo_valor = input('Ingrese la cantidad: ')
    elif(campo == '3'):
        campo = 'fecha'
        nuevo_valor = input('Ingrese la fecha: ')
    data = {'campo': campo, 'nuevo_valor': nuevo_valor}
    respuesta = requests.post(
        url='http://localhost:3000/movimientos/modificar/'+id, data=data)
    print(respuesta.text)
def eliminar_movimiento():
    """Ask for a movement id and POST its deletion to the API.

    NOTE(review): the prompt says 'a elimina' (likely meant 'a eliminar') --
    left untouched here since it is a user-facing runtime string.
    """
    id = input('Ingrese el id del movimiento a elimina: ')
    respuesta = requests.post(
        url='http://localhost:3000/movimientos/eliminar/'+id)
    print(respuesta.text)
# Script entry: start the interactive menu.
iniciar()
| 31
| 79
| 0.600066
| 347
| 3,038
| 5.201729
| 0.26513
| 0.07313
| 0.046537
| 0.055402
| 0.515235
| 0.470914
| 0.40554
| 0.40554
| 0.263712
| 0.193906
| 0
| 0.016949
| 0.262014
| 3,038
| 97
| 80
| 31.319588
| 0.788136
| 0
| 0
| 0.289157
| 0
| 0
| 0.301185
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072289
| false
| 0
| 0.036145
| 0
| 0.108434
| 0.156627
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05826df3789ad47bc005b4bcd34765514c7e2fd2
| 409
|
py
|
Python
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 31
|
2020-05-02T13:34:26.000Z
|
2021-06-06T17:25:52.000Z
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 108
|
2019-11-18T19:41:52.000Z
|
2022-03-18T13:58:17.000Z
|
examples/idioms/programs/016.1530-depth-first-traversing-of-a-binary-tree.py
|
laowantong/paroxython
|
4626798a60eeaa765dbfab9e63e04030c9fcb1d0
|
[
"MIT"
] | 4
|
2020-05-19T08:57:44.000Z
|
2020-09-21T08:53:46.000Z
|
"""Depth-first traversing of a binary tree.
Call a function _f on every node of binary tree _bt, in depth-first infix order
Source: programming-idioms.org
"""
# Implementation author: TinyFawks
# Created on 2016-02-18T08:50:27.130406Z
# Last modified on 2016-02-18T09:16:52.625429Z
# Version 2
# Recursive DFS.
def dfs(bt):
    """Traverse binary tree ``bt`` depth-first in infix (left, node, right)
    order, applying the externally defined callback ``f`` to every node."""
    if bt is not None:
        dfs(bt.left)
        f(bt)
        dfs(bt.right)
| 18.590909
| 79
| 0.694377
| 67
| 409
| 4.208955
| 0.716418
| 0.053191
| 0.056738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125767
| 0.202934
| 409
| 21
| 80
| 19.47619
| 0.739264
| 0.723716
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
05830297f5e87cadfedcaa83499c7c9b2affb118
| 3,746
|
py
|
Python
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
barbosamp/automacao-api-rest-jornada-learning
|
9ceb57bc6f4d845c35a149d760775c10c3a38614
|
[
"MIT"
] | 2
|
2020-11-20T18:40:32.000Z
|
2021-04-20T23:13:13.000Z
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
barbosamp/automacao-api-rest-jornada-learning
|
9ceb57bc6f4d845c35a149d760775c10c3a38614
|
[
"MIT"
] | 1
|
2020-10-22T16:16:40.000Z
|
2020-10-22T16:16:40.000Z
|
ServeRest-APITesting-Python/Tests/test_cart.py
|
kpedron/automacao-api-rest-jornada-learning
|
50ceaf9f43b03383cc65e92460b6b9a398a88e02
|
[
"MIT"
] | 2
|
2020-10-16T02:37:20.000Z
|
2020-10-31T13:54:46.000Z
|
import unittest
import requests
import json
import pytest
# Base URL of the live ServeRest demo API exercised by these tests.
BASE_URL = "https://api.serverest.dev"


class Products(unittest.TestCase):
    """Integration tests for the ServeRest cart ("/carrinhos") endpoints.

    NOTE(review): the class is named ``Products`` but tests carts -- likely a
    copy-paste from a products test module; consider renaming.  These tests
    hit the live demo API over the network and log in as a fixed demo user.
    """

    def setUp(self):
        """Log in and store the bearer token; cart writes require auth."""
        # Do authentication
        # Cart endpoint requires authentication
        full_url = BASE_URL + "/login"
        body = {
            "email": "fulano@qa.com",
            "password": "teste"
        }
        response = requests.post(url=full_url, json=body)
        if response.status_code != 200:
            # Second positional arg is pytrace=False: suppress the traceback.
            pytest.fail("Some problem to get authorization token \n", False)
        response_json = json.loads(response.text)
        self.token = response_json["authorization"]

    def test_get_all_cart(self):
        """Listing all carts is public and must return 200."""
        full_url = BASE_URL + "/carrinhos"
        # Send HTTP Request
        response = requests.get(url=full_url)
        # Check the response from ServeRest
        self.assertEqual(response.status_code, 200, "Error in status code to get all carts")

    def test_create_cart_to_user(self):
        """Create a cart for the authenticated user, then clean it up."""
        full_url = BASE_URL + "/carrinhos"
        body = {
            "produtos": [
                {
                    "idProduto": "K6leHdftCeOJj8BJ",
                    "quantidade": 2
                }
            ]
        }
        header = {"Authorization": self.token}
        # Send HTTP Request
        response = requests.post(url=full_url, headers=header, json=body)
        # Check the response from ServeRest
        self.assertEqual(response.status_code, 201, "Error in status code to create a cart")
        response_json = json.loads(response.text)
        self.assertEqual(response_json["message"], "Cadastro realizado com sucesso")
        # Now we will delete the cart (this is a good practice)
        # Buy the item will delete the cart automatically
        full_url = BASE_URL + "/carrinhos/concluir-compra"
        # The endpoint delete the cart using the Authorization token from the user
        response = requests.delete(url=full_url, headers=header)
        self.assertEqual(response.status_code, 200, "Error in status code to delete a cart")

    def test_get_cart_from_specific_user(self):
        """Filtering carts by user id must return 200."""
        full_url = BASE_URL + "/carrinhos"
        query = {"idUsuario": "K6leHdftCeOJj8BJ"}
        # Send HTTP Request
        response = requests.get(url=full_url, params=query)
        self.assertEqual(response.status_code, 200, "Error in status code to get a cart")

    def test_create_cart_without_authentication(self):
        """Creating a cart without a token must be rejected with 401."""
        full_url = BASE_URL + "/carrinhos"
        body = {
            "produtos": [
                {
                    "idProduto": "K6leHdftCeOJj8BJ",
                    "quantidade": 2
                }
            ]
        }
        # Send HTTP Request
        response = requests.post(url=full_url, json=body)
        # Check the response from ServeRest
        self.assertEqual(response.status_code, 401)
        response_json = json.loads(response.text)
        self.assertEqual(response_json["message"], "Token de acesso ausente, inválido, expirado ou usuário "
                                                   "do token não existe mais")

    def test_create_cart_unknown_product(self):
        """Creating a cart with an unknown product id must fail with 400."""
        full_url = BASE_URL + "/carrinhos"
        body = {
            "produtos": [
                {
                    "idProduto": "234",
                    "quantidade": 4
                }
            ]
        }
        header = {"Authorization": self.token}
        # Send HTTP Request
        response = requests.post(url=full_url, headers=header, json=body)
        # Check the response from ServeRest
        self.assertEqual(response.status_code, 400)
        response_json = json.loads(response.text)
        self.assertEqual(response_json["message"], "Produto não encontrado")
| 32.017094
| 108
| 0.591564
| 406
| 3,746
| 5.317734
| 0.278325
| 0.045391
| 0.095878
| 0.045391
| 0.609541
| 0.579435
| 0.566929
| 0.53358
| 0.51598
| 0.432145
| 0
| 0.012901
| 0.317138
| 3,746
| 116
| 109
| 32.293103
| 0.831118
| 0.12173
| 0
| 0.351351
| 0
| 0
| 0.200672
| 0.007941
| 0
| 0
| 0
| 0
| 0.121622
| 1
| 0.081081
| false
| 0.013514
| 0.054054
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|