hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
686a658b280bae15ace2b7f671539c4dd882685a | 1,943 | py | Python | SampleProblem/classDef.py | kamyarg/hake | 0aa9d43760f2c0f6c0321d69bacc1f6af0c5684e | [
"MIT"
] | null | null | null | SampleProblem/classDef.py | kamyarg/hake | 0aa9d43760f2c0f6c0321d69bacc1f6af0c5684e | [
"MIT"
] | null | null | null | SampleProblem/classDef.py | kamyarg/hake | 0aa9d43760f2c0f6c0321d69bacc1f6af0c5684e | [
"MIT"
] | null | null | null | class Matrix:
def __init__(self, x, y):
self.row = x
self.column = y
self.matrix = []
self.firstPointer = -1
self.secondPointer = -1
self.HorizontalLines = []
self.VerticalLines = []
def addLine(self, line):
self.matrix.append(list(line))
def __repr__(self):
return str(self.matrix)
def cell(self, x, y):
if x < self.row and y < self.column:
return self.matrix[x][y]
else:
return '-'
def startSearchingVertical(self):
push = False
for i in range(self.column):
for j in range(self.row):
                if self.isCellBlack(j, i):
                    self.markingVert(i, j)
                else:
                    push = True
                if j == self.row - 1 or push:
t = (i, self.firstPointer, self.secondPointer)
if self.firstPointer != -1 and self.secondPointer != -1:
self.VerticalLines.append(t)
self.firstPointer = -1
self.secondPointer = -1
push = False
        # print(self.VerticalLines)
        # print(len(self.VerticalLines))
def markingVert(self, i, j):
if self.firstPointer == -1:
self.firstPointer = j
self.secondPointer = j
else:
self.secondPointer = j
def startSearchingHorizontal(self):
push = False
for i in range(self.row):
for j in range(self.column):
                if self.isCellBlack(i, j):
                    self.markingHor(i, j)
                else:
                    push = True
                if j == self.column - 1 or push:
t = (i, self.firstPointer, self.secondPointer)
if self.firstPointer != -1 and self.secondPointer != -1:
self.HorizontalLines.append(t)
self.firstPointer = -1
self.secondPointer = -1
push = False
        # print(self.HorizontalLines)
        # print(len(self.HorizontalLines))
def markingHor(self, i, j):
if self.firstPointer == -1:
self.firstPointer = j
self.secondPointer = j
else:
self.secondPointer = j
    def isCellBlack(self, i, j):
        return self.cell(i, j) == '#'
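# Hedged usage sketch (grid values assumed, not part of the original file):
# '#' marks black cells; each recorded tuple is (row, first_col, last_col).
if __name__ == '__main__':
    m = Matrix(3, 4)
    for row in ('##..', '.#.#', '####'):
        m.addLine(row)
    m.startSearchingHorizontal()
    print(m.HorizontalLines)  # [(0, 0, 1), (1, 1, 1), (1, 3, 3), (2, 0, 3)]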
| 23.409639 | 61 | 0.637674 | 264 | 1,943 | 4.662879 | 0.159091 | 0.142973 | 0.096669 | 0.085297 | 0.614947 | 0.488221 | 0.427295 | 0.427295 | 0.381803 | 0.381803 | 0 | 0.009383 | 0.232115 | 1,943 | 82 | 62 | 23.695122 | 0.815684 | 0.056613 | 0 | 0.430769 | 0 | 0 | 0.001093 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.138462 | false | 0 | 0 | 0.015385 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
686c30b118f9a2fbbff7cd7ba55300c65e53153a | 3,248 | py | Python | utils/misc.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | utils/misc.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | utils/misc.py | lx10077/optimpy | 8d3a4faa1e7291297497446fc77df5409acd73b9 | [
"MIT"
] | null | null | null | import errno
import os
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.nn.init as init
__all__ = ['make_train_path', 'make_soft_link', 'get_mean_and_std', 'init_params', 'mkdir_p', 'mkdir', 'AverageMeter']
def make_train_path(train_prefix=None):
# make train dir
cwd = os.path.dirname(__file__)
path = os.path.dirname(cwd)
assert path[-6:] == 'config', path
basename = os.path.basename(cwd)
if train_prefix is not None:
base_train_path = os.path.join(train_prefix)
if not os.path.exists(base_train_path):
os.makedirs(base_train_path)
make_soft_link(base_train_path, os.path.join(path[:-6], 'train_log'))
pre_train_path = os.path.join(path[:-6], 'train_log', basename)
train_path = os.path.join(cwd, 'train_log')
if not os.path.exists(pre_train_path):
os.makedirs(pre_train_path)
make_soft_link(pre_train_path, train_path)
return train_path
def make_soft_link(base_path, path):
if not os.path.exists(path):
os.system('ln -s {} {}'.format(base_path, path))
elif os.path.realpath(path) != os.path.realpath(base_path):
os.system('rm {}'.format(path))
os.system('ln -s {} {}'.format(base_path, path))
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
def get_mean_and_std(dataset):
"""Compute the mean and std value of dataset. """
dataloader = data.DataLoader(dataset, batch_size=1, shuffle=True, num_workers=2)
mean = torch.zeros(3)
std = torch.zeros(3)
print('==> Computing mean and std..')
for inputs, targets in dataloader:
for i in range(3):
mean[i] += inputs[:, i, :, :].mean()
std[i] += inputs[:, i, :, :].std()
mean.div_(len(dataset))
std.div_(len(dataset))
return mean, std
def init_params(net):
"""Init layer parameters. """
for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal_(m.weight, mode='fan_out')
            # `is not None` avoids the ambiguous truth value of a tensor.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
def mkdir_p(path):
"""make dir if not exist and print msg out if exist. """
try:
os.makedirs(path)
except OSError as exc: # Python > 2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
print("File {} exists.".format(path))
pass
else:
raise
class AverageMeter(object):
"""Computes and stores the average and current value
Imported from https://github.com/pytorch/examples/blob/master/imagenet/helper.py#L247-L262
"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
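# Hedged usage sketch (values assumed): track a batch-size-weighted mean.
# meter = AverageMeter()
# meter.update(0.9, n=32)
# meter.update(0.7, n=32)
# meter.avg  # -> 0.8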
| 28.743363 | 118 | 0.601909 | 470 | 3,248 | 4.012766 | 0.291489 | 0.044539 | 0.031813 | 0.031813 | 0.267762 | 0.206257 | 0.191941 | 0.191941 | 0.149523 | 0.043478 | 0 | 0.013784 | 0.262931 | 3,248 | 112 | 119 | 29 | 0.774018 | 0.08867 | 0 | 0.228916 | 0 | 0 | 0.064935 | 0 | 0 | 0 | 0 | 0 | 0.012048 | 1 | 0.108434 | false | 0.012048 | 0.072289 | 0 | 0.228916 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
686d3e3b3e145b3082dd94f67202722a5db2b036 | 386 | py | Python | scripts/twice.py | szkkt/robosys2 | 893dba7d4aea549517e8a47f016ac3fd9f595552 | [
"BSD-3-Clause"
] | null | null | null | scripts/twice.py | szkkt/robosys2 | 893dba7d4aea549517e8a47f016ac3fd9f595552 | [
"BSD-3-Clause"
] | null | null | null | scripts/twice.py | szkkt/robosys2 | 893dba7d4aea549517e8a47f016ac3fd9f595552 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
"""
Copyright (C) 2022 Ueda Ryuichi, Suzuki Keita All Rights Reserved.
"""
import rospy
import math
from std_msgs.msg import Int32
def cb(message):
    # Log the received value when it is composite, i.e. when it has a
    # divisor in the range [2, message.data).
    for i in range(2, message.data):
        if message.data % i == 0:
            rospy.loginfo(message.data)
            return
rospy.init_node('twice')
sub = rospy.Subscriber('count_up', Int32, cb)
rospy.spin()
| 20.315789 | 65 | 0.663212 | 57 | 386 | 4.438596 | 0.754386 | 0.130435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.036066 | 0.209845 | 386 | 18 | 66 | 21.444444 | 0.793443 | 0.225389 | 0 | 0 | 0 | 0 | 0.044828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
686f5d51ad46d2a9bc8f8c07c07683ed18582d1f | 566 | py | Python | examples/plugin/mocasin-example-plugin/mocasin_example_plugin/graph.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | 1 | 2022-03-13T19:27:50.000Z | 2022-03-13T19:27:50.000Z | examples/plugin/mocasin-example-plugin/mocasin_example_plugin/graph.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | examples/plugin/mocasin-example-plugin/mocasin_example_plugin/graph.py | tud-ccc/mocasin | 6cf0a169e24d65d0fc859398f181dd500f928340 | [
"0BSD"
] | null | null | null | # Copyright (C) 2021 TU Dresden
# Licensed under the ISC license (see LICENSE.txt)
#
# Authors: Christian Menard
from mocasin.common.graph import DataflowGraph, DataflowProcess, DataflowChannel
class ExampleGraph(DataflowGraph):
def __init__(self):
super().__init__("example")
a = DataflowProcess("a")
b = DataflowProcess("b")
c = DataflowChannel("c", 16)
self.add_process(a)
self.add_process(b)
self.add_channel(c)
a.connect_to_outgoing_channel(c)
b.connect_to_incomming_channel(c)
| 23.583333 | 80 | 0.671378 | 67 | 566 | 5.41791 | 0.597015 | 0.057851 | 0.077135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0.226148 | 566 | 23 | 81 | 24.608696 | 0.815068 | 0.183746 | 0 | 0 | 0 | 0 | 0.021882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.083333 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68720fe3a4d1ec55283f7be8bb25153af6358462 | 3,007 | py | Python | src/ui/widgets/frame.py | Rabbithy/Fyks | 8a2e8fac75b445ae8a608dc873a732c6d66a0f6b | [
"MIT"
] | 1 | 2020-06-11T03:39:40.000Z | 2020-06-11T03:39:40.000Z | src/ui/widgets/frame.py | Rabbithy/Fyks | 8a2e8fac75b445ae8a608dc873a732c6d66a0f6b | [
"MIT"
] | 6 | 2020-10-19T23:08:27.000Z | 2020-11-24T12:03:59.000Z | src/ui/widgets/frame.py | Rabbithy/Fyks | 8a2e8fac75b445ae8a608dc873a732c6d66a0f6b | [
"MIT"
] | null | null | null | from pyglet import gl
from ui import widgets, elements
import graphicutils as gu
class Frame(widgets.Widget, elements.Frame):
def __init__(self, x, y, w, h, parent=None):
super().__init__(x, y, w, h, parent)
self.color = (0.9, 0.9, 0.9, 1)
self.border_color = (0, 0, 0, 0)
self.border_radius = 0
self.elements = []
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
if self.hover:
for widget in self.elements:
if widget.is_visible:
widget.on_mouse_scroll(
x=x, y=y,
scroll_x=scroll_x,
scroll_y=scroll_y)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
for widget in self.elements:
if widget.is_visible:
widget.on_mouse_drag(
x=x, y=y,
dx=dx, dy=dy,
buttons=buttons,
modifiers=modifiers
)
def on_mouse_motion(self, x, y, dx, dy):
for widget in self.elements:
if widget.is_visible:
widget.on_mouse_motion(x, y, dx, dy)
def on_mouse_press(self, x, y, button, modifiers):
super().on_mouse_press(x, y, button, modifiers)
if self.hover:
hover_widget = None
for widget in self.elements:
if widget.is_visible:
if widget.is_hover(x, y):
hover_widget = widget
self.pressed = False
else:
widget.pressed = False
            if hover_widget is not None and not self.pressed:
                hover_widget.on_mouse_press(
                    x=x, y=y,
                    button=button,
                    modifiers=modifiers)
def on_mouse_release(self, x, y, button, modifiers):
for widget in self.elements:
if widget.is_visible:
widget.on_mouse_release(x, y, button, modifiers)
def on_key_press(self, symbol, modifiers):
for widget in self.elements:
if widget.is_visible:
widget.on_key_press(symbol, modifiers)
def update(self, dt):
for widget in self.elements:
if widget.is_visible:
widget.update(dt)
def draw_widgets(self):
for widget in sorted(self.elements, key=lambda i: i.z_index):
if widget.is_visible:
widget.draw()
def draw(self):
self.update_viewport()
        # Fill the rounded background, then stroke the border as a line loop.
        gl.glColor4f(*self.color)
gu.draw_rounded_rect(
0, 0,
self.width,
self.height,
self.border_radius,
gl.GL_POLYGON)
gl.glColor4f(*self.border_color)
gu.draw_rounded_rect(
0, 0,
self.width,
self.height,
self.border_radius,
gl.GL_LINE_LOOP)
self.draw_widgets()
| 30.683673 | 69 | 0.50848 | 360 | 3,007 | 4.058333 | 0.194444 | 0.019165 | 0.061602 | 0.093087 | 0.445585 | 0.341547 | 0.341547 | 0.341547 | 0.341547 | 0.314168 | 0 | 0.010061 | 0.405055 | 3,007 | 97 | 70 | 31 | 0.806596 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.0375 | 0 | 0.175 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6873c7f516cdb2204d177ae626e9149c228a16bf | 1,774 | py | Python | sleekxmpp/plugins/xep_0096/file_transfer.py | silkworm3725/https-github.com-fritzy-SleekXMPP | e5582694c07236e6830c20361840360a1dde37f3 | [
"BSD-3-Clause"
] | 4 | 2015-03-25T19:12:05.000Z | 2020-10-21T12:27:00.000Z | sleekxmpp/plugins/xep_0096/file_transfer.py | silkworm3725/https-github.com-fritzy-SleekXMPP | e5582694c07236e6830c20361840360a1dde37f3 | [
"BSD-3-Clause"
] | 4 | 2017-08-21T08:17:14.000Z | 2018-03-02T13:51:43.000Z | sleekxmpp/plugins/xep_0096/file_transfer.py | silkworm3725/https-github.com-fritzy-SleekXMPP | e5582694c07236e6830c20361840360a1dde37f3 | [
"BSD-3-Clause"
] | 5 | 2015-03-09T18:09:45.000Z | 2018-10-08T09:00:09.000Z | """
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2013 Nathanael C. Fritz, Lance J.T. Stout
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import logging
from sleekxmpp import Iq, Message
from sleekxmpp.plugins import BasePlugin
from sleekxmpp.xmlstream.handler import Callback
from sleekxmpp.xmlstream.matcher import StanzaPath
from sleekxmpp.xmlstream import register_stanza_plugin, JID
from sleekxmpp.plugins.xep_0096 import stanza, File
log = logging.getLogger(__name__)
class XEP_0096(BasePlugin):
name = 'xep_0096'
description = 'XEP-0096: SI File Transfer'
dependencies = set(['xep_0095'])
stanza = stanza
def plugin_init(self):
register_stanza_plugin(self.xmpp['xep_0095'].stanza.SI, File)
self.xmpp['xep_0095'].register_profile(File.namespace, self)
def session_bind(self, jid):
self.xmpp['xep_0030'].add_feature(File.namespace)
def plugin_end(self):
self.xmpp['xep_0030'].del_feature(feature=File.namespace)
self.xmpp['xep_0095'].unregister_profile(File.namespace, self)
def request_file_transfer(self, jid, sid=None, name=None, size=None,
desc=None, hash=None, date=None,
allow_ranged=False, mime_type=None,
**iqargs):
data = File()
data['name'] = name
data['size'] = size
data['date'] = date
data['desc'] = desc
if allow_ranged:
data.enable('range')
return self.xmpp['xep_0095'].offer(jid,
sid=sid,
mime_type=mime_type,
profile=File.namespace,
payload=data,
**iqargs)
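# Hedged usage sketch (the peer JID and file metadata are assumptions):
# xmpp.register_plugin('xep_0096')
# xmpp['xep_0096'].request_file_transfer(
#     'peer@example.com/laptop',
#     sid='s5b_1', name='report.pdf', size=14560, desc='quarterly report')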
| 30.067797 | 72 | 0.62345 | 213 | 1,774 | 5.042254 | 0.399061 | 0.072626 | 0.061453 | 0.055866 | 0.050279 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037325 | 0.275085 | 1,774 | 58 | 73 | 30.586207 | 0.797823 | 0.094138 | 0 | 0 | 0 | 0 | 0.07012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.184211 | 0 | 0.447368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6875d189d6569ebb3393fe16ec1e85259d697d41 | 2,517 | py | Python | awwards/tests.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null | awwards/tests.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null | awwards/tests.py | Kipkorir2017/Proj_Awwards | 3b5f898b725e14f28448019f85306845ecefe3a2 | [
"MIT"
] | null | null | null |
from django.test import TestCase
from .models import Profile, Project, Rates
from django.contrib.auth.models import User
class ProfileTestCase(TestCase):
"""
Test for the profile class
"""
def setUp(self):
self.user = User(username='kipkorir')
self.user.save()
        self.profile = Profile(id=4, profile_pic='image.jpg', bio='test profile', contact='0722345678',
                               user=self.user)
def test_instance(self):
self.assertTrue(isinstance(self.profile, Profile))
def test_save_profile(self):
self.profile.save_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile) > 0)
class ProjectTestClass(TestCase):
def setUp(self):
        self.user = User.objects.create_user(username="username", password="password")
        self.new_profile = Profile(id=4, profile_pic='image.jpg', bio='Test profile', contact='0722345678',
                                   user=self.user)
self.new_profile.save()
        self.new_project = Project(image='image.png', title="image", url='http', description='test profile description', date='25/06/2021',
                                   profile=self.new_profile)
def test_instance_true(self):
self.assertTrue(isinstance(self.new_project, Project))
def test_save_project(self):
self.new_project.save_project()
proj = Project.objects.all()
self.assertTrue(len(proj) == 1)
def test_delete_project(self):
self.new_project.save_project()
self.new_project.delete_project()
img = Profile.objects.all()
self.assertTrue(len(img) <= 1)
def test_project_by_id(self):
self.new_project.save_project()
proj = self.new_project.project_by_id(self.new_project.id)
images = Project.objects.filter(id=self.new_project.id)
self.assertTrue(proj, images)
class RatesTestCase(TestCase):
def setUp(self):
self.user = User(username='kipkorir')
self.user.save()
        self.new_profile = Profile(id=2, profile_pic='image.png', bio='test profile', user=self.user)
self.new_profile.save()
        self.new_project = Project(image='image.png', title="image", url='http', description='test profile description', date='25/06/2021',
                                   profile=self.new_profile)
        self.rate = Rates(design='assssay', usability='good', content='good work', project=self.new_project, date="28/06/2021")
def test_instance(self):
self.assertTrue(isinstance(self.rate, Rates)) | 35.957143 | 137 | 0.653159 | 316 | 2,517 | 5.072785 | 0.21519 | 0.074236 | 0.09607 | 0.052402 | 0.612601 | 0.537118 | 0.494697 | 0.401747 | 0.343107 | 0.343107 | 0 | 0.025342 | 0.21613 | 2,517 | 70 | 138 | 35.957143 | 0.787126 | 0.010727 | 0 | 0.4 | 0 | 0 | 0.100647 | 0 | 0 | 0 | 0 | 0 | 0.14 | 1 | 0.2 | false | 0.02 | 0.06 | 0 | 0.32 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6878784fdc4895cf6aca9bfc45aa4ad88c87037f | 10,943 | py | Python | tapiriik/services/PolarFlow/polarflow.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | null | null | null | tapiriik/services/PolarFlow/polarflow.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | null | null | null | tapiriik/services/PolarFlow/polarflow.py | Decathlon/exercisync | e9df9d4f2210fff8cfc8b34e2e5f9d09d84bddef | [
"Apache-2.0"
] | null | null | null | # Synchronization module for flow.polar.com
# (c) 2018 Anton Ashmarin, aashmarin@gmail.com
from tapiriik.settings import WEB_ROOT, POLAR_CLIENT_SECRET, POLAR_CLIENT_ID, POLAR_RATE_LIMITS
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.api import APIException, UserException, UserExceptionType, APIExcludeActivity
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit
from tapiriik.services.fit import FITIO
from tapiriik.database import redis
from datetime import datetime, timedelta
from django.urls import reverse
from urllib.parse import urlencode
from requests.auth import HTTPBasicAuth
from io import StringIO
import uuid
import gzip
import logging
import lxml
import pytz
import requests
import isodate
import json
logger = logging.getLogger(__name__)
class PolarFlowService(ServiceBase):
ID = "polarflow"
DisplayName = "Polar Flow"
DisplayAbbreviation = "PF"
AuthenticationType = ServiceAuthenticationType.OAuth
AuthenticationNoFrame = True # otherwise looks ugly in the small frame
UserProfileURL = "https://flow.polar.com/training/profiles/{0}"
UserActivityURL = "https://flow.polar.com/training/analysis/{1}"
SupportsHR = SupportsCalories = SupportsCadence = SupportsTemp = SupportsPower = True
ReceivesActivities = False # polar accesslink does not support polar data change.
GlobalRateLimits = POLAR_RATE_LIMITS
PartialSyncRequiresTrigger = True
PartialSyncTriggerPollInterval = timedelta(minutes=1)
# For mapping common->Polar Flow (text has no meaning due to upload unsupported)
_activity_type_mappings = {
ActivityType.Cycling: "Ride",
ActivityType.MountainBiking: "Ride",
ActivityType.Hiking: "Hike",
ActivityType.Running: "Run",
ActivityType.Walking: "Walk",
ActivityType.Snowboarding: "Snowboard",
ActivityType.Skating: "IceSkate",
ActivityType.CrossCountrySkiing: "NordicSki",
ActivityType.DownhillSkiing: "AlpineSki",
ActivityType.Swimming: "Swim",
ActivityType.Gym: "Workout",
ActivityType.Rowing: "Rowing",
ActivityType.RollerSkiing: "RollerSki",
ActivityType.StrengthTraining: "WeightTraining",
ActivityType.Climbing: "RockClimbing",
ActivityType.Wheelchair: "Wheelchair",
ActivityType.Other: "Other",
}
# Polar Flow -> common
_reverse_activity_type_mappings = {
"RUNNING": ActivityType.Running,
"JOGGING": ActivityType.Running,
"ROAD_RUNNING": ActivityType.Running,
"TRACK_AND_FIELD_RUNNING": ActivityType.Running,
"TRAIL_RUNNING": ActivityType.Running,
"TREADMILL_RUNNING": ActivityType.Running,
"CYCLING": ActivityType.Cycling,
"ROAD_BIKING": ActivityType.Cycling,
"INDOOR_CYCLING": ActivityType.Cycling,
"MOUNTAIN_BIKING": ActivityType.MountainBiking,
"WALKING": ActivityType.Walking,
"HIKING": ActivityType.Hiking,
"DOWNHILL_SKIING": ActivityType.DownhillSkiing,
"CROSS-COUNTRY_SKIING": ActivityType.CrossCountrySkiing,
"SNOWBOARDING": ActivityType.Snowboarding,
"SKATING": ActivityType.Skating,
"SWIMMING": ActivityType.Swimming,
"OPEN_WATER_SWIMMING": ActivityType.Swimming,
"POOL_SWIMMING": ActivityType.Swimming,
"PARASPORTS_WHEELCHAIR": ActivityType.Wheelchair,
"ROWING": ActivityType.Rowing,
"INDOOR_ROWING": ActivityType.Rowing,
"STRENGTH_TRAINING": ActivityType.StrengthTraining,
"OTHER_INDOOR": ActivityType.Other,
"OTHER_OUTDOOR": ActivityType.Other,
"ROLLER_SKIING_CLASSIC": ActivityType.RollerSkiing,
"ROLLER_SKIING_FREESTYLE": ActivityType.RollerSkiing,
# not supported somehow
#"": ActivityType.Elliptical,
"FUNCTIONAL_TRAINING": ActivityType.Gym,
"CORE": ActivityType.Gym,
"GROUP_EXERCISE": ActivityType.Gym,
"PILATES": ActivityType.Gym,
"YOGA": ActivityType.Gym,
"VERTICALSPORTS_WALLCLIMBING": ActivityType.Climbing,
}
SupportedActivities = list(_activity_type_mappings.keys())
_api_endpoint = "https://www.polaraccesslink.com"
    def __init__(self):
        self.logger = logging.getLogger('PolarFlow SVC')
def _register_user(self, access_token):
headers = {
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": "Bearer {}".format(access_token)
}
        res = requests.post(self._api_endpoint + "/v3/users",
                            json={"member-id": uuid.uuid4().hex},
                            headers=headers)
        # Surface HTTP errors so callers can handle 409 Conflict
        # (user already registered) explicitly.
        res.raise_for_status()
        return res.status_code == 200
def _delete_user(self, serviceRecord):
        requests.delete(self._api_endpoint + "/v3/users/{userid}".format(userid=serviceRecord.ExternalID),
                        headers=self._api_headers(serviceRecord))
    def _api_headers(self, serviceRecord, headers=None):
        # Avoid the shared mutable default-argument pitfall.
        headers = dict(headers) if headers else {}
        headers.update({"Authorization": "Bearer {}".format(serviceRecord.Authorization["OAuthToken"])})
        return headers
def WebInit(self):
params = {'response_type':'code',
'client_id': POLAR_CLIENT_ID,
'redirect_uri': WEB_ROOT + reverse("oauth_return", kwargs={"service": "polarflow"})}
self.UserAuthorizationURL = "https://flow.polar.com/oauth2/authorization?" + urlencode(params)
def RetrieveAuthorizationToken(self, req, level):
code = req.GET.get("code")
params = {"grant_type": "authorization_code",
"code": code,
"redirect_uri": WEB_ROOT + reverse("oauth_return", kwargs={"service": "polarflow"})}
response = requests.post("https://polarremote.com/v2/oauth2/token", data=params, auth=HTTPBasicAuth(POLAR_CLIENT_ID, POLAR_CLIENT_SECRET))
data = response.json()
if response.status_code != 200:
raise APIException(data["error"])
authorizationData = {"OAuthToken": data["access_token"]}
userId = data["x_user_id"]
try:
self._register_user(data["access_token"])
except requests.exceptions.HTTPError as err:
# Error 409 Conflict means that the user has already been registered for this client.
# That error can be ignored
if err.response.status_code != 409:
raise APIException("Unable to link user", block=True, user_exception=UserException(UserExceptionType.Authorization, intervention_required=True))
return (userId, authorizationData)
def RevokeAuthorization(self, serviceRecord):
self._delete_user(serviceRecord)
def SubscribeToPartialSyncTrigger(self, serviceRecord):
# There is no per-user webhook subscription with Polar Flow.
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
# As above.
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def DownloadActivityList(self, serviceRecord, exhaustive=False):
activities = []
exclusions = []
logging.info("\tPolar Start DownloadActivityList")
redis_key = "polarflow:webhook:"+str(serviceRecord.ExternalID)
activity_urls_list = redis.lrange(redis_key, 0, -1)
for act_url in activity_urls_list:
# We delete it from the redis list to avoid syncing a second time
            # For a strange reason we have to do:
            # redis.lrem(key, value)
            # even though redis, redis-py docs and the signature of the function
            # in the container ask to do:
            # redis.lrem(key, count, value)
            result = redis.lrem(redis_key, act_url)
            if result == 0:
                logger.warning("Can't delete the activity id from the redis key %s" % (redis_key))
            elif result > 1:
                logger.warning("Found the activity id more than once in the redis key %s" % (redis_key))
response = requests.get(act_url.decode('utf-8')+"/fit", headers=self._api_headers(serviceRecord, {"Accept": "*/*"}))
activity_id = act_url.decode('utf-8').split('/')[-1]
if response.status_code == 404:
# Activity not found
exclusions.append(APIExcludeActivity("Can't find an activity for this user at this URL %s" % act_url.decode('utf-8'), activity_id=activity_id, user_exception=UserException(UserExceptionType.DownloadError)))
logging.warning("Can't find an activity with ID %s for POLARFLOW user ID %s" % (activity_id, serviceRecord.ExternalID))
continue
elif response.status_code == 204:
exclusions.append(APIExcludeActivity("FIT file does not exist for this user at this URL %s" % act_url.decode('utf-8'), activity_id=activity_id, user_exception=UserException(UserExceptionType.DownloadError)))
logging.warning("FIT file does not exist for activity with ID %s for POLARFLOW user ID %s" % (activity_id, serviceRecord.ExternalID))
continue
elif response.status_code == 401 or response.status_code == 403:
raise APIException("%i - No authorization to get activity for the user with POLARFLOW ID '%s' the user's token may have expired or been corrupted" %(response.status_code, serviceRecord.ExternalID), block=True,
user_exception=UserException(UserExceptionType.Authorization,
intervention_required=True))
activity = FITIO.Parse(response.content)
activity.SourceServiceID = self.ID
activity.ServiceData = {"ActivityID": activity_id}
activities.append(activity)
logger.info("Successfully downloaded %i/%i activities for POLARFLOW user ID %s" % (len(activities),len(activity_urls_list),serviceRecord.ExternalID))
return activities, exclusions
def DownloadActivity(self, serviceRecord, activity):
return activity
def DeleteCachedData(self, serviceRecord):
# Nothing to delete
pass
def DeleteActivity(self, serviceRecord, uploadId):
# Not supported
pass
def UploadActivity(self, serviceRecord, activity):
# Not supported
pass
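    # Hedged sketch of the webhook payload consumed below (the field names
    # match the parsing in ExternalIDsForPartialSyncTrigger; the concrete
    # values are assumptions):
    # {"event": "EXERCISE",
    #  "user_id": 475,
    #  "url": "https://www.polaraccesslink.com/v3/exercises/aQlC83"}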
def ExternalIDsForPartialSyncTrigger(self, req):
data = json.loads(req.body.decode("UTF-8"))
# Get user ids to sync
external_user_ids = []
if data.get("event") == "EXERCISE":
# Pushing the callback url in redis that will be used in downloadActivityList
redis.rpush("polarflow:webhook:%s" % data["user_id"], data["url"])
external_user_ids.append(data["user_id"])
return external_user_ids | 42.414729 | 225 | 0.675318 | 1,113 | 10,943 | 6.511231 | 0.329739 | 0.023458 | 0.017387 | 0.023734 | 0.155927 | 0.118256 | 0.112598 | 0.112598 | 0.112598 | 0.112598 | 0 | 0.005567 | 0.228457 | 10,943 | 258 | 226 | 42.414729 | 0.852777 | 0.084712 | 0 | 0.027322 | 0 | 0.005464 | 0.182628 | 0.011508 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081967 | false | 0.016393 | 0.103825 | 0.005464 | 0.338798 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6879d8225f50be2a281d66e38588926ff54a33c8 | 667 | py | Python | deprecated/rcbu/common/duration.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 4 | 2015-02-10T14:28:12.000Z | 2016-12-26T22:52:07.000Z | deprecated/rcbu/common/duration.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 17 | 2015-01-22T21:58:36.000Z | 2018-01-25T19:47:43.000Z | deprecated/rcbu/common/duration.py | nloadholtes/python-cloudbackup-sdk | 1866e23aaaac41c35be4cb6ab964fcd0ba9a8fe6 | [
"Apache-2.0"
] | 9 | 2015-01-26T19:25:45.000Z | 2018-11-01T20:14:12.000Z | from rcbu.common.assertions import assert_bounded
def seconds(time):
'''Given %H:%M:%S -> seconds. Hours can be arbitrarily large.'''
try:
hours, minutes, seconds = [int(f) for f in time.split(':')]
except ValueError:
msg = 'expecting format %H:%M:%S, not {0}'.format(time)
raise ValueError(msg)
assert_bounded('minutes', 0, 59, minutes)
assert_bounded('seconds', 0, 59, seconds)
return hours * 3600 + minutes * 60 + seconds
def tuple(seconds):
'''Returns (hours, minutes, seconds) from seconds.'''
return (seconds // 3600,
(seconds // 60) - ((seconds // 3600) * 60),
seconds % 60)
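# Hedged usage sketch (values assumed, not part of the original module):
# seconds('1:02:03')  ->  3723
# tuple(3723)         ->  (1, 2, 3)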
| 30.318182 | 68 | 0.602699 | 83 | 667 | 4.807229 | 0.481928 | 0.097744 | 0.015038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053571 | 0.244378 | 667 | 21 | 69 | 31.761905 | 0.738095 | 0.158921 | 0 | 0 | 0 | 0 | 0.089091 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
687cb994d0d82247a6011bd8572dbc8d0da52620 | 420 | py | Python | tests/utils.py | nalind/cekit | f54345bb2c0f38c19adb7b8afa9272b9264591a9 | [
"MIT"
] | 1 | 2018-01-17T16:11:57.000Z | 2018-01-17T16:11:57.000Z | tests/utils.py | nalind/cekit | f54345bb2c0f38c19adb7b8afa9272b9264591a9 | [
"MIT"
] | 39 | 2017-12-12T09:32:33.000Z | 2018-02-27T16:04:48.000Z | tests/utils.py | nalind/cekit | f54345bb2c0f38c19adb7b8afa9272b9264591a9 | [
"MIT"
] | 2 | 2017-12-14T17:10:47.000Z | 2018-01-08T19:16:21.000Z | from collections import OrderedDict
def merge_dicts(*dict_args):
"""
Python 2/3 compatible method to merge dictionaries.
Ref: https://stackoverflow.com/questions/38987/how-to-merge-two-dictionaries-in-a-single-expression
:param dict_args: Dictionaries.
:return: Merged dicts.
"""
result = OrderedDict()
for dictionary in dict_args:
result.update(dictionary)
return result
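# Hedged usage sketch (values assumed): later dicts win on duplicate keys.
# merge_dicts({'a': 1}, {'a': 2, 'b': 3})
# -> OrderedDict([('a', 2), ('b', 3)])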
| 24.705882 | 103 | 0.709524 | 51 | 420 | 5.764706 | 0.686275 | 0.081633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020649 | 0.192857 | 420 | 16 | 104 | 26.25 | 0.846608 | 0.495238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.166667 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68806dc2d88531a995fed01b0b407210770c25e7 | 2,596 | py | Python | bundle-workflow/src/manifests/build_manifest.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | bundle-workflow/src/manifests/build_manifest.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | bundle-workflow/src/manifests/build_manifest.py | kavilla/opensearch-build | a0f2614140b8c3243609e80010f190baa1f5642b | [
"Apache-2.0"
] | null | null | null | # Copyright OpenSearch Contributors.
# SPDX-License-Identifier: Apache-2.0
import yaml
"""
A BuildManifest is an immutable view of the outputs from a build step
The manifest contains information about the product that was built (in the `build` section),
and the components that made up the build in the `components` section.
The format for schema version 1.0 is:
schema-version: 1.0
build:
name: string
version: string
architecture: x64 or arm64
components:
- name: string
repository: URL of git repository
ref: git ref that was built (sha, branch, or tag)
commit_id: The actual git commit ID that was built (i.e. the resolved "ref")
artifacts:
maven:
- maven/relative/path/to/artifact
- ...
plugins:
- plugins/relative/path/to/artifact
- ...
libs:
- libs/relative/path/to/artifact
- ...
- ...
"""
class BuildManifest:
@staticmethod
def from_file(file):
return BuildManifest(yaml.safe_load(file))
def __init__(self, data):
self.version = str(data["schema-version"])
if self.version != "1.0":
raise ValueError(f"Unsupported schema version: {self.version}")
self.build = self.Build(data["build"])
self.components = list(
map(lambda entry: self.Component(entry), data["components"])
)
def to_dict(self):
return {
"schema-version": "1.0",
"build": self.build.to_dict(),
"components": list(
map(lambda component: component.to_dict(), self.components)
),
}
class Build:
def __init__(self, data):
self.name = data["name"]
self.version = data["version"]
self.architecture = data["architecture"]
self.id = data["id"]
def to_dict(self):
return {
"name": self.name,
"version": self.version,
"architecture": self.architecture,
"id": self.id,
}
class Component:
def __init__(self, data):
self.name = data["name"]
self.repository = data["repository"]
self.ref = data["ref"]
self.commit_id = data["commit_id"]
self.artifacts = data["artifacts"]
def to_dict(self):
return {
"name": self.name,
"repository": self.repository,
"ref": self.ref,
"commit_id": self.commit_id,
"artifacts": self.artifacts,
}
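# Hedged usage sketch (hypothetical values, shaped by the schema documented
# above; note the `build` section also needs an `id`, which the Build class
# reads even though the docstring omits it):
# manifest = BuildManifest({
#     "schema-version": "1.0",
#     "build": {"name": "OpenSearch", "version": "1.0.0",
#               "architecture": "x64", "id": "20210902"},
#     "components": [{
#         "name": "job-scheduler",
#         "repository": "https://github.com/opensearch-project/job-scheduler",
#         "ref": "main",
#         "commit_id": "abc123",
#         "artifacts": {"plugins": ["plugins/job-scheduler-1.0.0.zip"]},
#     }],
# })
# manifest.to_dict()  # round-trips back to the same structure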
| 28.844444 | 92 | 0.560478 | 289 | 2,596 | 4.951557 | 0.314879 | 0.033543 | 0.025157 | 0.031447 | 0.146751 | 0.092243 | 0.092243 | 0.092243 | 0.048917 | 0 | 0 | 0.007986 | 0.32473 | 2,596 | 89 | 93 | 29.168539 | 0.808329 | 0.026965 | 0 | 0.265306 | 0 | 0 | 0.131395 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.020408 | 0.081633 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68860e0d48202e5adaa317f6a7dfe4f9bfdd298b | 29,617 | py | Python | official/vision/beta/projects/assemblenet/modeling/assemblenet_plus.py | ryan0507/20210922 | c66170930e33b63f072d5129235b62a59c5c9564 | [
"Apache-2.0"
] | null | null | null | official/vision/beta/projects/assemblenet/modeling/assemblenet_plus.py | ryan0507/20210922 | c66170930e33b63f072d5129235b62a59c5c9564 | [
"Apache-2.0"
] | null | null | null | official/vision/beta/projects/assemblenet/modeling/assemblenet_plus.py | ryan0507/20210922 | c66170930e33b63f072d5129235b62a59c5c9564 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains definitions for the AssembleNet++ [2] models (without object input).
Requires the AssembleNet++ architecture to be specified in
FLAGS.model_structure (and optionally FLAGS.model_edge_weights). This is
identical to the form described in assemblenet.py for the AssembleNet. Please
check assemblenet.py for the detailed format of the model strings.
AssembleNet++ adds `peer-attention' to the basic AssembleNet, which allows each
conv. block connection to be conditioned differently based on another block [2].
It is a form of channel-wise attention. Note that we learn to apply attention
independently for each frame.
The `peer-attention' implementation in this file is the version that enables
one-shot differentiable search of attention connectivity (Fig. 2 in [2]), using
a softmax weighted summation of possible attention vectors.
[2] Michael S. Ryoo, AJ Piergiovanni, Juhana Kangaspunta, Anelia Angelova,
AssembleNet++: Assembling Modality Representations via Attention
Connections. ECCV 2020
https://arxiv.org/abs/2008.08072
In order to take advantage of object inputs, one will need to set the flag
FLAGS.use_object_input as True, and provide the list of input tensors as an
input to the network, as shown in run_asn_with_object.py. This will require a
pre-processed object data stream.
It uses (2+1)D convolutions for video representations. The main AssembleNet++
takes a 4-D (N*T)HWC tensor as an input (i.e., the batch dim and time dim are
mixed), and it reshapes a tensor to NT(H*W)C whenever a 1-D temporal conv. is
necessary. This is to run this on TPU efficiently.
"""
import functools
import math
from typing import Any, Mapping, List, Callable, Optional
from absl import logging
import numpy as np
import tensorflow as tf
from official.modeling import hyperparams
from official.vision.beta.modeling import factory_3d as model_factory
from official.vision.beta.modeling.backbones import factory as backbone_factory
from official.vision.beta.projects.assemblenet.configs import assemblenet as cfg
from official.vision.beta.projects.assemblenet.modeling import rep_flow_2d_layer as rf
from official.vision.beta.projects.assemblenet.modeling import assemblenet as asn
layers = tf.keras.layers
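# A hedged, minimal example of the `model_structure` format consumed below
# (hypothetical values, not one of the released AssembleNet++ searches).
# Stems are [block_level, temporal_dilation] with level -1 (RGB), -2 (flow)
# or -3 (object); blocks are
# [block_level, [input node indices], filters, temporal_dilation, strides]:
#
# example_structure = [
#     [-1, 1],                # node 0: RGB stem
#     [-2, 1],                # node 1: optical-flow stem
#     [0, [0, 1], 64, 1, 1],  # node 2: level-0 block fusing nodes 0 and 1
#     [1, [2], 128, 1, 2],    # node 3: level-1 block consuming node 2
# ]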
def softmax_merge_peer_attentions(peers):
"""Merge multiple peer-attention vectors with softmax weighted sum.
Summation weights are to be learned.
Args:
peers: A list of `Tensors` of size `[batch*time, channels]`.
Returns:
The output `Tensor` of size `[batch*time, channels].
"""
data_format = tf.keras.backend.image_data_format()
dtype = peers[0].dtype
assert data_format == 'channels_last'
  initial_attn_weights = tf.keras.initializers.TruncatedNormal(
      stddev=0.01)([len(peers)])
  attn_weights = tf.cast(tf.nn.softmax(initial_attn_weights), dtype)
  weighted_peers = []
  for i, peer in enumerate(peers):
    weighted_peers.append(attn_weights[i] * peer)
  return tf.add_n(weighted_peers)
def apply_attention(inputs,
attention_mode=None,
attention_in=None,
use_5d_mode=False):
"""Applies peer-attention or self-attention to the input tensor.
Depending on the attention_mode, this function either applies channel-wise
self-attention or peer-attention. For the peer-attention, the function
combines multiple candidate attention vectors (given as attention_in), by
learning softmax-sum weights described in the AssembleNet++ paper. Note that
the attention is applied individually for each frame, which showed better
accuracies than using video-level attention.
Args:
    inputs: A `Tensor`. Either 4D or 5D, depending on use_5d_mode.
attention_mode: `str` specifying mode. If not `peer', does self-attention.
attention_in: A list of `Tensors' of size [batch*time, channels].
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
Returns:
The output `Tensor` after concatenation.
"""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
if attention_mode == 'peer':
attn = softmax_merge_peer_attentions(attention_in)
else:
attn = tf.math.reduce_mean(inputs, [h_channel_loc, h_channel_loc+1])
attn = tf.keras.layers.Dense(
units=inputs.shape[-1],
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
attn = tf.math.sigmoid(attn)
  channel_attn = tf.expand_dims(
      tf.expand_dims(attn, h_channel_loc), h_channel_loc)
inputs = tf.math.multiply(inputs, channel_attn)
return inputs
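# Hedged shape sketch (sizes assumed): with use_5d_mode=False the input is
# [batch*time, H, W, C] and `attn` is [batch*time, C]; the two expand_dims
# calls above reshape it to [batch*time, 1, 1, C] so the channel-wise
# attention broadcasts over the spatial dimensions of every frame.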
class _ApplyEdgeWeight(layers.Layer):
"""Multiply weight on each input tensor.
A weight is assigned for each connection (i.e., each input tensor). This layer
is used by the fusion_with_peer_attention to compute the weighted inputs.
"""
def __init__(self,
weights_shape,
index: int = None,
attention_mode: str = None,
attention_in: tf.Tensor = None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: int = None, #todo: newly added - check https://github.com/google-research/google-research/blob/bc7791a7770ce3466fe8df84bec65fed0b77ecb8/assemblenet/run_asn_with_object.py#L57
**kwargs):
"""Constructor.
Args:
      weights_shape: shape of the edge-weight variable to create; one weight
        per input connection.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
attention_mode: `str` specifying mode. If not `peer', does self-attention.
attention_in: A list of `Tensors' of size [batch*time, channels].
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
**kwargs: pass through arguments.
Returns:
The output `Tensor` after concatenation.
"""
super(_ApplyEdgeWeight, self).__init__(**kwargs)
self._weights_shape = weights_shape
self._index = index
self._attention_mode = attention_mode
self._attention_in = attention_in
self._use_5d_mode = use_5d_mode
self._model_edge_weights = model_edge_weights
self._num_object_classes = num_object_classes
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
def get_config(self):
    config = {
        'weights_shape': self._weights_shape,
        'index': self._index,
        'attention_mode': self._attention_mode,
        'attention_in': self._attention_in,
        'use_5d_mode': self._use_5d_mode,
        'model_edge_weights': self._model_edge_weights,
        'num_object_classes': self._num_object_classes,
    }
base_config = super(_ApplyEdgeWeight, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def build(self, input_shape: tf.TensorShape):
if self._weights_shape[0] == 1:
self._edge_weights = 1.0
return
if self._index is None or not self._model_edge_weights:
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.keras.initializers.TruncatedNormal(
mean=0.0, stddev=0.01),
trainable=True,
name='agg_weights')
else:
initial_weights_after_sigmoid = np.asarray(
self._model_edge_weights[self._index][0]).astype('float32')
# Initial_weights_after_sigmoid is never 0, as the initial weights are
# based the results of a successful connectivity search.
initial_weights = -np.log(1. / initial_weights_after_sigmoid - 1.)
self._edge_weights = self.add_weight(
shape=self._weights_shape,
initializer=tf.constant_initializer(initial_weights),
trainable=False,
name='agg_weights')
def call(self,
inputs: List[tf.Tensor],
training: bool = None) -> Mapping[Any, List[tf.Tensor]]:
use_5d_mode = self._use_5d_mode
dtype = inputs[0].dtype
assert len(inputs) > 1
if use_5d_mode:
h_channel_loc = 2
else:
h_channel_loc = 1
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually big.
# Since we do not want the object channel size to increase the number of
# parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != self._num_object_classes: # pylint: disable=line-too-long
lg_channel = inp.shape[3]
# loads or creates weight variables to fuse multiple inputs
weights = tf.math.sigmoid(tf.cast(self._edge_weights, dtype))
# Compute weighted inputs. We group inputs with the same channels.
per_channel_inps = dict({0: []})
for i, inp in enumerate(inputs):
if inp.shape[h_channel_loc] != sm_size[0] or inp.shape[h_channel_loc + 1] != sm_size[1]: # pylint: disable=line-too-long
assert sm_size[0] != 0
ratio = (inp.shape[h_channel_loc] + 1) // sm_size[0]
if use_5d_mode:
inp = tf.keras.layers.MaxPool3D([1, ratio, ratio], [1, ratio, ratio],
padding='same')(
inp)
else:
inp = tf.keras.layers.MaxPool2D([ratio, ratio], ratio,
padding='same')(
inp)
weights = tf.cast(weights, inp.dtype)
if inp.shape[-1] in per_channel_inps:
per_channel_inps[inp.shape[-1]].append(weights[i] * inp)
else:
per_channel_inps.update({inp.shape[-1]: [weights[i] * inp]})
# Implementation of connectivity with peer-attention
if self._attention_mode:
for key, channel_inps in per_channel_inps.items():
for idx in range(len(channel_inps)):
with tf.name_scope('Connection_' + str(key) + '_' + str(idx)):
channel_inps[idx] = apply_attention(channel_inps[idx],
self._attention_mode,
self._attention_in,
self._use_5d_mode)
return per_channel_inps
def fusion_with_peer_attention(inputs: List[tf.Tensor],
index: int = None,
attention_mode=None,
attention_in=None,
use_5d_mode: bool = False,
model_edge_weights: Optional[List[Any]] = None,
num_object_classes: int = None): # todo: newly added - check https://github.com/google-research/google-research/blob/bc7791a7770ce3466fe8df84bec65fed0b77ecb8/assemblenet/run_asn_with_object.py#L57
"""Weighted summation of multiple tensors, while using peer-attention.
Summation weights are to be learned. Uses spatial max pooling and 1x1 conv.
to match their sizes. Before the summation, each connection (i.e., each input)
itself is scaled with channel-wise peer-attention. Notice that attention is
applied for each connection, conditioned based on attention_in.
Args:
    inputs: A list of `Tensors`. Either 4D or 5D, depending on use_5d_mode.
index: `int` index of the block within the AssembleNet architecture. Used
for summation weight initial loading.
attention_mode: `str` specifying mode. If not `peer', does self-attention.
attention_in: A list of `Tensors' of size [batch*time, channels].
use_5d_mode: `bool` indicating whether the inputs are in 5D tensor or 4D.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
Returns:
The output `Tensor` after concatenation.
"""
if use_5d_mode:
h_channel_loc = 2
conv_function = asn.conv3d_same_padding
else:
h_channel_loc = 1
conv_function = asn.conv2d_fixed_padding
# If only 1 input.
if len(inputs) == 1:
inputs[0] = apply_attention(inputs[0],
attention_mode,
attention_in,
use_5d_mode)
return inputs[0]
# get smallest spatial size and largest channels
sm_size = [10000, 10000]
lg_channel = 0
for inp in inputs:
# assume batch X height x width x channels
sm_size[0] = min(sm_size[0], inp.shape[h_channel_loc])
sm_size[1] = min(sm_size[1], inp.shape[h_channel_loc + 1])
# Note that, when using object inputs, object channel sizes are usually big.
# Since we do not want the object channel size to increase the number of
# parameters for every fusion, we exclude it when computing lg_channel.
if inp.shape[-1] > lg_channel and inp.shape[-1] != num_object_classes: # pylint: disable=line-too-long
lg_channel = inp.shape[3]
per_channel_inps = _ApplyEdgeWeight(
weights_shape=[len(inputs)],
index=index,
attention_mode=attention_mode,
attention_in=attention_in,
use_5d_mode=use_5d_mode,
model_edge_weights=model_edge_weights)(
inputs)
# Adding 1x1 conv layers (to match channel size) and fusing all inputs.
# We add inputs with the same channels first before applying 1x1 conv to save
# memory.
inps = []
for key, channel_inps in per_channel_inps.items():
if len(channel_inps) < 1:
continue
if len(channel_inps) == 1:
if key == lg_channel:
inp = channel_inps[0]
else:
inp = conv_function(
channel_inps[0], lg_channel, kernel_size=1, strides=1)
inps.append(inp)
    else:
      if key == lg_channel:
        inp = tf.add_n(channel_inps)
      else:
        # Per the comment above: sum the same-channel inputs first, then
        # apply the 1x1 conv once.
        inp = conv_function(
            tf.add_n(channel_inps), lg_channel, kernel_size=1, strides=1)
      inps.append(inp)
return tf.add_n(inps)
class FusionWithPeerAttention(tf.keras.layers.Layer):
  def __init__(self, index, attention_mode, use_5d_mode, **kwargs):
    self.index = index
    self.attention_mode = attention_mode
    self.use_5d_mode = use_5d_mode
    super().__init__(**kwargs)
  def get_config(self):
    config = {
        'index': self.index,
        'attention_mode': self.attention_mode,
        'use_5d_mode': self.use_5d_mode,
    }
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))
  def call(self, inputs, training=None):
    return fusion_with_peer_attention(inputs[0], self.index,
                                      self.attention_mode, inputs[1],
                                      self.use_5d_mode)
def mock_fusion_with_peer_attention(inputs: List[tf.Tensor],
                                    index: int = None,
                                    attention_mode=None,
                                    attention_in=None,
                                    use_5d_mode: bool = False,
                                    model_edge_weights: Optional[List[Any]] = None,
                                    num_object_classes: int = None):
outputs = FusionWithPeerAttention(index, attention_mode, use_5d_mode)([inputs, attention_in])
return outputs
def object_conv_stem(inputs):
"""Layers for an object input stem.
It expects its input tensor to have a separate channel for each object class.
Args:
inputs: A `Tensor`.
Returns:
The output `Tensor`.
"""
inputs = tf.keras.layers.MaxPool2D(
pool_size=4, strides=4, padding='SAME')(
inputs=inputs)
inputs = tf.identity(inputs, 'initial_max_pool')
return inputs
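# Hedged shape sketch (assumed): object inputs are expected as
# [batch*time, H, W, num_object_classes] maps, one channel per object class
# as the docstring above notes; the stem only downsamples them spatially
# by a factor of 4.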
class AssembleNetPlus(tf.keras.Model):
"""AssembleNet++ backbone."""
def __init__(
self,
block_fn,
num_blocks: List[int],
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
bn_decay: float = rf.BATCH_NORM_DECAY,
bn_epsilon: float = rf.BATCH_NORM_EPSILON,
use_sync_bn: bool = False,
use_object_input: bool = False, #todo: newly added - doc later
attention_mode: str = 'peer', #todo: newly added - doc later
**kwargs):
"""Generator for AssembleNet++ models.
Args:
block_fn: `function` for the block to use within the model. Currently only
has `bottleneck_block_interleave as its option`.
num_blocks: list of 4 `int`s denoting the number of blocks to include in
each of the 4 block groups. Each group consists of blocks that take
inputs of the same resolution.
num_frames: the number of frames in the input tensor.
model_structure: AssembleNetPlus model structure in the string format.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
todo: add description on dimensionality of input_specs 'tuple'
Dimension should be `[batch*time, height, width, channels]`.
model_edge_weights: AssembleNet model structure connection weights in the
string format.
bn_decay: `float` batch norm decay parameter to use.
bn_epsilon: `float` batch norm epsilon parameter to use.
use_sync_bn: use synchronized batch norm for TPU.
      use_object_input: `bool` whether to use object inputs.
      attention_mode: `str` attention type to apply; 'self' or 'peer'
        (default 'peer').
**kwargs: pass through arguments.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the AssembleNet model.
"""
data_format = tf.keras.backend.image_data_format()
# Creation of the model graph.
    logging.info('model_structure=%r', model_structure)
logging.info('model_edge_weights=%r', model_edge_weights)
structure = model_structure
if use_object_input:
original_inputs = tf.keras.Input(shape=input_specs[0].shape[1:])
object_inputs = tf.keras.Input(shape=input_specs[1].shape[1:])
input_specs = input_specs[0]
else:
original_inputs = tf.keras.Input(shape=input_specs.shape[1:])
object_inputs = None
original_num_frames = num_frames
assert num_frames > 0, f'Invalid num_frames {num_frames}'
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(structure)):
grouping[structure[i][0]].append(i)
stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])
assert stem_count != 0
stem_filters = 128 // stem_count
if len(input_specs.shape) == 5:
first_dim = (
input_specs.shape[0] * input_specs.shape[1]
if input_specs.shape[0] and input_specs.shape[1] else -1)
reshape_inputs = tf.reshape(original_inputs, (first_dim,) + input_specs.shape[2:])
elif len(input_specs.shape) == 4:
reshape_inputs = original_inputs
else:
raise ValueError(
f'Expect input spec to be 4 or 5 dimensions {input_specs.shape}')
if grouping[-2]:
# Instead of loading optical flows as inputs from data pipeline, we are
# applying the "Representation Flow" to RGB frames so that we can compute
# the flow within TPU/GPU on fly. It's essentially optical flow since we
# do it with RGBs.
axis = 3 if data_format == 'channels_last' else 1
flow_inputs = rf.RepresentationFlow(
original_num_frames,
depth=reshape_inputs.shape.as_list()[axis],
num_iter=40,
bottleneck=1)(
reshape_inputs)
streams = []
for i in range(len(structure)):
with tf.name_scope('Node_' + str(i)):
if structure[i][0] == -1:
inputs = asn.rgb_conv_stem(
reshape_inputs,
original_num_frames,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -2:
inputs = asn.flow_conv_stem(
flow_inputs,
stem_filters,
temporal_dilation=structure[i][1],
bn_decay=bn_decay,
bn_epsilon=bn_epsilon,
use_sync_bn=use_sync_bn)
streams.append(inputs)
elif structure[i][0] == -3:
# In order to use the object inputs, you need to feed your object
# input tensor here.
inputs = object_conv_stem(object_inputs)
streams.append(inputs)
else:
block_number = structure[i][0]
combined_inputs = [streams[structure[i][1][j]]
for j in range(0, len(structure[i][1]))]
logging.info(grouping)
nodes_below = []
for k in range(-3, structure[i][0]):
nodes_below = nodes_below + grouping[k]
peers = []
if attention_mode:
lg_channel = -1
logging.info(nodes_below)
for k in nodes_below:
logging.info(streams[k].shape)
lg_channel = max(streams[k].shape[3], lg_channel)
for node_index in nodes_below:
attn = tf.reduce_mean(streams[node_index], [1,2])
attn = tf.keras.layers.Dense(
units=lg_channel,
kernel_initializer=tf.random_normal_initializer(stddev=.01))(
inputs=attn)
peers.append(attn)
combined_inputs = mock_fusion_with_peer_attention(
combined_inputs,
index = i,
attention_mode = attention_mode,
attention_in = peers,
use_5d_mode= False)
graph = asn.block_group(
inputs=combined_inputs,
filters=structure[i][2],
block_fn=block_fn,
blocks=num_blocks[block_number],
strides=structure[i][4],
name='block_group' + str(i),
block_level=structure[i][0],
num_frames=num_frames,
temporal_dilation=structure[i][3])
streams.append(graph)
if use_object_input:
inputs = [original_inputs, object_inputs]
else:
inputs = original_inputs
super(AssembleNetPlus, self).__init__(
inputs=inputs, outputs=streams, **kwargs)
@tf.keras.utils.register_keras_serializable(package='Vision')
class AssembleNetPlusModel(tf.keras.Model):
"""An AssembleNet++ model builder."""
def __init__(self,
backbone,
num_classes,
num_frames: int,
model_structure: List[Any],
input_specs: Optional[Mapping[str,
tf.keras.layers.InputSpec]] = None,
max_pool_predictions: bool = False,
use_object_input : bool = False,
**kwargs):
if not input_specs:
input_specs = {
'image': layers.InputSpec(shape=[None, None, None, None, 3])
}
if use_object_input and 'object' not in input_specs:
input_specs['object'] = layers.InputSpec(shape=[None, None, None, None])
self._self_setattr_tracking = False
self._config_dict = {
'backbone': backbone,
'num_classes': num_classes,
'num_frames': num_frames,
'input_specs': input_specs,
'model_structure': model_structure,
}
self._input_specs = input_specs
self._backbone = backbone
grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}
for i in range(len(model_structure)):
grouping[model_structure[i][0]].append(i)
inputs = {
k: tf.keras.Input(shape=v.shape[1:]) for k, v in input_specs.items()
}
if use_object_input:
streams = self._backbone(inputs=[inputs['image'], inputs['object']])
else:
streams = self._backbone(inputs=inputs['image'])
outputs = asn.multi_stream_heads(
streams,
grouping[3],
num_frames,
num_classes,
max_pool_predictions=max_pool_predictions)
super(AssembleNetPlusModel, self).__init__(
inputs=inputs, outputs=outputs, **kwargs)
@property
def checkpoint_items(self):
"""Returns a dictionary of items to be additionally checkpointed."""
return dict(backbone=self.backbone)
@property
def backbone(self):
return self._backbone
def get_config(self):
return self._config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
def assemblenet_plus(assemblenet_depth: int,
num_classes: int,
num_frames: int,
model_structure: List[Any],
input_specs: layers.InputSpec = layers.InputSpec(
shape=[None, None, None, None, 3]),
model_edge_weights: Optional[List[Any]] = None,
use_object_input: bool = False, # todo: newly added - doc later
attention_mode: str = None, # todo: newly added - doc later
max_pool_predictions: bool = False,
**kwargs):
"""Returns the AssembleNet++ model for a given size and number of output classes."""
data_format = tf.keras.backend.image_data_format()
assert data_format == 'channels_last'
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
if use_object_input: #todo: assuming input_specs = [vid, obj] when use_object_input = True
input_specs_dict = {'image': input_specs[0], 'object': input_specs[1]}
else:
input_specs_dict = {'image': input_specs}
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
backbone = AssembleNetPlus(
block_fn=params['block'],
num_blocks=params['num_blocks'],
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input=use_object_input,
attention_mode=attention_mode,
**kwargs)
return AssembleNetPlusModel( #todo: clean up unnecessary/duplicate parameters
backbone,
num_classes=num_classes,
num_frames=num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
use_object_input=use_object_input,
max_pool_predictions=max_pool_predictions,
**kwargs)
@backbone_factory.register_backbone_builder('assemblenet_plus')
def build_assemblenet_plus(
input_specs: tf.keras.layers.InputSpec,
backbone_config: hyperparams.Config,
norm_activation_config: hyperparams.Config,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None
) -> tf.keras.Model:
"""Builds assemblenet++ backbone."""
del l2_regularizer
backbone_type = backbone_config.type
backbone_cfg = backbone_config.get()
assert backbone_type == 'assemblenet_plus'
assemblenet_depth = int(backbone_cfg.model_id)
if assemblenet_depth not in asn.ASSEMBLENET_SPECS:
raise ValueError('Not a valid assemblenet_depth:', assemblenet_depth)
model_structure, model_edge_weights = cfg.blocks_to_flat_lists(
backbone_cfg.blocks)
params = asn.ASSEMBLENET_SPECS[assemblenet_depth]
block_fn = functools.partial(
params['block'],
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
backbone = AssembleNetPlus(
block_fn=block_fn,
num_blocks=params['num_blocks'],
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs,
model_edge_weights=model_edge_weights,
use_object_input= backbone_cfg.use_object_input,
attention_mode=backbone_cfg.attention_mode,
use_sync_bn=norm_activation_config.use_sync_bn,
bn_decay=norm_activation_config.norm_momentum,
bn_epsilon=norm_activation_config.norm_epsilon)
logging.info('Number of parameters in AssembleNet++ backbone: %f M.',
backbone.count_params() / 10.**6)
return backbone
@model_factory.register_model_builder('assemblenet_plus')
def build_assemblenet_plus_model(
input_specs: tf.keras.layers.InputSpec,
model_config: cfg.AssembleNetPlusModel,
num_classes: int,
l2_regularizer: Optional[tf.keras.regularizers.Regularizer] = None):
"""Builds assemblenet++ model."""
input_specs_dict = {'image': input_specs}
backbone = build_assemblenet_plus(input_specs, model_config.backbone,
model_config.norm_activation, l2_regularizer)
backbone_cfg = model_config.backbone.get()
model_structure, _ = cfg.blocks_to_flat_lists(backbone_cfg.blocks)
model = AssembleNetPlusModel(
backbone,
num_classes=num_classes,
num_frames=backbone_cfg.num_frames,
model_structure=model_structure,
input_specs=input_specs_dict,
max_pool_predictions=model_config.max_pool_predictions,
use_object_input=model_config.use_object_input)
return model
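
# Illustrative note (not part of the original module): the branching on
# structure[i][0] above implies the per-node layout of `model_structure`
# entries. Stem nodes are `[level, temporal_dilation]` with level in
# {-3, -2, -1}; block nodes are
# `[level, [input_node_indices], filters, temporal_dilation, strides]`
# with level in {0, 1, 2, 3}. A hypothetical builder call could look like:
#
#   model = assemblenet_plus(
#       assemblenet_depth=50,          # must be a key of asn.ASSEMBLENET_SPECS
#       num_classes=157,               # example value
#       num_frames=32,                 # example value
#       model_structure=my_structure,  # flat-list structure as sketched above
#       attention_mode='peer')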
| 39.227815 | 227 | 0.668299 | 3,879 | 29,617 | 4.880639 | 0.145914 | 0.024297 | 0.014262 | 0.009508 | 0.443376 | 0.3867 | 0.32791 | 0.307574 | 0.277678 | 0.256603 | 0 | 0.014144 | 0.240875 | 29,617 | 754 | 228 | 39.279841 | 0.827914 | 0.293345 | 0 | 0.40121 | 0 | 0 | 0.034245 | 0.001022 | 0 | 0 | 0 | 0.006631 | 0.018145 | 1 | 0.040323 | false | 0 | 0.024194 | 0.008065 | 0.106855 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68881b0298541b78d2a28af9b658ec19e29062b5 | 2,823 | py | Python | mono/mono_packet_conversation.py | Jalv/Mono_amp | 17eeefd899d4bfd7db6fac29ae0245d31ef545af | [
"MIT"
] | null | null | null | mono/mono_packet_conversation.py | Jalv/Mono_amp | 17eeefd899d4bfd7db6fac29ae0245d31ef545af | [
"MIT"
] | null | null | null | mono/mono_packet_conversation.py | Jalv/Mono_amp | 17eeefd899d4bfd7db6fac29ae0245d31ef545af | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 15:00:17 2017
@author: robert
"""
import sys
import logging
if sys.version_info > (3, 3):
    from pymysql import cursors
else:
    from MySQLdb import cursors

from . import mono_tools

# API to deal with the PACKETS_CONVERSATIONS table.


# Remove all the packet_conversation rows in the table with the given
# id_session.
def remove_packetconversations(id_session, db):
    l = logging.getLogger("mono_packet_conversation")
    l.info("remove all packet_conversation from session %d" % (id_session,))
    cursor = db.cursor()
    try:
        sql = "DELETE FROM PACKETS_CONVERSATIONS WHERE id_session = %s"
        cursor.execute(sql, (id_session, ))
        db.commit()
    except Exception as e:
        mono_tools.handle_db_exception(e, db, cursor)
        raise


# Returns all packets from a conversation.
def get_packets_from_conversation(id_conversation, conv_type, db):
    cursor = db.cursor(cursors.DictCursor)
    try:
        sql = ("SELECT * FROM PACKETS_CONVERSATIONS pc "
               "INNER JOIN PACKETS p ON pc.id_packet = p.id_packet "
               "WHERE pc.id_conversation = %s AND pc.conversation_type=%s ")
        cursor.execute(sql, (id_conversation, conv_type))
        db.commit()
        return cursor.fetchall()
    except Exception as e:
        mono_tools.handle_db_exception(e, db, cursor)
        raise


# Returns the packet_conversation id of the newly inserted row.
def add_packetconversation(id_session, id_packet, id_conv, conv_type, db):
    cursor = db.cursor()
    try:
        sql = "INSERT INTO PACKETS_CONVERSATIONS (id_session, id_packet, id_conversation, conversation_type) "
        sql += "VALUES (%s,%s,%s,%s) "
        cursor.execute(sql, (id_session, id_packet, id_conv, conv_type))
        db.commit()
        return cursor.lastrowid
    except Exception as e:
        mono_tools.handle_db_exception(e, db, cursor)
        raise


def get_packetconversation(id_pc, db):
    cursor = db.cursor(cursors.DictCursor)
    try:
        sql = "SELECT * FROM PACKETS_CONVERSATIONS WHERE id_pc=%s "
        cursor.execute(sql, (id_pc, ))
        result = cursor.fetchone()
        db.commit()
        return result
    except Exception as e:
        mono_tools.handle_db_exception(e, db, cursor)
        raise


# Helper function (for test only).
def remove_packetconversation(id_packet_conversation, db):
    cursor = db.cursor()
    l = logging.getLogger("mono_packet_conversation")
    l.debug("Remove packet_conversation with id " + str(id_packet_conversation))
    try:
        sql = "DELETE FROM PACKETS_CONVERSATIONS WHERE id_pc=%s "
        cursor.execute(sql, (id_packet_conversation,))
        db.commit()
    except Exception as e:
        mono_tools.handle_db_exception(e, db, cursor)
        raise

# Call the functions where needed.
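
# Usage sketch (illustrative only; the connection parameters below are
# placeholders, not part of this module):
#
#   import pymysql
#   db = pymysql.connect(host='localhost', user='mono', password='...',
#                        db='mono')
#   new_id = add_packetconversation(id_session=1, id_packet=42, id_conv=7,
#                                   conv_type='tcp', db=db)
#   row = get_packetconversation(new_id, db)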
| 32.825581 | 111 | 0.687566 | 381 | 2,823 | 4.902887 | 0.275591 | 0.059957 | 0.037473 | 0.045503 | 0.492505 | 0.447537 | 0.388116 | 0.345289 | 0.315846 | 0.280514 | 0 | 0.00726 | 0.21927 | 2,823 | 85 | 112 | 33.211765 | 0.84029 | 0.134963 | 0 | 0.516129 | 0 | 0 | 0.164536 | 0.054433 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.080645 | 0 | 0.209677 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
689141451ddc5b8c9ee08d2415236492e4cace5e | 7,253 | py | Python | phonemizer/phonemize.py | mipuc/IMS-Toucan | 51c4090369b118d77b998961d788802a62411867 | [
"Apache-2.0"
] | null | null | null | phonemizer/phonemize.py | mipuc/IMS-Toucan | 51c4090369b118d77b998961d788802a62411867 | [
"Apache-2.0"
] | null | null | null | phonemizer/phonemize.py | mipuc/IMS-Toucan | 51c4090369b118d77b998961d788802a62411867 | [
"Apache-2.0"
] | 1 | 2021-11-26T12:45:04.000Z | 2021-11-26T12:45:04.000Z | # Copyright 2015-2021 Mathieu Bernard
#
# This file is part of phonemizer: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Phonemizer is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phonemizer. If not, see <http://www.gnu.org/licenses/>.
"""Provides the phonemize function
To use it in your own code, type:
from phonemizer import phonemize
"""
import sys
from phonemizer.logger import get_logger
from phonemizer.separator import default_separator
from phonemizer.backend import (
EspeakBackend, EspeakMbrolaBackend, FestivalBackend, SegmentsBackend)
from phonemizer.punctuation import Punctuation
def phonemize(
text,
language='at',
backend='festival',
separator=default_separator,
strip=False,
preserve_punctuation=False,
punctuation_marks=Punctuation.default_marks(),
with_stress=False,
language_switch='keep-flags',
njobs=1,
logger=get_logger()):
"""Multilingual text to phonemes converter
Return a phonemized version of an input `text`, given its
`language` and a phonemization `backend`.
Parameters
----------
text (str or list of str): The text to be phonemized. Any empty
line will be ignored. If `text` is an str, it can be multiline
(lines being separated by \n). If `text` is a list, each
element is considered as a separated line. Each line is
considered as a text utterance.
language (str): The language code of the input text, must be
supported by the backend. If `backend` is 'segments', the
language can be a file with a grapheme to phoneme mapping.
backend (str): The software backend to use for phonemization, must
be 'festival' (US English only is supported, coded 'en-us'),
'espeak', 'espeak-mbrola' or 'segments'.
separator (Separator): string separators between phonemes, syllables and
words, default to separator.default_separator. Syllable separator is
considered only for the festival backend. Word separator is ignored by
the 'espeak-mbrola' backend.
strip (bool): If True, don't output the last word and phone
separators of a token, default to False.
preserve_punctuation (bool): When True, will keep the punctuation in the
phonemized output. Not supported by the 'espeak-mbrola' backend.
Default to False and remove all the punctuation.
punctuation_marks (str): The punctuation marks to consider when dealing
with punctuation, either for removal or preservation. Default to
Punctuation.default_marks().
with_stress (bool): This option is only valid for the 'espeak' backend.
When True the stresses on phonemes are present (stresses characters are
ˈ'ˌ). When False stresses are removed. Default to False.
language_switch (str): Espeak can output some words in another language
(typically English) when phonemizing a text. This option setups the
policy to use when such a language switch occurs. Three values are
available: 'keep-flags' (the default), 'remove-flags' or
'remove-utterance'. The 'keep-flags' policy keeps the language switching
flags, for example "(en) or (jp)", in the output. The 'remove-flags'
policy removes them and the 'remove-utterance' policy removes the whole
line of text including a language switch. This option is only valid for
the 'espeak' backend.
njobs (int): The number of parallel jobs to launch. The input text
is split in `njobs` parts, phonemized on parallel instances of
the backend and the outputs are finally collapsed.
logger (logging.Logger): the logging instance where to send
messages. If not specified, use the default system logger.
Returns
-------
phonemized text (str or list of str) : The input `text` phonemized
for the given `language` and `backend`. The returned value has
the same type of the input text (either a list or a string).
Raises
------
RuntimeError if the `backend` is not valid or is valid but not installed,
if the `language` is not supported by the `backend`, if with_stress` or
`language_switch` are used but the backend is not 'espeak'.
"""
# ensure the backend is either espeak, festival or segments
if backend not in ('espeak', 'espeak-mbrola', 'festival', 'segments'):
raise RuntimeError(
'{} is not a supported backend, choose in {}.'
.format(backend, ', '.join(
('espeak', 'espeak-mbrola', 'festival', 'segments'))))
# with_stress option only valid for espeak
if with_stress and backend != 'espeak':
raise RuntimeError(
'the "with_stress" option is available for espeak backend only, '
'but you are using {} backend'.format(backend))
# language_switch option only valid for espeak
if (
language_switch != 'keep-flags'
and backend not in ('espeak', 'espeak-mbrola')
):
raise RuntimeError(
'the "language_switch" option is available for espeak backend '
'only, but you are using {} backend'.format(backend))
# preserve_punctuation and word separator not valid for espeak-mbrola
if backend == 'espeak-mbrola' and preserve_punctuation:
logger.warning('espeak-mbrola backend cannot preserve punctuation')
if backend == 'espeak-mbrola' and separator.word:
logger.warning('espeak-mbrola backend cannot preserve word separation')
# python2 needs additional utf8 encoding
if sys.version_info[0] == 2: # pragma: nocover
logger.warning(
'Your are using python2 but unsupported by the phonemizer, '
'please update to python>=3.6')
# instanciate the requested backend for the given language (raises
# a RuntimeError if the language is not supported).
backends = {b.name(): b for b in (
EspeakBackend, FestivalBackend, SegmentsBackend, EspeakMbrolaBackend)}
if backend == 'espeak':
phonemizer = backends[backend](
language,
punctuation_marks=punctuation_marks,
preserve_punctuation=preserve_punctuation,
with_stress=with_stress,
language_switch=language_switch,
logger=logger)
elif backend == 'espeak-mbrola':
phonemizer = backends[backend](
language,
logger=logger)
else: # festival or segments
phonemizer = backends[backend](
language,
punctuation_marks=punctuation_marks,
preserve_punctuation=preserve_punctuation,
logger=logger)
# phonemize the input text
#print(text)
return phonemizer.phonemize(
text, separator=separator, strip=strip, njobs=njobs)
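
# Illustrative usage (not part of the upstream module; assumes the festival
# binary is installed and 'en-us' is available, per the docstring above):
#
#   from phonemizer import phonemize
#   phonemize('hello world', language='en-us', backend='festival')
#   phonemize(['hello world', 'good morning'],
#             language='en-us', backend='festival', njobs=2)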
| 40.977401 | 79 | 0.685234 | 929 | 7,253 | 5.308934 | 0.273412 | 0.029197 | 0.012165 | 0.011557 | 0.21472 | 0.158962 | 0.11395 | 0.08678 | 0.08678 | 0.07056 | 0 | 0.003087 | 0.240728 | 7,253 | 176 | 80 | 41.210227 | 0.8925 | 0.617262 | 0 | 0.25 | 0 | 0 | 0.215486 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015625 | false | 0 | 0.078125 | 0 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68915201777af49dc63e93bc7ec27c993f24337e | 9,470 | py | Python | src/arm/plc4trucksduck_host.py | TruckHacking/plc4trucksduck | 17b9c18ae8363eab5246d70c17b8f2527b4de559 | [
"MIT"
] | 4 | 2021-08-15T23:10:52.000Z | 2022-02-21T05:16:49.000Z | src/arm/plc4trucksduck_host.py | TruckHacking/plc4trucksduck | 17b9c18ae8363eab5246d70c17b8f2527b4de559 | [
"MIT"
] | 2 | 2021-02-11T19:59:33.000Z | 2021-03-26T21:02:20.000Z | src/arm/plc4trucksduck_host.py | TruckHacking/plc4trucksduck | 17b9c18ae8363eab5246d70c17b8f2527b4de559 | [
"MIT"
] | 1 | 2020-12-06T04:04:47.000Z | 2020-12-06T04:04:47.000Z | #!/usr/bin/env python2
# PLC4TRUCKSDuck (c) 2020 National Motor Freight Traffic Association
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import print_function

import mmap
import pypruss  # available only in python 2
import select
import signal
import socket
import struct
import sys
import threading
import time

import bitstring

TARGET_PRU_FW = 'plc4trucksduck.bin'
TARGET_PRU_NO = 0
UDP_PORTS = (6971, 6972)

DDR_START = 0x10000000  # 256MiB
DDR_VADDR = 0x4a300000
DDR_SIZE = pypruss.ddr_size()
DDR_END = DDR_START + DDR_SIZE

if TARGET_PRU_NO == 0:
    TARGET_PRU_INTERRUPT = pypruss.PRU0_ARM_INTERRUPT
    TARGET_PRU_PRE_SIZE = 0
else:
    TARGET_PRU_INTERRUPT = pypruss.PRU1_ARM_INTERRUPT
    TARGET_PRU_PRE_SIZE = 8192

SHARED_ADDR = DDR_VADDR + TARGET_PRU_PRE_SIZE
SHARED_OFFSET = SHARED_ADDR - DDR_START
SHARED_FILELEN = DDR_SIZE + DDR_START

RX_PAYLOAD_LEN = 4  # must match the same in plc4trucksduck.c
RX_RING_BUFFER_LEN = 4  # must match the same in plc4trucksduck.c
RX_FRAME_SIZE = 5  # must match the same in plc4trucksduck.c
RX_RING_BUFFER_CONSUME_OFFSET = 4  # must match the same in plc4trucksduck.c
RX_RING_BUFFER_FRAMES_OFFSET = 8  # must match the same in plc4trucksduck.c

TX_PAYLOAD_LEN = 321  # must match the same in plc4trucksduck.c
TX_RING_BUFFER_LEN = 4  # must match the same in plc4trucksduck.c
TX_FRAME_SIZE = 324  # must match the same in plc4trucksduck.c
TX_RING_BUFFER_CONSUME_OFFSET = 4  # must match the same in plc4trucksduck.c
TX_RING_BUFFER_FRAMES_OFFSET = 8  # must match the same in plc4trucksduck.c
TX_FRAME_BIT_LEN_OFFSET = 0  # must match the same in plc4trucksduck.c
TX_FRAME_PREAMBLE_OFFSET = 2  # must match the same in plc4trucksduck.c
TX_FRAME_PAYLOAD_OFFSET = 3  # must match the same in plc4trucksduck.c

RX_RING_BUFFER_VADDR_OFFSET = 0  # must match the same in plc4trucksduck.c
RX_RING_BUFFER_SIZE = 28  # must match the same in plc4trucksduck.c
TX_RING_BUFFER_VADDR_OFFSET = 28  # must match the same in plc4trucksduck.c

MAX_PAYLOAD_SIZE = 255  # corresponds to 321 special bits payload bytes above
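
# Shared-DDR layout implied by the offsets above (descriptive comment, not in
# the original script): the RX ring buffer occupies bytes [0, 28) of the
# PRU-visible region -- two 32-bit produce/consume words followed by 4 frames
# of 5 bytes each (8 + 4*5 = 28) -- and the TX ring buffer starts at byte
# offset 28.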
def get_special_preamble_bits(preamble_mid):
    mid_bits = bitstring.BitArray(bytes=preamble_mid)
    return mid_bits


def get_special_payload_bits(payload):
    payload_bits = bitstring.BitArray()
    for b_int in bytes(payload):  # assumes the checksum byte is _in_ `payload`
        b_bits = bitstring.BitArray(bytes=b_int)
        b_bits.reverse()
        payload_bits.append(bitstring.ConstBitArray(bin='0'))  # start bit
        payload_bits.append(b_bits)  # bit-reversed byte
        payload_bits.append(bitstring.ConstBitArray(bin='1'))  # stop bit
    return payload_bits
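
# Worked example (illustrative, not in the original script): for the single
# payload byte 0x96, BitArray(bytes='\x96') gives '10010110'; reversing
# yields '01101001', and framing adds a start '0' and a stop '1', so
#   get_special_payload_bits('\x96').bin == '0011010011'
# i.e. each payload byte contributes 10 bits to the transmit frame.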
class PRU_read_thread(threading.Thread):
    def __init__(self, stopped, socket, ddr_mem):
        super(PRU_read_thread, self).__init__()
        self.ddr_mem = ddr_mem
        self.struct_start = DDR_START + RX_RING_BUFFER_VADDR_OFFSET
        self.frames_base = self.struct_start + RX_RING_BUFFER_FRAMES_OFFSET
        self.frames_ptr = self.frames_base
        self.calls = 0
        self.socket = socket
        self.stopped = stopped

    def kill_me(self):
        self.stopped.set()

    def join(self, timeout=None):
        super(PRU_read_thread, self).join(timeout)
        data = self.ddr_mem[DDR_START:DDR_START + RX_RING_BUFFER_SIZE]
        msg = map(lambda x: "{:02x}".format(ord(x)), data)
        for i in range(8, len(msg), RX_FRAME_SIZE):
            print(",".join(msg[i:i + RX_FRAME_SIZE]))

    def run(self):
        old_consume = 0
        while not self.stopped.is_set():
            pypruss.wait_for_event(TARGET_PRU_NO)
            pypruss.clear_event(TARGET_PRU_NO, TARGET_PRU_INTERRUPT)
            self.calls += 1
            (produce, consume) = \
                struct.unpack("LL", self.ddr_mem[DDR_START:DDR_START +
                                                 RX_RING_BUFFER_FRAMES_OFFSET])
            while consume != produce:
                length = struct.unpack("B", self.ddr_mem[self.frames_ptr])[0]
                frame = \
                    struct.unpack("B" * length,
                                  self.ddr_mem[self.frames_ptr + 1:
                                               self.frames_ptr + 1 + length])
                # sys.stderr.write('rx ' + str(frame) + '\n')
                consume = (consume + 1) % RX_RING_BUFFER_LEN
                self.frames_ptr = self.frames_base + \
                    (consume * RX_FRAME_SIZE)
            if old_consume != consume:
                self.ddr_mem[DDR_START + RX_RING_BUFFER_CONSUME_OFFSET:
                             DDR_START + RX_RING_BUFFER_FRAMES_OFFSET] = \
                    struct.pack('L', consume)
                old_consume = consume


class PRU_write_thread(threading.Thread):
    def __init__(self, stopped, socket, ddr_mem):
        super(PRU_write_thread, self).__init__()
        self.ddr_mem = ddr_mem
        self.struct_start = DDR_START + TX_RING_BUFFER_VADDR_OFFSET
        self.frames_base = self.struct_start + TX_RING_BUFFER_FRAMES_OFFSET
        self.frames_ptr = self.frames_base
        self.socket = socket
        self.stopped = stopped

    def kill_me(self):
        self.stopped.set()

    def join(self, timeout=None):
        super(PRU_write_thread, self).join(timeout)

    def run(self):
        while not self.stopped.is_set():
            ready = select.select([self.socket], [], [], 0.5)[0]
            if ready == []:
                continue
            frame = self.socket.recv(256)
            (produce, consume) = \
                struct.unpack('LL',
                              self.ddr_mem[self.struct_start:
                                           self.struct_start +
                                           TX_RING_BUFFER_FRAMES_OFFSET])
            while (produce + 1) % TX_RING_BUFFER_LEN == consume:
                sys.stderr.write("buffer full, waiting\n")
                time.sleep(0.003)
                (produce, consume) = \
                    struct.unpack('LL',
                                  self.ddr_mem[self.struct_start:
                                               self.struct_start +
                                               TX_RING_BUFFER_FRAMES_OFFSET])
            if len(frame) > MAX_PAYLOAD_SIZE:
                frame = frame[:MAX_PAYLOAD_SIZE]
            preamble_bits = get_special_preamble_bits(frame[0])
            preamble_byte = preamble_bits.tobytes()[0]
            payload_bits = get_special_payload_bits(frame)
            payload_bytes = payload_bits.tobytes()
            bit_len_offset = self.frames_ptr + TX_FRAME_BIT_LEN_OFFSET
            self.ddr_mem[bit_len_offset:bit_len_offset + 2] = \
                struct.pack('H', payload_bits.len)
            self.ddr_mem[self.frames_ptr + TX_FRAME_PREAMBLE_OFFSET] = \
                preamble_byte
            frame_offset = self.frames_ptr + TX_FRAME_PAYLOAD_OFFSET
            self.ddr_mem[frame_offset:frame_offset + len(payload_bytes)] = \
                payload_bytes
            produce = (produce + 1) % TX_RING_BUFFER_LEN
            self.frames_ptr = \
                self.frames_base + (produce * TX_FRAME_SIZE)
            self.ddr_mem[self.struct_start:self.struct_start +
                         TX_RING_BUFFER_CONSUME_OFFSET] = \
                struct.pack('L', produce)
            # sys.stderr.write("tx preamble:%s payload:%s bit_length:%d\n" % (preamble_bits, payload_bits, payload_bits.len))


pypruss.modprobe()

sock = socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
try:
    sock.bind(('localhost', UDP_PORTS[0]))
except OSError as e:
    print(e)
    sys.exit(-1)

f = open("/dev/mem", "r+b")
shared_mem = mmap.mmap(f.fileno(), SHARED_FILELEN, offset=SHARED_OFFSET)

if TARGET_PRU_NO == 1:
    pypruss.init()
pypruss.open(TARGET_PRU_NO)
if TARGET_PRU_NO == 1:
    pypruss.pruintc_init()

stopped = threading.Event()
stopped.clear()

pru_stop_thread = PRU_read_thread(stopped, sock, shared_mem)
pru_send_thread = PRU_write_thread(stopped, sock, shared_mem)
pru_stop_thread.start()
pru_send_thread.start()

pypruss.exec_program(TARGET_PRU_NO, TARGET_PRU_FW)


def signal_handler(signal, frame):
    # Only the two threads created above exist; the original also referenced
    # a pru_pump_thread that is never defined, which would raise a NameError
    # on SIGINT, so those calls are dropped here.
    pru_stop_thread.kill_me()
    pru_send_thread.kill_me()
    pru_stop_thread.join()
    pru_send_thread.join()


signal.signal(signal.SIGINT, signal_handler)

pru_stop_thread.join()
pru_send_thread.join()

pypruss.exit()
| 36.848249 | 124 | 0.66188 | 1,286 | 9,470 | 4.586314 | 0.211509 | 0.038996 | 0.032553 | 0.043405 | 0.42845 | 0.396745 | 0.321126 | 0.313666 | 0.290777 | 0.25924 | 0 | 0.017019 | 0.255438 | 9,470 | 256 | 125 | 36.992188 | 0.819458 | 0.218902 | 0 | 0.20442 | 0 | 0 | 0.010881 | 0 | 0 | 0 | 0.00272 | 0 | 0 | 1 | 0.060773 | false | 0 | 0.060773 | 0 | 0.143646 | 0.016575 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6892d9ab4d5788548cb25180696420cfb976cd08 | 10,528 | py | Python | src/error_rechecker.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | src/error_rechecker.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | src/error_rechecker.py | khairulislam/phys | fc702520fcd3b23022b9253e7d94f878978b4500 | [
"MIT"
] | null | null | null | from unit_error_types import UnitErrorTypes
from error_checker import ErrorChecker
from tree_walker import TreeWalker
import cps_constraints as con
import cppcheckdata
import pickle
import os
from operator import itemgetter
class ErrorRechecker:
    '''IMPLEMENTATION OF USER-ASSISTED ERROR RECHECKING.'''

    def __init__(self):
        self.cppcheck_pkl_filename = 'cppcheck_config.pkl'
        self.errors_pkl_filename = 'error_list.pkl'
        self.varlist_pkl_filename = 'var_units_to_check_list.pkl'

    def store_state(self, a_cppcheck_configuration, errors,
                    variable_units_to_check_as_list):
        tokenlist = {}
        for t in a_cppcheck_configuration.tokenlist:
            tokenlist[t.Id] = (t.units, t.isKnown,
                               t.is_unit_propagation_based_on_constants,
                               t.is_unit_propagation_based_on_unknown_variable,
                               t.is_unit_propagation_based_on_weak_inference,
                               t.isRoot, t.isDimensionless)
        pickle.dump(tokenlist, open(self.cppcheck_pkl_filename, 'wb'),
                    pickle.HIGHEST_PROTOCOL)
        for e in errors:
            e.token = e.token.Id
            if e.token_left:
                e.token_left = e.token_left.Id
            if e.token_right:
                e.token_right = e.token_right.Id
        pickle.dump(errors, open(self.errors_pkl_filename, 'wb'),
                    pickle.HIGHEST_PROTOCOL)
        varlist = {}
        for (isKnown, rank, var, var_name, units,
             linenrs) in variable_units_to_check_as_list:
            varlist[(var.Id, var_name)] = rank
        pickle.dump(varlist, open(self.varlist_pkl_filename, 'wb'),
                    pickle.HIGHEST_PROTOCOL)

    def get_cppcheck_config_data_structure(self, dump_file):
        data = cppcheckdata.parsedump(dump_file)
        for c in data.configurations[:1]:
            return c

    def load_state(self, a_cppcheck_configuration):
        tokenlist = {}
        errors = []
        if not (os.path.exists(self.cppcheck_pkl_filename) and
                os.path.exists(self.errors_pkl_filename) and
                os.path.exists(self.varlist_pkl_filename)):
            return
        tokenlist = pickle.load(open(self.cppcheck_pkl_filename, 'rb'))
        for t in a_cppcheck_configuration.tokenlist:
            (units, isKnown, is_unit_propagation_based_on_constants,
             is_unit_propagation_based_on_unknown_variable,
             is_unit_propagation_based_on_weak_inference,
             isRoot, isDimensionless) = tokenlist[t.Id]
            t.units = units
            t.isKnown = isKnown
            t.is_unit_propagation_based_on_constants = \
                is_unit_propagation_based_on_constants
            t.is_unit_propagation_based_on_unknown_variable = \
                is_unit_propagation_based_on_unknown_variable
            t.is_unit_propagation_based_on_weak_inference = \
                is_unit_propagation_based_on_weak_inference
            t.isRoot = isRoot
            t.hasVarOperand = False
            t.isDimensionless = isDimensionless
        errors = pickle.load(open(self.errors_pkl_filename, 'rb'))
        for t in a_cppcheck_configuration.tokenlist:
            for e in errors:
                if e.token == t.Id:
                    e.token = t
                if e.token_left and e.token_left == t.Id:
                    e.token_left = t
                if e.token_right and e.token_right == t.Id:
                    e.token_right = t
        varlist = pickle.load(open(self.varlist_pkl_filename, 'rb'))
        return (errors, varlist)

    def apply_and_propagate_units(self, tw, root_token):
        break_point = 1000
        i = 0
        # FIND THE MIN AND MAX LINE NUMBERS IN THIS AST: USED TO PROTECT THE
        # LOOP FROM MULTI-LINE STATEMENTS.
        tw.generic_recurse_and_apply_function(root_token,
                                              tw.find_min_max_line_numbers)
        tw.generic_recurse_and_apply_function(root_token,
                                              tw.apply_correction_units)
        # CONTINUE TO ATTEMPT CHANGES UNTIL CHANGES CEASE.
        while tw.was_some_unit_changed:
            if i > break_point:
                s = "BREAKING WHILE LOOP AT %d" % break_point
                raise ValueError(s)
            i += 1
            tw.was_some_unit_changed = False
            # LOOK FOR EARLY ABANDONMENT OF THIS AST.
            if not tw.found_units_in_this_tree:
                break
            ### PROPAGATE UNITS
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_dot_connectors)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_double_colon)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_square_brackets)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_assignment)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_math_abs_fabs_floor_ceil)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_math_min_max)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_math_fmod_fmodf_fmodl)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_sqrt)
            # tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_getXYZ)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_ternary)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_pow)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_inverse_trig)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_operators)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_return)
            # tw.generic_recurse_and_apply_function(root_token, tw.collect_function_param_units_and_decorate_function)
            tw.generic_recurse_and_apply_function(root_token, tw.propagate_units_across_parenthesis)
        # END -- WHILE LOOP

    def recheck_unit_errors(self, correction_file, dump_file, source_file):
        with open(correction_file) as f:
            for var_result in (line.rstrip('\n') for line in f):
                var_name, var_unit = var_result.split(',', 1)
                var_name, var_unit = var_name.strip(), var_unit.strip()
                var_unit = eval(var_unit)
                con.phys_corrections[var_name] = var_unit
        # print "phys_corrections: %s" % con.phys_corrections
        a_cppcheck_configuration = \
            self.get_cppcheck_config_data_structure(dump_file)
        errors, varlist = self.load_state(a_cppcheck_configuration)
        err_checker = ErrorChecker(dump_file, source_file)

        show_high_confidence = True
        show_low_confidence = False
        for e in errors:
            is_high_confidence = not e.is_warning
            is_low_confidence = e.is_warning
            if is_high_confidence and not show_high_confidence:
                continue
            if is_low_confidence and not show_low_confidence:
                continue
            if e.ERROR_TYPE == UnitErrorTypes.VARIABLE_MULTIPLE_UNITS:
                tw = TreeWalker(None)
                self.apply_and_propagate_units(tw, e.token)
                # TRACK VARIABLE WITH MULTIPLE UNITS.
                if len(e.token.astOperand2.units) > 1:
                    e.units_when_multiple_happened = e.token.astOperand2.units
                    err_checker.all_errors.append(e)
                if e.token_left.isKnown:
                    # TRACK VARIABLE WITH MULTIPLE UNITS.
                    if (len(e.token.astOperand2.units) == 1) and \
                            (e.token_left.units != e.token.astOperand2.units):
                        units = []
                        units.extend(e.token_left.units)
                        units.extend(e.token.astOperand2.units)
                        e.units_when_multiple_happened = units
                        err_checker.all_errors.append(e)
            elif e.ERROR_TYPE == UnitErrorTypes.FUNCTION_CALLED_WITH_DIFFERENT_UNIT_ARGUMENTS:
                tw = TreeWalker(None)
                self.apply_and_propagate_units(tw, e.token_left)
                self.apply_and_propagate_units(tw, e.token_right)
                # UPDATE UNITS AT BOTH CALL POINTS.
                e.units_at_first_assignment = e.token_left.units
                e.units_when_multiple_happened = e.token_right.units
                # CHECK UNITS OF FIRST CALL POINT AGAINST THE OTHER CALL POINT.
                if e.units_when_multiple_happened != e.units_at_first_assignment:
                    err_checker.all_errors.append(e)
            elif e.ERROR_TYPE == UnitErrorTypes.ADDITION_OF_INCOMPATIBLE_UNITS:
                tw = TreeWalker(None)
                self.apply_and_propagate_units(tw, e.token)
                err_checker.have_found_addition_error_on_this_line = False
                tw.generic_recurse_and_apply_function(
                    e.token,
                    err_checker.error_check_addition_of_incompatible_units_recursive)
            elif e.ERROR_TYPE == UnitErrorTypes.COMPARISON_INCOMPATIBLE_UNITS:
                tw = TreeWalker(None)
                self.apply_and_propagate_units(tw, e.token)
                tw.generic_recurse_and_apply_function(
                    e.token, err_checker.error_check_comparison_recursive)
            else:
                err_checker.all_errors.append(e)

        print("Error_Rechecker:")
        err_checker.pretty_print()
        err_checker.print_unit_errors('errors_2.txt')
        self.print_var_units_to_check(err_checker, varlist,
                                      'variable_units_to_check_2.txt')

    def print_var_units_to_check(self, err_checker, varlist, check_file):
        # Convert the variable_units_to_check dictionary to a list ordered by
        # ranking.
        for (var, var_name) in err_checker.variable_units_to_check:
            value = err_checker.variable_units_to_check[(var, var_name)]
            isKnown = value[0]
            rank = 1.0
            if (var.Id, var_name) in varlist:
                rank = varlist[(var.Id, var_name)]
            err_checker.variable_units_to_check_as_list.append(
                (isKnown, rank, var, var_name, value[1], value[2]))
        err_checker.variable_units_to_check_as_list = sorted(
            err_checker.variable_units_to_check_as_list, key=itemgetter(0, 1))
        with open(check_file, 'w') as f:
            for (isKnown, rank, var, var_name, units,
                 linenrs) in err_checker.variable_units_to_check_as_list:
                f.write("%s, %s, %s, %s, %s\n" %
                        (linenrs[0], rank, var.Id, var_name, units))
| 46.378855 | 128 | 0.662044 | 1,346 | 10,528 | 4.786033 | 0.168648 | 0.030736 | 0.049674 | 0.058988 | 0.555883 | 0.466625 | 0.414468 | 0.376902 | 0.346787 | 0.307048 | 0 | 0.003244 | 0.268047 | 10,528 | 226 | 129 | 46.584071 | 0.832728 | 0.070289 | 0 | 0.142857 | 0 | 0 | 0.018227 | 0.005734 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.049689 | 0 | 0.124224 | 0.031056 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6892ee1e58cea742639ca78935bd837f985447ef | 7,864 | py | Python | QCL_gui/QCL_GUI.py | alex123go/QCL_controllerViaArduino | f9d174ecc6af45be27d6634df9315bdcfa83865c | [
"MIT"
] | null | null | null | QCL_gui/QCL_GUI.py | alex123go/QCL_controllerViaArduino | f9d174ecc6af45be27d6634df9315bdcfa83865c | [
"MIT"
] | null | null | null | QCL_gui/QCL_GUI.py | alex123go/QCL_controllerViaArduino | f9d174ecc6af45be27d6634df9315bdcfa83865c | [
"MIT"
] | null | null | null | from PyQt5 import QtGui, Qt, QtCore, QtWidgets, uic
import sys
import time
import serial.tools.list_ports
import numpy as np
from comms import QCL_comms
class QCL_GUI(QtWidgets.QWidget):
    def __init__(self):
        super(QCL_GUI, self).__init__()
        uic.loadUi("QCL_GUI.ui", self)
        # self.loadInternalParameters()
        self.initUI()
        self.setWindowIcon(QtGui.QIcon('qcl500-oem.ico'))
        self.show()
        self.qcl = QCL_comms()

    def closeEvent(self, event):
        self.closeSerial()
        can_exit = 1
        if can_exit:
            event.accept()  # let the window close
        else:
            event.ignore()

    def initUI(self):
        self.pushButton_serial_connect.pressed.connect(self.startSerial)
        self.pushButton_serial_connect.setEnabled(False)
        self.pushButton_serial_disconnect.pressed.connect(self.closeSerial)
        self.pushButton_serial_disconnect.setEnabled(False)
        self.pushButton_Comb1_power.pressed.connect(self.powerComb1)
        self.pushButton_Comb1_enable.pressed.connect(self.enableComb1)
        self.pushButton_Comb2_power.pressed.connect(self.powerComb2)
        self.pushButton_Comb2_enable.pressed.connect(self.enableComb2)
        self.Comb1_setPoint.setText('NA mA')  # not yet implemented
        self.Comb1_current.setText('NA mA')
        self.Comb2_setPoint.setText('NA mA')
        self.Comb2_current.setText('NA mA')
        self.disableUI_QCL()
        self.updateSerialList()

    def updateUI(self):
        print('TODO')  # should implement a way to read digitalOutput in the Arduino

    def updateSerialList(self):
        self.comboPorts.clear()
        self.Port_list = self.getSerialList()
        if len(self.Port_list) == 0:
            text = 'No serial port available'
            self.comboPorts.addItem(text)
            self.pushButton_serial_connect.setEnabled(False)
        else:
            for i in range(len(self.Port_list)):
                self.comboPorts.addItem(self.Port_list[i])
            self.pushButton_serial_connect.setEnabled(True)

    def getSerialList(self):
        comlist = serial.tools.list_ports.comports()
        connected = []
        for element in comlist:
            connected.append(element.device)
        return connected

    def disableUI_QCL(self):
        self.pushButton_Comb1_power.setEnabled(False)
        self.pushButton_Comb1_enable.setEnabled(False)
        self.pushButton_Comb2_power.setEnabled(False)
        self.pushButton_Comb2_enable.setEnabled(False)
        self.pushButton_serial_connect.setEnabled(True)
        self.pushButton_serial_disconnect.setEnabled(False)
        self.comboPorts.setEnabled(True)
        self.pushButton_Comb1_power.setText('Turn power ON')
        self.pushButton_Comb1_enable.setText('Enable output')
        self.pushButton_Comb2_power.setText('Turn power ON')
        self.pushButton_Comb2_enable.setText('Enable output')
        self.pushButton_Comb1_power.setStyleSheet('')
        self.pushButton_Comb1_enable.setStyleSheet('')
        self.pushButton_Comb2_power.setStyleSheet('')
        self.pushButton_Comb2_enable.setStyleSheet('')

    def enableUI_QCL(self):
        self.pushButton_Comb1_power.setEnabled(True)
        self.pushButton_Comb1_enable.setEnabled(True)
        self.pushButton_Comb2_power.setEnabled(True)
        self.pushButton_Comb2_enable.setEnabled(True)
        self.pushButton_serial_connect.setEnabled(False)
        self.pushButton_serial_disconnect.setEnabled(True)
        self.comboPorts.setEnabled(False)
        # The outputs reset to 0 when we reconnect, so reset the buttons too
        # (maybe use updateUI in the future).
        self.pushButton_Comb1_power.setText('Turn power ON')
        self.pushButton_Comb1_enable.setText('Enable output')
        self.pushButton_Comb2_power.setText('Turn power ON')
        self.pushButton_Comb2_enable.setText('Enable output')
        self.pushButton_Comb1_power.setStyleSheet('background-color: red')
        self.pushButton_Comb1_enable.setStyleSheet('background-color: red')
        self.pushButton_Comb2_power.setStyleSheet('background-color: red')
        self.pushButton_Comb2_enable.setStyleSheet('background-color: red')

    def startSerial(self):
        print('Opening serial port')
        index = self.comboPorts.currentIndex()
        port = self.Port_list[index]
        print(port)
        self.qcl.connect(port=port)
        # The Arduino resets its GPIOs on serial connection.
        self.comb_power = [0, 0]
        self.comb_enable = [0, 0]
        self.enableUI_QCL()

    def closeSerial(self):
        print('closing serial port')
        self.qcl.disconnect()
        self.disableUI_QCL()
        self.updateSerialList()

    def powerComb1(self):
        comb = 1
        actual_state = self.comb_power[comb - 1]
        new_state = int(not actual_state)
        self.qcl.powerComb(comb, new_state)
        self.comb_power[comb - 1] = new_state
        # self.updateUI()
        if new_state == 0:
            # QCL is now off: turn the button red, set text to 'Turn power ON'.
            self.pushButton_Comb1_power.setText('Turn power ON')
            self.pushButton_Comb1_power.setStyleSheet('background-color: red')
        else:
            # QCL is now on: turn the button green, set text to 'Turn power OFF'.
            self.pushButton_Comb1_power.setText('Turn power OFF')
            self.pushButton_Comb1_power.setStyleSheet('background-color: green')

    def enableComb1(self):
        comb = 1
        actual_state = self.comb_enable[comb - 1]
        new_state = int(not actual_state)
        self.qcl.enableComb(comb, new_state)
        self.comb_enable[comb - 1] = new_state
        # self.updateUI()
        if new_state == 0:
            # Output is now disabled: turn the button red, set text to 'Enable output'.
            self.pushButton_Comb1_enable.setText('Enable output')
            self.pushButton_Comb1_enable.setStyleSheet('background-color: red')
        else:
            # Output is now enabled: turn the button green, set text to 'Disable output'.
            self.pushButton_Comb1_enable.setText('Disable output')
            self.pushButton_Comb1_enable.setStyleSheet('background-color: green')

    def powerComb2(self):
        comb = 2
        actual_state = self.comb_power[comb - 1]
        new_state = int(not actual_state)
        self.qcl.powerComb(comb, new_state)
        self.comb_power[comb - 1] = new_state
        # self.updateUI()
        if new_state == 0:
            # QCL is now off: turn the button red, set text to 'Turn power ON'.
            self.pushButton_Comb2_power.setText('Turn power ON')
            self.pushButton_Comb2_power.setStyleSheet('background-color: red')
        else:
            # QCL is now on: turn the button green, set text to 'Turn power OFF'.
            self.pushButton_Comb2_power.setText('Turn power OFF')
            self.pushButton_Comb2_power.setStyleSheet('background-color: green')

    def enableComb2(self):
        comb = 2
        actual_state = self.comb_enable[comb - 1]
        new_state = int(not actual_state)
        self.qcl.enableComb(comb, new_state)
        self.comb_enable[comb - 1] = new_state
        # self.updateUI()
        if new_state == 0:
            # Output is now disabled: turn the button red, set text to 'Enable output'.
            self.pushButton_Comb2_enable.setText('Enable output')
            self.pushButton_Comb2_enable.setStyleSheet('background-color: red')
        else:
            # Output is now enabled: turn the button green, set text to 'Disable output'.
            self.pushButton_Comb2_enable.setText('Disable output')
            self.pushButton_Comb2_enable.setStyleSheet('background-color: green')
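
# Headless usage sketch (illustrative, not part of the original GUI): the same
# comms layer can be driven without Qt; the port name below is a placeholder.
#
#   qcl = QCL_comms()
#   qcl.connect(port='COM3')
#   qcl.powerComb(1, 1)    # power comb 1 on
#   qcl.enableComb(1, 1)   # enable comb 1 output
#   qcl.disconnect()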
if __name__ == '__main__':
    print("main: about to create controller instance")
    app = QtCore.QCoreApplication.instance()
    if app is None:
        print("QCoreApplication not running yet. creating.")
        bEventLoopWasRunningAlready = False
        app = QtWidgets.QApplication(sys.argv)
    else:
        bEventLoopWasRunningAlready = True
        print("QCoreApplication already running.")

    controller_obj = QCL_GUI()

    try:
        app.exec_()
    except Exception as e:
        controller_obj.closeSerial()
        print("Exception during app.exec_():")
        print(e)

    # This code here is to handle a weird interaction with IPython's event
    # handler: depending on the graphical backend setting in Spyder
    # (Tools/Preferences/IPython Console/Graphics/Backend = Automatic or
    # Inline), the Qt event loop might already be running, so the proper way
    # to tear down our application, for example to enable re-using the same
    # console to run another instance afterwards, is different.
    # if controller_obj.bEventLoopWasRunningAlready == False:
    #     controller_obj.stopCommunication()
    #     del controller_obj
| 32.495868 | 140 | 0.761445 | 1,064 | 7,864 | 5.450188 | 0.196429 | 0.130367 | 0.072081 | 0.045525 | 0.588205 | 0.506467 | 0.460252 | 0.395068 | 0.341093 | 0.325918 | 0 | 0.012206 | 0.1353 | 7,864 | 241 | 141 | 32.630705 | 0.840588 | 0.175483 | 0 | 0.373494 | 0 | 0 | 0.114126 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084337 | false | 0 | 0.036145 | 0 | 0.13253 | 0.054217 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68941224f41786823ed22982087718cae1a0fb4d | 1,814 | py | Python | setup.py | graykode/cella | b17859976becd1fca30a0ea897928a08157d22a2 | [
"Apache-2.0"
] | 71 | 2020-07-16T10:04:40.000Z | 2022-02-11T13:26:55.000Z | setup.py | graykode/cella | b17859976becd1fca30a0ea897928a08157d22a2 | [
"Apache-2.0"
] | 16 | 2020-08-10T19:24:16.000Z | 2022-02-10T02:22:56.000Z | setup.py | graykode/cella | b17859976becd1fca30a0ea897928a08157d22a2 | [
"Apache-2.0"
] | 5 | 2020-08-12T02:43:16.000Z | 2021-10-03T18:46:13.000Z | # Copyright 2020-present Tae Hwan Jung
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup
from sutils import find_name, get_setuptools, check_torch_tf_version

project_name = "matorage"
version = os.environ.get('MATORAGE_VERSION', '0.0.0')

if __name__ == "__main__":
    check_torch_tf_version()
    project_name = find_name()

    with open('README.md', 'r') as t:
        README = t.read()

    setup(
        # Project Name, Version
        name=project_name,
        version=version,
        long_description=README,
        long_description_content_type='text/markdown',
        # Author
        license="Apache License, Version 2.0",
        author="TaeHwan-Jung",
        author_email="nlkey2022@gmail.com",
        description="matorage is Matrix or Tensor(multidimensional matrix) "
                    "Object Storage with high availability "
                    "distributed systems for Deep Learning framework.",
        url="https://github.com/graykode/matorage",
        # Platform, Requires
        python_requires=">=3.5",
        platforms=["any"],
        project_urls={
            "Documentation": "https://matorage.readthedocs.io/en/stable/",
            "Source Code": "https://github.com/graykode/matorage",
        },
        **get_setuptools()
) | 34.884615 | 76 | 0.667585 | 226 | 1,814 | 5.230089 | 0.570796 | 0.050761 | 0.033841 | 0.035533 | 0.138748 | 0.050761 | 0 | 0 | 0 | 0 | 0 | 0.013649 | 0.232635 | 1,814 | 52 | 77 | 34.884615 | 0.835489 | 0.335722 | 0 | 0 | 0 | 0 | 0.339781 | 0.019344 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68943ade760b546a18543a05c85295bf4cc29615 | 12,447 | py | Python | bbot/bbot.py | Fynnyx/discord.py-bots | 9e6ad520bcf382ea195bde54b540a791f37ccac7 | [
"MIT"
] | 2 | 2021-06-03T09:48:31.000Z | 2021-09-12T09:28:12.000Z | bbot/bbot.py | Fynnyx/discord.py-bots | 9e6ad520bcf382ea195bde54b540a791f37ccac7 | [
"MIT"
] | 1 | 2021-06-03T10:37:02.000Z | 2021-06-03T10:37:02.000Z | bbot/bbot.py | Fynnyx/discord.py-bots | 9e6ad520bcf382ea195bde54b540a791f37ccac7 | [
"MIT"
] | null | null | null | '''
# -----------------------------------------------------------------------------------------------------------------------------------
Author: Fynn Westerath
Last Change: 08.06.2021
(c) Copyright. Not for commercial use. All rights reserved
GitHub
https://github.com/Fynnyx/discord.py-bots
# -----------------------------------------------------------------------------------------------------------------------------------
'''
# Imports
import discord
import asyncio
import json
# Gets the token from .env (more infos in README and .env.example). The file
# is closed after reading and the value is stripped so a trailing newline in
# .env cannot break the login.
with open(".env") as f:
    TOKEN = f.read().strip()

# Variables to change.
bbot_channel: int = 850646620655058944
bbot_prefix: str = '!bbot'
bbot_permission = [451776092785737728, 758301777178918922, 526692364782272532, 853233996565577739]
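
# textures.json layout assumed by the commands below (inferred from the
# read/write code in on_message):
#
#   {
#     "textures": {
#       "<itemname>": {"name": "<itemname>", "description": "<text>"}
#     }
#   }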
class Bbot(discord.Client):

    async def on_ready(self):
        self.profile_picture = client.user.avatar_url
        await client.change_presence(activity=discord.Activity(
            type=discord.ActivityType.watching, name='bbond beim Pixeln zu'))
        print('Bbot: logged in')

    async def on_message(self, message):
        if message.content.startswith(bbot_prefix):
            member = message.author
            channel = message.channel
            if message.author != client.user:
                if message.channel.id == bbot_channel:
                    if message.content == (bbot_prefix + ' info'):
                        info_embed = discord.Embed(
                            title="Here you can get the most information about this bot!",
                            colour=discord.Colour(0x65158d))
                        info_embed.set_author(name="Electionbot Info",
                                              icon_url=self.profile_picture)
                        info_embed.add_field(
                            name="General ❕:",
                            value="In general this bot is a private project. I made the bot in my free time.",
                            inline=True)
                        info_embed.add_field(
                            name="Personalize ✏:",
                            value="You can personalize this bot by downloading the code from GitHub (https://github.com/Fynnyx/discord.py-bots) and running it yourself.",
                            inline=True)
                        info_embed.add_field(
                            name="Help Command 📜:",
                            value="The bot prefix is `" + bbot_prefix + "`. You will use this in front of all other commands. You'll get more info by using `" + bbot_prefix + " help`.",
                            inline=True)
                        info_embed.add_field(
                            name='GitHub:',
                            value='Want to use more bots? Visit https://github.com/Fynnyx/discord.py-bots to get more open source Discord bots.',
                            inline=False)
                        info_embed.add_field(name="Everything done?",
                                             value="Have fun ❤", inline=False)
                        await channel.send(embed=info_embed)

                    if message.content == bbot_prefix + ' help':
                        help_embed = discord.Embed(title='Community Texturepack 🎨',
                                                   colour=discord.Colour(0x65158d))
                        help_embed.set_author(name="Texturepackbot",
                                              icon_url=self.profile_picture)
                        help_embed.add_field(
                            name='textures',
                            value='Mit `' + bbot_prefix + ' textures` kannst du alle Texturen vom Texturepack bekommen',
                            inline=True)
                        help_embed.add_field(
                            name='downloads',
                            value='Mit `' + bbot_prefix + ' downloads` bekommst du den Link zu der immer aktuellen Version',
                            inline=True)
                        help_embed.add_field(
                            name='add',
                            value="Bbond kann mit `" + bbot_prefix + ''' add` `"itemname"` `'description'` `zugehöriger Spieler` neue Items hinzufügen.''',
                            inline=True)
                        help_embed.add_field(
                            name='delete',
                            value="Pack-Developer können mit `" + bbot_prefix + ''' delete` `"itemname"` Items wieder löschen''',
                            inline=True)
                        help_embed.add_field(
                            name='Fehler gefunden?',
                            value='Schreibe Fynnyx, Bbond, Quacky oder notmappy an, sie können es ändern',
                            inline=False)
                        await channel.send(embed=help_embed)

                    if message.content == bbot_prefix + ' textures':
                        with open('textures.json', encoding='UTF-8') as f:
                            data = json.load(f)
                        textures = data['textures']
                        textures_embed = discord.Embed(title='Community Texturepack 🎨',
                                                       colour=discord.Colour(0x65158d))
                        textures_embed.set_author(name="Texturepackbot",
                                                  icon_url=self.profile_picture)
                        for texture in textures:
                            textures_embed.add_field(
                                name=texture,
                                value=data['textures'][texture]['description'],
                                inline=True)
                        await channel.send(embed=textures_embed)

                    if message.content.startswith(bbot_prefix + ' add'):
                        if member.id in bbot_permission:
                            add_message = message.content
                            get_description = add_message.split("'")
                            get_name = add_message.split('"')
                            add_message = add_message.split(' ')
                            last_index = len(add_message) - 1
                            if last_index >= 5:
                                itemname = get_name[1]
                                description = get_description[1]
                                user = add_message[last_index]
                                description = description + ' \n Für `' + user + '`'
                                with open('textures.json', encoding='UTF-8') as f:
                                    data = json.load(f)
                                data['textures'][str(itemname)] = {
                                    'name': str(itemname),
                                    'description': str(description)
                                }
                                with open('textures.json', 'w', encoding='UTF-8') as f:
                                    f.write(json.dumps(data, indent=2))
                                added_item_embed = discord.Embed(
                                    title='New Item added',
                                    description='Bbond hat eine neue Textur zum Texturepack hinzugefügt \n **' + str(itemname) + '**',
                                    colour=discord.Colour(0x65158d))
                                added_item_embed.set_author(name="Texturepackbot",
                                                            icon_url=self.profile_picture)
                                await channel.send(embed=added_item_embed)
                            else:
                                add_error_embed = discord.Embed(
                                    title="Something went wrong",
                                    description="`" + bbot_prefix + "` add `itemname` `description` `für wen`",
                                    colour=discord.Colour(0x65158d))
                                add_error_embed.set_author(name="Texturepackbot",
                                                           icon_url=self.profile_picture)
                                await channel.send(embed=add_error_embed)
                        else:
                            no_permission_embed = discord.Embed(
                                title="Permission Error",
                                description="Du hast keine Rechte zum Hinzufügen von Items. Frage Bbond oder Quacky",
                                colour=discord.Colour(0x65158d))
                            no_permission_embed.set_author(name="Texturepackbot",
                                                           icon_url=self.profile_picture)
                            await channel.send(embed=no_permission_embed)

                    if message.content == bbot_prefix + ' downloads':
                        download_embed = discord.Embed(title='Community Texturepack 🎨',
                                                       colour=discord.Colour(0x65158d))
                        download_embed.add_field(
                            name='Demo Version',
                            value='https://www.mediafire.com/file/6mwrqpi4idmyf2b/%25C2%25A76%25C2%25A7lKahlifar_%25C2%25A76%25C2%25A7lDemo_%25C2%25A7a%25C2%25A7lPack.zip/file',
                            inline=False)
                        download_embed.add_field(
                            name='Vollversion 1.16 vom 06.15.2021',
                            value='https://www.mediafire.com/file/lig23e0siumrdhr/Kahlifar_Pack_1.1_for_1.16.zip/file',
                            inline=False)
                        download_embed.add_field(
                            name='Vollversion 1.17 vom 06.15.2021',
                            value='https://www.mediafire.com/file/mgsufz7w74h3mt5/Kahlifar_Pack_1.1_for_1.17.zip/file',
                            inline=False)
                        await channel.send(embed=download_embed)

                    if message.content.startswith(bbot_prefix + ' delete'):
                        if member.id in bbot_permission:
                            del_message = str(message.content)
                            del_message_split = del_message.split('"')
                            del_item = str(del_message_split[1])
                            with open('textures.json') as f:
                                data = json.load(f)
                            if del_item in data['textures']:
                                data['textures'].pop(del_item)
                                delete_embed = discord.Embed(
                                    title='Item deleted',
                                    description=del_item + ' wurde gelöscht',
                                    colour=discord.Colour(0x65158d))
                                delete_embed.set_author(name='Texturepackbot')
                                await channel.send(embed=delete_embed)
                            else:
                                no_item_embed = discord.Embed(
                                    title="Search Error",
                                    description="Das gewünschte Item wurde nicht gefunden",
                                    colour=discord.Colour(0x65158d))
                                no_item_embed.set_author(name="Texturepackbot",
                                                         icon_url=self.profile_picture)
                                await channel.send(embed=no_item_embed)
                            with open('textures.json', 'w') as f:
                                json.dump(data, f, indent=2)
                        else:
                            no_permission_embed = discord.Embed(
                                title="Permission Error",
                                description="Du hast keine Rechte zum Hinzufügen von Items. Frage Bbond oder Quacky",
                                colour=discord.Colour(0x65158d))
                            no_permission_embed.set_author(name="Texturepackbot",
                                                           icon_url=self.profile_picture)
                            await channel.send(embed=no_permission_embed)
                else:
                    wrong_channel_embed = discord.Embed(title='Community Texturepack 🎨',
                                                        colour=discord.Colour(0x65158d))
                    wrong_channel_embed.set_author(name="Texturepackbot",
                                                   icon_url=self.profile_picture)
                    wrong_channel_embed.add_field(
                        name='Wrong Channel',
                        value='Um den DC aufgeräumt zu halten, benutze bitte den dafür vorgesehenen Channel')
                    await message.delete()
                    message = await channel.send(embed=wrong_channel_embed)
                    await asyncio.sleep(3)
                    await message.delete()


client = Bbot()
client.run(TOKEN)
| 58.712264 | 233 | 0.472724 | 1,109 | 12,447 | 5.166817 | 0.243463 | 0.022339 | 0.034031 | 0.044503 | 0.45445 | 0.389354 | 0.320419 | 0.262478 | 0.248866 | 0.248866 | 0 | 0.035734 | 0.42444 | 12,447 | 211 | 234 | 58.990521 | 0.762563 | 0.041456 | 0 | 0.29932 | 0 | 0.040816 | 0.192517 | 0 | 0 | 0 | 0.007382 | 0 | 0 | 1 | 0 | false | 0 | 0.020408 | 0 | 0.027211 | 0.006803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
6895ec82f062989a731d87ddd5ad1d90d75f2684 | 3,411 | py | Python | zwrite.py | mhorowitz/pykrb5 | 2132e9347bfb6fe37b9711908f07bbbf6cd9b75a | [
"BSD-2-Clause"
] | 5 | 2015-12-18T06:16:17.000Z | 2021-08-07T10:03:50.000Z | zwrite.py | mhorowitz/pykrb5 | 2132e9347bfb6fe37b9711908f07bbbf6cd9b75a | [
"BSD-2-Clause"
] | 1 | 2020-10-10T12:11:01.000Z | 2020-10-10T12:11:01.000Z | zwrite.py | mhorowitz/pykrb5 | 2132e9347bfb6fe37b9711908f07bbbf6cd9b75a | [
"BSD-2-Clause"
] | 1 | 2015-01-08T20:22:34.000Z | 2015-01-08T20:22:34.000Z | # Copyright (c) 2013, Marc Horowitz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This is a hokey test client. It is nowhere near a real zephyr
notice creator."""
import socket
import struct
import sys
import time
import krb5.client
def z_make_ascii_16(value):
    return "0x%04X" % value
def z_make_ascii_32(value):
    return "0x%08X" % value
def z_make_ascii(value):
    return " ".join("0x" + "".join("%02X" % ord(c) for c in value[i:i+4])
                    for i in xrange(0, len(value), 4))
def z_make_zcode(value):
    return "Z" + value.replace("\xff", "\xff\xf1").replace("\x00", "\xff\xf0")
DELIM = "\0"
REALM = "ATHENA.MIT.EDU"
KEY_USAGE = 1027
from_ip = socket.inet_aton(socket.gethostbyname(socket.gethostname()))
kclient = krb5.client.Client()
session = kclient.get_session("zephyr/zephyr@" + REALM)
version = "ZEPH0.2"
kind = 0 # unsafe
uid = struct.pack("!4sii", from_ip, int(time.time()), 0)  # int(): struct "i" expects an integer, not the float from time.time()
ztime = time.time()
port = 0
auth = 1 # yes
authent = session.make_ap_req_bytes()
class_ = "message"
class_inst = "personal"
opcode = ""
sender = str(session.client)
recipient = sys.argv[1]
default_format = ""
multiuid = uid
checksum = 0
multinotice = ""
sig = "py"
message = sys.argv[2]
if "@" not in recipient:
    recipient += "@" + REALM
before_checksum = [
    version,
    None,
    z_make_ascii_32(kind),
    z_make_ascii(uid),
    z_make_ascii_16(port),
    z_make_ascii_32(auth),
    z_make_ascii_32(len(authent)),
    z_make_zcode(authent),
    class_,
    class_inst,
    opcode,
    sender,
    recipient,
    default_format
]
after_checksum = [
    multinotice,
    z_make_ascii(multiuid)
]
body = [
    sig,
    message
]
header_count = len(before_checksum) + 1 + len(after_checksum)
before_checksum[1] = z_make_ascii_32(header_count)
checksum_data = DELIM.join(before_checksum + after_checksum + body)
checksum = z_make_zcode(session.key.make_checksum(KEY_USAGE, checksum_data))
fields = before_checksum + [checksum] + after_checksum + body
notice = DELIM.join(fields)
addr = socket.getaddrinfo("localhost", "zephyr-hm", 0, 0, socket.IPPROTO_UDP)[0]
s = socket.socket(*addr[0:3])
s.sendto(notice, addr[4])
| 28.663866 | 80 | 0.722369 | 496 | 3,411 | 4.840726 | 0.429435 | 0.027072 | 0.041649 | 0.02499 | 0.091628 | 0.056643 | 0.056643 | 0.056643 | 0.056643 | 0.056643 | 0 | 0.020907 | 0.172677 | 3,411 | 118 | 81 | 28.90678 | 0.829908 | 0.402228 | 0 | 0 | 0 | 0 | 0.061286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054795 | false | 0 | 0.068493 | 0.054795 | 0.178082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68976b61b0e35ec22032e7888e8758db4d01f982 | 4,507 | py | Python | idaes/apps/caprese/examples/cstr_rodrigo/simulation_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
] | null | null | null | idaes/apps/caprese/examples/cstr_rodrigo/simulation_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
] | 2 | 2021-08-18T19:42:02.000Z | 2021-10-22T04:44:31.000Z | idaes/apps/caprese/examples/cstr_rodrigo/simulation_main.py | Robbybp/idaes-pse | 8a41dbd05819f82806cf17a6e5f06aef79a775e3 | [
"RSA-MD"
] | 1 | 2021-03-17T20:31:17.000Z | 2021-03-17T20:31:17.000Z | ##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
"""
Example for Caprese's module for simulation of a plant.
"""
import random
from idaes.apps.caprese.dynamic_builder import DynamicSim
# from idaes.apps.caprese.util import apply_noise_with_bounds
from pyomo.environ import SolverFactory, Reference
from pyomo.dae.initialization import solve_consistent_initial_conditions
# import idaes.logger as idaeslog
from idaes.apps.caprese.examples.cstr_rodrigo.cstr_rodrigo_model import make_model
from idaes.apps.caprese.data_manager import PlantDataManager
from idaes.apps.caprese.plotlibrary import (
    plot_plant_state_evolution,
    plot_control_input)
__author__ = "Kuan-Han Lin"
# See if ipopt is available and set up solver
if SolverFactory('ipopt').available():
    solver = SolverFactory('ipopt')
    solver.options = {
        'tol': 1e-6,
        'bound_push': 1e-8,
        'halt_on_ampl_error': 'yes',
        'linear_solver': 'ma57',
    }
else:
    solver = None
def main():
    sample_time = 2.
    m_plant = make_model(horizon=sample_time, ntfe=4, ntcp=2, bounds = True)
    time_plant = m_plant.t
    # We must identify for the plant which variables are our
    # inputs and measurements.
    inputs = [
        m_plant.Tjinb[0],
    ]
    measurements = [
        m_plant.Tall[0, "T"],
        # m_plant.Tall[0, "Tj"],
        m_plant.Ca[0],
    ]
    # Construct the "plant simulator" object
    simulator = DynamicSim(
        plant_model=m_plant,
        plant_time_set=m_plant.t,
        inputs_at_t0=inputs,
        measurements_at_t0=measurements,
        sample_time=sample_time,
    )
    plant = simulator.plant
    p_t0 = simulator.plant.time.first()
    p_ts = simulator.plant.sample_points[1]
    #--------------------------------------------------------------------------
    # Declare variables of interest for plotting.
    # It's ok not declaring anything. The data manager will still save some
    # important data.
    states_of_interest = [Reference(simulator.plant.mod.Ca[:]),
                          Reference(simulator.plant.mod.Tall[:, "T"])]
    # Set up data manager to save plant data
    data_manager = PlantDataManager(plant, states_of_interest)
    #--------------------------------------------------------------------------
    solve_consistent_initial_conditions(plant, plant.time, solver)
    input_list = {ind: 250.+ind*5 if ind<=5 else 260.-ind*5 for ind in range(0, 11)}
    data_manager.save_initial_plant_data()
    plant.inject_inputs([input_list[0]])
    # This "initialization" really simulates the plant with the new inputs.
    simulator.plant.initialize_by_solving_elements(solver)
    simulator.plant.vectors.input[...].fix() #Fix the input to solve the plant
    solver.solve(simulator.plant, tee = True)
    data_manager.save_plant_data(iteration = 0)
    for i in range(1,11):
        print('\nENTERING SIMULATOR LOOP ITERATION %s\n' % i)
        simulator.plant.advance_one_sample()
        simulator.plant.initialize_to_initial_conditions()
        simulator.plant.inject_inputs([input_list[i]])
        simulator.plant.initialize_by_solving_elements(solver)
        simulator.plant.vectors.input[...].fix() #Fix the input to solve the plant
        solver.solve(simulator.plant, tee = True)
        data_manager.save_plant_data(iteration = i)
    plot_plant_state_evolution(states_of_interest, data_manager.plant_df)
    inputs_to_plot = [Reference(simulator.plant.mod.Tjinb[:])]
    plot_control_input(inputs_to_plot, data_manager.plant_df)
    return simulator, data_manager
if __name__ == '__main__':
    simulator, data_manager = main()
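# Added usage note (hedged): running `python simulation_main.py` steps the
# plant through the 11-sample Tjinb profile in input_list, records the
# trajectories with PlantDataManager, and plots the declared states and the
# control input; it assumes ipopt (built with ma57) is available on the path.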
| 38.194915 | 84 | 0.64544 | 546 | 4,507 | 5.128205 | 0.391941 | 0.075 | 0.023214 | 0.035714 | 0.137857 | 0.119286 | 0.119286 | 0.119286 | 0.119286 | 0.119286 | 0 | 0.011689 | 0.202796 | 4,507 | 117 | 85 | 38.521368 | 0.767604 | 0.309962 | 0 | 0.089552 | 0 | 0 | 0.042109 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0.104478 | 0 | 0.134328 | 0.014925 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
689bd1af280860dc42f871c63ac19bf8b597a24c | 9,364 | py | Python | postprocessing.py | alejomonbar/Neepy | edfff3445e94d12d15d4e98b25e8b47780ef0ebc | [
"MIT"
] | null | null | null | postprocessing.py | alejomonbar/Neepy | edfff3445e94d12d15d4e98b25e8b47780ef0ebc | [
"MIT"
] | null | null | null | postprocessing.py | alejomonbar/Neepy | edfff3445e94d12d15d4e98b25e8b47780ef0ebc | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 5 12:03:32 2020
Posprocessing functions
@author: jmon
"""
import numpy as np
from neepy import Neepy
import scipy.linalg as matrix
from functions_neepy import partial_trace, partial_trace_mul
from scipy.constants import k
sx = np.array([[0,1],[1,0]], dtype = complex)
sy = np.array([[0,-1j],[1j,0]], dtype = complex)
sz = np.array([[1,0],[0,-1]], dtype = complex)
sn = [sx, sy, sz]
def xyz(p_v):
    """
    Return the cartesian coordinates x,y,z of a vector described by a density state
    Arguments:
    p_v -- square matrix of n x 2 x 2
    Return:
    xyz_v -- 3 x n
    """
    n = len(p_v)
    xyz_v = np.zeros((3,n))
    for i,p in enumerate(p_v):
        xyz_v[:,i] = [2*p[0,1].real,2*p[1,0].imag,p[0,0].real - p[1,1].real]
    return xyz_v
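# Added sanity check (hedged): for the pure state [[1, 0], [0, 0]] = |0><0|
# the Bloch vector is (0, 0, 1), consistent with x = 2*Re(p01),
# y = 2*Im(p10), z = p00 - p11 used above.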
def xyz_mul(p_v):
    n = p_v.shape[0]
    qb = int(np.log2(p_v.shape[1]))
    xyz_v = np.zeros((3,n,qb))
    for i in range(n):
        for ii in range(qb):
            pt = p_v[i,:,:]
            p = partial_trace(pt,[ii])
            xyz_v[:,i,ii] = [2*p[0,1].real,2*p[1,0].imag,p[0,0].real - p[1,1].real]
    return xyz_v
def energy(pv,H):
    n = pv.shape[0]
    e = []
    for i in range(n):
        pt = pv[i,:,:]
        e.append((pt.dot(H[i,:,:])).trace())
    return np.array(e,dtype = complex)
def entropy(p_v):
    """
    Return the von Neumann entropy for the density state p_v
    Arguments:
    p_v (array): array with n x nn x nn
    Return:
    s (array): array n x 1 with the values of the entropy per each density state
    in p_v
    """
    n = len(p_v)
    s = np.zeros((n,1))
    for i, p in enumerate(p_v):
        s[i] = - np.real(p.dot(matrix.logm(p,disp =False)[0])).trace()
    return s
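# Added sanity check (hedged): entropy uses the natural logarithm via scipy's
# logm, so the maximally mixed qubit gives ln(2):
#   entropy(np.array([np.eye(2, dtype=complex) / 2]))  # -> [[0.6931...]]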
def entropy_production(p_v, dpdt_v):
    """
    Return the von Neumann entropy production rate for the density state p_v
    Arguments:
    p_v (array): array with n x nn x nn
    dpdt_v (array): array with n x nn x nn density state derivative
    Return:
    dS (array): array n x 1 with the entropy production rate per each density
    state in p_v
    """
    n = len(p_v)
    dS = np.zeros((n,1))
    for i, p in enumerate(p_v):
        dS[i] = np.real(-dpdt_v[i,:,:].dot(matrix.logm(p,disp =False)[0]) - (dpdt_v[i,:,:])).trace()
    return dS
def dQ(dpdt_v, H):
    """
    Return the rate of heat transfer
    Arguments:
    dpdt_v (array): array with n x nn x nn density state derivative
    Return:
    Q (array): array n x 1 with the heat transfer rate per each density state
    derivative in dpdt_v
    """
    n = len(dpdt_v)
    Q = np.zeros((n,1))
    for i, dp in enumerate(dpdt_v):
        Q[i] = (H[i,:,:] @ dp).trace()
    return Q
def observable(p_v,O):
    """
    Parameters
    ----------
    p_v : numpy array
        density state evolution.
    O : numpy array matrix
        Operator from which we want to extract the observable.
    Returns
    -------
    np.array
        observable evolution through time.
    """
    val = []
    for p in p_v:
        val.append((p @ O).trace())
    return np.array(val)
def concurrence(p_v):
    """
    Return the concurrence based on the paper of Shulman 2012
    "Demonstration of entanglement of electrostatically coupled singlet-triplet
    qubits"
    Arguments:
    p_v (array n x nn x nn): the evolution in time of the density operator based
    on the evolution equation used
    Return:
    con (array n x 1): array with the values of concurrence for the timeline
    of the density state.
    """
    sy = np.array([[0,-1j],[1j,0]])
    n = len(p_v)
    con = np.zeros((n,1))
    for i,p in enumerate(p_v):
        pb = np.dot(np.dot(np.kron(sy,sy),np.conjugate(p)),np.kron(sy,sy))
        psqrt = matrix.sqrtm(p)
        R = matrix.sqrtm(np.dot(np.dot(psqrt,pb),psqrt))
        eig = sorted(np.linalg.eigh(R)[0])
        # Note (added): Wootters' concurrence clamps at zero,
        # C = max(0, eig[3] - eig[2] - eig[1] - eig[0]); this version can go negative.
        con[i] = eig[3] - eig[2] - eig[1] - eig[0]
    return con
def concurrence2(p_v):
    """
    Return the concurrence based on the paper of Shulman 2012
    "Demonstration of entanglement of electrostatically coupled singlet-triplet
    qubits"
    Arguments:
    p_v (array n x nn x nn): the evolution in time of the density operator based
    on the evolution equation used
    Return:
    con (array n x 1): array with the values of concurrence for the timeline
    of the density state.
    """
    sy = np.array([[0,-1j],[1j,0]])
    n = len(p_v)
    con = np.zeros((n,1))
    for i,p in enumerate(p_v):
        eig = sorted(np.linalg.eigh(p)[0])
        con[i] = eig[3] - eig[2] - eig[1] - eig[0]
    return con
def fidelity(p_ideal,p_real):
    """
    Parameters
    ----------
    p_ideal : square matrix or array of square matrices
        The ideal density state
    p_real : square matrix or array of square matrices
        The experimental or simulated density state.
    Returns
    -------
    F : value or array
        Fidelity of the output signal.
    """
    if len(p_real.shape) == 3:
        F = []
        for i,p in enumerate(p_real):
            if len(p_ideal.shape) == 3:
                sqrt_p_ideal = matrix.sqrtm(p_ideal[i,:,:])
            else:
                sqrt_p_ideal = matrix.sqrtm(p_ideal)
            F.append(np.trace(matrix.sqrtm(sqrt_p_ideal.dot(p).dot(sqrt_p_ideal)))**2)
        F = np.array(F)
    else:
        sqrt_p_ideal = matrix.sqrtm(p_ideal)
        F = np.trace(matrix.sqrtm(sqrt_p_ideal.dot(p_real).dot(sqrt_p_ideal)))**2
    return F
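# Added sanity check (hedged): the Uhlmann fidelity of a state with itself is
# 1, e.g. fidelity(np.eye(2) / 2, np.eye(2) / 2) ~ 1.0 up to numerical error.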
def distanceBS(gamma1, gamma2):
    return np.arccos(0.5*(gamma1.T.conjugate() @ gamma2 + gamma2.T.conjugate() @ gamma1))
def mutualInf(p):
    pa = partial_trace_mul(p, [2,2], axis = 0)
    pb = partial_trace_mul(p, [2,2], axis = 1)
    # Sign fix (added): mutual information is S(A) + S(B) - S(AB); with
    # S = -Tr(rho log rho) the original returned the negative of every term.
    return -(pa @ matrix.logm(pa)).trace() - (pb @ matrix.logm(pb)).trace() + (p @ matrix.logm(p)).trace()
def CHSH(p):
    """Clauser-Horne-Shimony-Holt"""
    T = np.zeros((3,3), dtype = complex)
    for i in range(3):
        for j in range(3):
            T[i,j] = (p @ np.kron(sn[i], sn[j])).trace()
    eig = sorted(matrix.eig(T)[0])
    t11 = eig[-1]
    t22 = eig[-2]
    return 2 * np.sqrt(t11**2 + t22**2)
def eigenvalues(p_v):
    n,l1,l2 = np.shape(p_v)
    eigen = np.zeros((n,l1))
    for i,p in enumerate(p_v):
        eigen[i,:] = matrix.eigh(p)[0]
    return eigen
def eigen_evol(p_v):
    n,l1,l2 = np.shape(p_v)
    evol = np.zeros((n,l1),dtype = complex)
    for i,p in enumerate(p_v):
        for nn in range(l1):
            evol[i,nn] = p[nn,nn]
    return evol
def trace_mul(p_v, partial):
    """
    Parameters
    ----------
    p_v : array
        matrix with dimensions of the number of subsystems; in the case of a
        qubit coupled to a harmonic oscillator with 5 energy levels it has shape
        2 x 5 = (10,10).
    partial : List
        Information of the dimensions of the subsystems and the axis over which
        the partial trace is taken.
    Returns
    -------
    p_sub : array
        Matrix with dimension of the subsystem times n (the number of evolution
        steps).
    """
    dim = partial[0]
    axis = partial[1]
    n, l1, l2 = np.shape(p_v)
    p_sub = np.zeros((n,l1//dim[axis],l1//dim[axis]),dtype = complex)
    for i in range(n):
        p_sub[i,:,:] = partial_trace_mul(p_v[i,:,:],dim,axis)
    return p_sub
def tauDf(p_v, x):
    """
    Supposition that tauD = x[0] Tr(p(t) @ sz) + x[1]
    Parameters
    ----------
    p_v : array
        density state.
    x : array
        Based on the two-qubit paper linear relation of the dissipative constant.
    Returns
    -------
    tauD : array
        Dissipative time of the SEAQT equation of motion with the supposition
        that it depends on the energy variation
    """
    s3 = np.array([[1,0],[0,-1]])
    dims = int(np.log2(len(p_v[0])))
    tauD = {_:[] for _ in range(dims)}
    for p in p_v:
        for q in range(dims):
            tauD[q].append(x[q] * (np.trace(partial_trace(p, [q]) @ s3) + 1))
    return tauD
def inform(p_v,dpdt_v,properties,p_ideal=None, partial=None, H=None, x=None):
    data = {}
    for i in properties:
        if i == 's':
            data['s'] = entropy(p_v)
        elif i == 'ds':
            data['ds'] = entropy_production(p_v,dpdt_v)
        elif i == 'xyz':
            data['xyz'] = xyz(p_v)
        elif i == 'xyz_mul':
            data['xyz_mul'] = xyz_mul(p_v)
        elif i == 'con':
            data['con'] = concurrence(p_v)
        elif i == 'con2':
            data['con2'] = concurrence2(p_v)
        elif i == 'eigen':
            data['eigen'] = eigenvalues(p_v)
        elif i == 'F':
            data['F'] = fidelity(p_ideal,p_v)
        elif i == 'eigen_evol':
            data["eigen_evol"] = eigen_evol(p_v)
        elif i == "trace_mul":
            data["trace_mul"] = trace_mul(p_v, partial)
        elif i == 'energy':
            data['energy'] = energy(p_v, H)
        elif i == "dQ":
            data["dQ"] = dQ(dpdt_v, H)
        elif i == "temperature":
            data["temperature"] = dQ(dpdt_v, H) / (k*entropy_production(p_v, dpdt_v))
        elif i == "tauD":
            data["tauD"] = tauDf(p_v, x)
        else:
            raise Warning("This property is not included!")
    return data
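# Added usage sketch (hedged): given a trajectory p_v and its time derivative
# dpdt_v, something like
#   results = inform(p_v, dpdt_v, ['s', 'ds', 'xyz', 'eigen'])
# returns a dict keyed by the requested property names.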
| 28.54878 | 105 | 0.551367 | 1,470 | 9,364 | 3.418367 | 0.161905 | 0.023483 | 0.01194 | 0.011144 | 0.418706 | 0.385075 | 0.355821 | 0.315622 | 0.284577 | 0.252736 | 0 | 0.025173 | 0.30425 | 9,364 | 328 | 106 | 28.54878 | 0.746124 | 0.328599 | 0 | 0.210843 | 0 | 0 | 0.028933 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108434 | false | 0 | 0.03012 | 0.006024 | 0.246988 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
689c3de35c53d75890b9dbc0ddffc79384f78637 | 1,839 | py | Python | clam/utils.py | URI-ABD/clam-astro | d7daef444c3e41ed1ffe952301b96c0f07605864 | [
"MIT"
] | 8 | 2020-07-20T20:57:06.000Z | 2022-03-15T14:00:26.000Z | clam/utils.py | URI-ABD/clam-astro | d7daef444c3e41ed1ffe952301b96c0f07605864 | [
"MIT"
] | 25 | 2020-01-30T00:47:34.000Z | 2022-01-25T06:23:50.000Z | clam/utils.py | URI-ABD/clam-astro | d7daef444c3e41ed1ffe952301b96c0f07605864 | [
"MIT"
] | 3 | 2020-07-20T20:49:29.000Z | 2022-01-24T08:04:01.000Z | """ Some common functions and constants for all of CLAM.
"""
SUBSAMPLE_LIMIT = 100
BATCH_SIZE = 10_000
EPSILON = 1e-8
def catch_normalization_mode(mode: str) -> None:
    """ Make sure that the normalization mode is allowed. """
    from typing import List
    modes: List[str] = ['linear', 'gaussian', 'sigmoid']
    if mode not in modes:
        raise ValueError(f'Normalization method {mode} is undefined. Must be one of {modes}.')
    else:
        return
def normalize(values, mode: str):
    """ Normalizes each column in values into a [0, 1] range.

    :param values: A 1-d or 2-d array of values to normalize.
    :param mode: Normalization mode to use. Must be one of 'linear', 'gaussian', or 'sigmoid'.
    :return: array of normalized values.
    """
    import numpy as np
    squeeze = False
    if len(values.shape) == 1:
        squeeze = True
        values = np.expand_dims(values, axis=1)
    if mode == 'linear':
        min_v, max_v = np.min(values, axis=0), np.max(values, axis=0)
        for i in range(values.shape[1]):
            if min_v[i] == max_v[i]:
                max_v[i] += 1
                values[:, i] = min_v[i] + 0.5
        values = (values - min_v) / (max_v - min_v)
    else:
        mu = np.mean(values, axis=0)
        sigma = np.std(values, axis=0)
        for i in range(values.shape[1]):
            if sigma[i] < EPSILON:
                values[:, i] = 0.5
            else:
                if mode == 'gaussian':
                    from scipy.special import erf
                    values[:, i] = (1 + erf((values[:, i] - mu[i]) / (sigma[i] * np.sqrt(2)))) / 2
                else:
                    values[:, i] = 1 / (1 + np.exp(-(values[:, i] - mu[i]) / sigma[i]))
    values = values.clip(EPSILON, 1)
    if squeeze:
        values = np.squeeze(values)
    return values
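# Added usage sketch (hedged): each mode maps columns into [EPSILON, 1], e.g.
#   import numpy as np
#   normalize(np.array([0., 5., 10.]), mode='linear')  # -> [1e-08, 0.5, 1.0]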
| 31.169492 | 98 | 0.547036 | 258 | 1,839 | 3.841085 | 0.375969 | 0.042381 | 0.0444 | 0.016145 | 0.117053 | 0.104945 | 0.072654 | 0.072654 | 0.072654 | 0.072654 | 0 | 0.026232 | 0.315933 | 1,839 | 58 | 99 | 31.706897 | 0.761526 | 0.159326 | 0 | 0.153846 | 0 | 0 | 0.068587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051282 | false | 0 | 0.076923 | 0 | 0.179487 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
689e4390dee148d8449d6ea72e92680b2ea359e6 | 2,701 | py | Python | tests/tagulous_tests_migration/django_migrations_expected/0003_tree.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | [
"Apache-2.0"
] | null | null | null | tests/tagulous_tests_migration/django_migrations_expected/0003_tree.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | [
"Apache-2.0"
] | null | null | null | tests/tagulous_tests_migration/django_migrations_expected/0003_tree.py | marxide/django-tagulous | 80c057c5dd2dce85f4bb531b25d3b4982bd03e8f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.utils import six
import tagulous.models.fields
import tagulous.models.migrations
class Migration(migrations.Migration):
    dependencies = [("tagulous_tests_migration", "0002_tagged")]
    operations = (
        [
            migrations.AddField(
                model_name="tagulous_migrationtestmodel_tags",
                name="parent",
                field=models.ForeignKey(
                    to="tagulous_tests_migration.Tagulous_MigrationTestModel_tags",
                    related_name="children",
                    blank=True,
                    null=True,
                    on_delete=models.CASCADE,
                ),
                preserve_default=True,
            ),
            migrations.AddField(
                model_name="tagulous_migrationtestmodel_tags",
                name="label",
                field=models.CharField(
                    default="-",
                    max_length=191,
                    help_text=b"The name of the tag, without ancestors",
                ),
                preserve_default=True,
            ),
            migrations.AddField(
                model_name="tagulous_migrationtestmodel_tags",
                name="level",
                field=models.IntegerField(
                    default=1, help_text=b"The level of the tag in the tree"
                ),
                preserve_default=True,
            ),
        ]
        + tagulous.models.migrations.add_unique_field(
            model_name="tagulous_migrationtestmodel_tags",
            name="path",
            field=models.TextField(),
            preserve_default=False,
            set_fn=lambda obj: setattr(obj, "path", six.text_type(obj.pk)),
        )
        + [
            migrations.AlterField(
                model_name="migrationtestmodel",
                name="tags",
                field=tagulous.models.fields.TagField(
                    to="tagulous_tests_migration.Tagulous_MigrationTestModel_tags",
                    help_text=b"Enter a comma-separated tag string",
                    _set_tag_meta=True,
                    tree=True,
                ),
                preserve_default=True,
            ),
            migrations.AlterUniqueTogether(
                name="tagulous_migrationtestmodel_tags",
                unique_together=set([("slug", "parent")]),
            ),
            tagulous.models.migrations.ChangeModelBases(
                name="tagulous_migrationtestmodel_tags",
                bases=(tagulous.models.models.BaseTagTreeModel, models.Model),
            ),
        ]
    )
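# Added note (hedged, judging from its use here): add_unique_field is a helper
# for adding a unique column to an already-populated table -- set_fn seeds each
# row's `path` (here from its primary key) before the unique constraint takes
# effect.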
| 35.077922 | 83 | 0.530174 | 222 | 2,701 | 6.216216 | 0.387387 | 0.150725 | 0.173913 | 0.147826 | 0.269565 | 0.269565 | 0.238406 | 0.160145 | 0.115942 | 0.115942 | 0 | 0.005392 | 0.382081 | 2,701 | 76 | 84 | 35.539474 | 0.82145 | 0.007775 | 0 | 0.362319 | 0 | 0 | 0.190441 | 0.123226 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.072464 | 0 | 0.115942 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
689eff138ffdb981876fc97b88c7712ec3ac4525 | 1,042 | py | Python | Lab8/Task2.py | triod315/SysProgLabworks | c9f98e8d3d507b738334f459bb76924fb280196d | [
"MIT"
] | 1 | 2019-06-19T16:07:04.000Z | 2019-06-19T16:07:04.000Z | Lab8/Task2.py | triod315/SysProgLabworks | c9f98e8d3d507b738334f459bb76924fb280196d | [
"MIT"
] | null | null | null | Lab8/Task2.py | triod315/SysProgLabworks | c9f98e8d3d507b738334f459bb76924fb280196d | [
"MIT"
] | 1 | 2019-06-10T13:36:22.000Z | 2019-06-10T13:36:22.000Z | from zeep import Client
from sys import version_info
if version_info.major == 2:
    # Note (hedged fix): on Python 2 the module is named Tkinter; the original
    # imported from "tkinter" in both branches, which fails under Python 2.
    from Tkinter import Tk, Label, Button, Entry, Frame
elif version_info.major == 3:
    from tkinter import Tk, Label, Button, Entry, Frame
def is_username_free():
result = client.service.IsLoginFree(username_entry.get())
is_username_free_result_label.config(text = 'Result: ' + str(result))
client = Client('http://mail.univ.net.ua/plutoniy/Service1.svc?wsdl', port_name='HTTPS-Anon')
client.transport.session.verify = False
root = Tk()
root.title('SOAP Request (Task2)')
root.geometry('430x150')
root.resizable(0, 0)
frame = Frame(root)
frame.place(in_ = root, anchor = "c", relx = .5, rely = .5)
frame.pack()
is_username_free_result_label = Label(frame, text = 'Result 2')
is_username_free_button = Button(frame, text = 'Check username', command = is_username_free)
username_entry = Entry(frame)
is_username_free_button.grid(row = 3)
is_username_free_result_label.grid(row = 4)
username_entry.grid(row = 2, pady = (10, 0))
root.mainloop() | 29.771429 | 93 | 0.738004 | 157 | 1,042 | 4.707006 | 0.452229 | 0.094723 | 0.132612 | 0.108254 | 0.209743 | 0.108254 | 0.108254 | 0.108254 | 0 | 0 | 0 | 0.023128 | 0.128599 | 1,042 | 35 | 94 | 29.771429 | 0.790749 | 0 | 0 | 0.08 | 0 | 0 | 0.113135 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.16 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a18fbacb46a8631e61b8e030a7d82bcaf2293c | 2,460 | py | Python | concertista/server.py | andrsd/spotify-classical-qt | 86c05240a01a067a3368ca47f08b7de97a96b4c6 | [
"MIT"
] | 1 | 2021-08-13T17:06:07.000Z | 2021-08-13T17:06:07.000Z | concertista/server.py | andrsd/concertista | 1ed4f1a52a3d5472866d9ff0644f60c1cc8fef9b | [
"MIT"
] | 11 | 2021-02-09T16:38:04.000Z | 2022-03-21T22:25:54.000Z | concertista/server.py | andrsd/spotify-classical-qt | 86c05240a01a067a3368ca47f08b7de97a96b4c6 | [
"MIT"
] | null | null | null | import os
import webbrowser
from flask import Flask, request, redirect
from waitress import serve
from dotenv import load_dotenv
import spotipy
import spotipy.util
from PyQt5 import QtCore
from pathlib import Path
load_dotenv()
SPOTIFY_CLIENT_ID = os.getenv('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.getenv('SPOTIFY_CLIENT_SECRET')
SPOTIFY_REDIRECT_URI = 'http://localhost:9182'
# port where we run our http server so we can talk to spotify
port = int(os.environ.get("CONCERTISTA_PORT", 9182))
app = Flask(__name__)
caches_folder = os.path.join(str(Path.home()), '.cache', 'concertista')
if not os.path.exists(caches_folder):
    os.makedirs(caches_folder)
def session_cache_path():
    return os.path.join(caches_folder, 'spotify')
@app.route('/')
def index():
    scope = ' '.join([
        'user-read-playback-state',
        'user-modify-playback-state',
        'user-read-currently-playing'
    ])
    auth_manager = spotipy.oauth2.SpotifyOAuth(
        scope=scope,
        client_id=SPOTIFY_CLIENT_ID,
        client_secret=SPOTIFY_CLIENT_SECRET,
        redirect_uri=SPOTIFY_REDIRECT_URI,
        cache_path=session_cache_path(),
        show_dialog=True)
    if request.args.get("code"):
        # Being redirected from Spotify auth page
        auth_manager.get_access_token(request.args.get("code"))
        return redirect('/')
    if not auth_manager.get_cached_token():
        # Send user to spotify authorization page
        auth_url = auth_manager.get_authorize_url()
        webbrowser.open_new(auth_url)
        return f'Redirected to '\
               f'<a href="{auth_url}">Spotify authorization page</a>.'
    # Signed in, display info
    spotify = spotipy.Spotify(auth_manager=auth_manager)
    signaler.connectToSpotify.emit(spotify)
    return f'<center>'\
           f'<h1>Concertista</h1>' \
           f'{spotify.me()["display_name"]}, '\
           f'access to your account was granted. <br/>' \
           f'You can close this window, now.' \
           f'</center>'
class ServerThread(QtCore.QThread):
    """
    Server thread for spotify authorization
    """
    def run(self):
        """
        Thread body
        """
        serve(app, host="0.0.0.0", port=port)
class Signaler(QtCore.QObject):
    """
    Signaler class to communicate with Qt
    """
    connectToSpotify = QtCore.pyqtSignal(object)
    def __init__(self):
        super().__init__()
        pass
signaler = Signaler()
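# Added note (hedged, inferred from this module): the Qt application starts
# ServerThread (waitress serving this Flask app on CONCERTISTA_PORT), the '/'
# route walks the user through Spotify's OAuth page, and the authenticated
# spotipy.Spotify client is handed back to the GUI via signaler.connectToSpotify.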
| 25.625 | 71 | 0.661382 | 309 | 2,460 | 5.067961 | 0.411003 | 0.049808 | 0.028736 | 0.02682 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008368 | 0.222764 | 2,460 | 95 | 72 | 25.894737 | 0.810669 | 0.103659 | 0 | 0 | 0 | 0 | 0.186685 | 0.071695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0.016949 | 0.152542 | 0.016949 | 0.338983 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a3c3aaa202ff398888c87eb3519636a2330192 | 4,379 | py | Python | Validation/RecoTrack/python/GenParticleSelectionsForEfficiency_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Validation/RecoTrack/python/GenParticleSelectionsForEfficiency_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Validation/RecoTrack/python/GenParticleSelectionsForEfficiency_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
GenParticleSelectionForEfficiency = cms.PSet(
    lipGP = cms.double(30.0),
    chargedOnlyGP = cms.bool(True),
    pdgIdGP = cms.vint32(),
    minRapidityGP = cms.double(-2.5),
    ptMinGP = cms.double(0.005),
    maxRapidityGP = cms.double(2.5),
    tipGP = cms.double(60),
    statusGP = cms.int32(1)
)
from Configuration.Eras.Modifier_phase1Pixel_cff import phase1Pixel
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
phase1Pixel.toModify(GenParticleSelectionForEfficiency,minRapidityGP = -3.0, maxRapidityGP = 3.0)
phase2_tracker.toModify(GenParticleSelectionForEfficiency,minRapidityGP = -4.5, maxRapidityGP = 4.5)
generalGpSelectorBlock = cms.PSet(
    status = cms.int32(1),
    lip = cms.double(30.0),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    ptMin = cms.double(0.9),
    maxRapidity = cms.double(2.5),
    tip = cms.double(3.5),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
GpSelectorForEfficiencyVsEtaBlock = cms.PSet(
    status = cms.int32(1),
    lip = cms.double(30.0),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    ptMin = cms.double(0.9),
    maxRapidity = cms.double(2.5),
    tip = cms.double(3.5),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
GpSelectorForEfficiencyVsPhiBlock = cms.PSet(
    status = cms.int32(1),
    lip = cms.double(30.0),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    ptMin = cms.double(0.9),
    maxRapidity = cms.double(2.5),
    tip = cms.double(3.5),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
GpSelectorForEfficiencyVsPtBlock = cms.PSet(
    status = cms.int32(1),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    maxRapidity = cms.double(2.5),
    ptMin = cms.double(0.050),
    tip = cms.double(3.5),
    lip = cms.double(30.0),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
GpSelectorForEfficiencyVsVTXRBlock = cms.PSet(
    status = cms.int32(1),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    ptMin = cms.double(0.9),
    maxRapidity = cms.double(2.5),
    lip = cms.double(30.0),
    tip = cms.double(30.0),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
GpSelectorForEfficiencyVsVTXZBlock = cms.PSet(
    status = cms.int32(1),
    chargedOnly = cms.bool(True),
    pdgId = cms.vint32(),
    minRapidity = cms.double(-2.5),
    ptMin = cms.double(0.9),
    maxRapidity = cms.double(2.5),
    lip = cms.double(35.0),
    tip = cms.double(3.5),
    invertRapidityCut = cms.bool(False),
    maxPhi = cms.double(3.2),
    minPhi = cms.double(-3.2)
)
def _modifyForPhase1(pset):
    pset.minRapidity = -3
    pset.maxRapidity = 3
    pset.tip = 2.5 # beampipe is around 2.0, BPIX1 is at 2.9
from Configuration.Eras.Modifier_phase1Pixel_cff import phase1Pixel
phase1Pixel.toModify(generalGpSelectorBlock, _modifyForPhase1)
phase1Pixel.toModify(GpSelectorForEfficiencyVsEtaBlock, _modifyForPhase1)
phase1Pixel.toModify(GpSelectorForEfficiencyVsPhiBlock, _modifyForPhase1)
phase1Pixel.toModify(GpSelectorForEfficiencyVsPtBlock, _modifyForPhase1)
phase1Pixel.toModify(GpSelectorForEfficiencyVsVTXRBlock, _modifyForPhase1)
phase1Pixel.toModify(GpSelectorForEfficiencyVsVTXZBlock, _modifyForPhase1)
def _modifyForPhase2(pset):
    pset.minRapidity = -4.5
    pset.maxRapidity = 4.5
    pset.tip = 2.5 # IT1 will be around 3.0 (as in Phase1)
from Configuration.Eras.Modifier_phase2_tracker_cff import phase2_tracker
phase2_tracker.toModify(generalGpSelectorBlock, _modifyForPhase2)
phase2_tracker.toModify(GpSelectorForEfficiencyVsEtaBlock, _modifyForPhase2)
phase2_tracker.toModify(GpSelectorForEfficiencyVsPhiBlock, _modifyForPhase2)
phase2_tracker.toModify(GpSelectorForEfficiencyVsPtBlock, _modifyForPhase2)
phase2_tracker.toModify(GpSelectorForEfficiencyVsVTXRBlock, _modifyForPhase2)
phase2_tracker.toModify(GpSelectorForEfficiencyVsVTXZBlock, _modifyForPhase2)
| 34.753968 | 100 | 0.710436 | 518 | 4,379 | 5.942085 | 0.140927 | 0.137427 | 0.055231 | 0.050032 | 0.487979 | 0.480182 | 0.477908 | 0.477908 | 0.430474 | 0.430474 | 0 | 0.055571 | 0.161681 | 4,379 | 125 | 101 | 35.032 | 0.782893 | 0.017584 | 0 | 0.6 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017391 | false | 0 | 0.043478 | 0 | 0.06087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a3f892f6babebd8ad4a6dccb1f021be7d34c0a | 1,675 | py | Python | machine/qemu/sources/u-boot/test/py/tests/test_shell_basics.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | 1 | 2021-11-21T19:56:29.000Z | 2021-11-21T19:56:29.000Z | machine/qemu/sources/u-boot/test/py/tests/test_shell_basics.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | null | null | null | machine/qemu/sources/u-boot/test/py/tests/test_shell_basics.py | muddessir/framework | 5b802b2dd7ec9778794b078e748dd1f989547265 | [
"MIT"
] | null | null | null | # SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
# Test basic shell functionality, such as commands separate by semi-colons.
import pytest
pytestmark = pytest.mark.buildconfigspec('cmd_echo')
def test_shell_execute(u_boot_console):
    """Test any shell command."""
    response = u_boot_console.run_command('echo hello')
    assert response.strip() == 'hello'
def test_shell_semicolon_two(u_boot_console):
    """Test two shell commands separated by a semi-colon."""
    cmd = 'echo hello; echo world'
    response = u_boot_console.run_command(cmd)
    # This validation method ignores the exact whitespace between the strings
    assert response.index('hello') < response.index('world')
def test_shell_semicolon_three(u_boot_console):
    """Test three shell commands separated by a semi-colon, with variable
    expansion dependencies between them."""
    cmd = 'setenv list 1; setenv list ${list}2; setenv list ${list}3; ' + \
        'echo ${list}'
    response = u_boot_console.run_command(cmd)
    assert response.strip() == '123'
    u_boot_console.run_command('setenv list')
def test_shell_run(u_boot_console):
    """Test the "run" shell command."""
    u_boot_console.run_command('setenv foo \'setenv monty 1; setenv python 2\'')
    u_boot_console.run_command('run foo')
    response = u_boot_console.run_command('echo ${monty}')
    assert response.strip() == '1'
    response = u_boot_console.run_command('echo ${python}')
    assert response.strip() == '2'
    u_boot_console.run_command('setenv foo')
    u_boot_console.run_command('setenv monty')
    u_boot_console.run_command('setenv python')
| 36.413043 | 80 | 0.719403 | 235 | 1,675 | 4.906383 | 0.314894 | 0.065048 | 0.156114 | 0.143105 | 0.35039 | 0.35039 | 0.256722 | 0 | 0 | 0 | 0 | 0.014245 | 0.161791 | 1,675 | 45 | 81 | 37.222222 | 0.80698 | 0.269851 | 0 | 0.08 | 0 | 0 | 0.186611 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.16 | false | 0 | 0.04 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a4669437d4166fcbdf808f1656a1aa9ce0f049 | 362 | py | Python | edison/resources/users.py | DoRTaL94/edison | 3a924e31e7074d86e9d71710d2775fab9f01e63a | [
"MIT"
] | null | null | null | edison/resources/users.py | DoRTaL94/edison | 3a924e31e7074d86e9d71710d2775fab9f01e63a | [
"MIT"
] | null | null | null | edison/resources/users.py | DoRTaL94/edison | 3a924e31e7074d86e9d71710d2775fab9f01e63a | [
"MIT"
] | null | null | null | from flask_restful import Resource
from flask_jwt_extended import jwt_required
import edison.models as models
class Users(Resource):
    @jwt_required
    def get(self):
        status = 200
        response = list(
            map(
                lambda user: user.to_json(), models.User.query.all()
            )
        )
        return response, status
| 20.111111 | 68 | 0.607735 | 42 | 362 | 5.095238 | 0.666667 | 0.084112 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012245 | 0.323204 | 362 | 17 | 69 | 21.294118 | 0.861224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.461538 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a5434332990e1d5776edd7834a984b26f05855 | 8,155 | py | Python | capsulenet.py | czyczyyzc/MyCapsuleNet | 09171db1cfa13e0bcc3247764b6694e2f7cecdb3 | [
"MIT"
] | 1 | 2020-10-20T07:19:12.000Z | 2020-10-20T07:19:12.000Z | capsulenet.py | czyczyyzc/MyCapsuleNet | 09171db1cfa13e0bcc3247764b6694e2f7cecdb3 | [
"MIT"
] | null | null | null | capsulenet.py | czyczyyzc/MyCapsuleNet | 09171db1cfa13e0bcc3247764b6694e2f7cecdb3 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from Mybase import layers
from Mybase.layers import *
from Mybase.layers_utils import *
from Mybase.losses import *
class CapsuleNet(object):
    def __init__(self, cls_num=10, reg=1e-4, typ=tf.float32):
        self.cls_num = cls_num #class number
        self.x_dim = 8
        self.v_dim = 16
        self.reg = reg #regularization
        self.typ = typ #dtype
        self.mod_tra = True #mode training
        self.glb_pol = False #global pooling
    def squash(self, x=None, layer=0, eps=1e-7):
        with tf.variable_scope('squash_'+str(layer)) as scope:
            squa = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
            sqrt = tf.sqrt(squa + eps)
            x = squa / (1.0 + squa) * x / sqrt
            print_activations(x)
            return x
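    # Added note (hedged, not in the original): squash is the capsule
    # nonlinearity v = (||s||^2 / (1 + ||s||^2)) * (s / ||s||) from Sabour et
    # al. (2017); eps keeps the sqrt well-defined when ||s|| approaches zero.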
    def project(self, x=None, layer=0, reuse=False, trainable=True):
        x_shp = get_shape(x) #[img_num, 1152, 8]
        with tf.variable_scope('project_'+str(layer), reuse=reuse) as scope:
            w = tf.get_variable(name='weights', shape=x_shp[1:3]+[self.cls_num,self.v_dim], dtype=self.typ, \
                                #initializer=tf.initializers.random_normal(mean=0.0, stddev=0.01), \
                                initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,mode='FAN_AVG',uniform=True),
                                regularizer=tf.contrib.layers.l2_regularizer(self.reg), \
                                trainable=trainable) #(1152, 8, 10, 16)
            u = tf.einsum('ijk,jkmn->ijmn', x, w) #(img_num, 1152, 10, 16)
            print_activations(u)
            return u
    def route(self, u=None, layer=0, r=3):
        u_shp = get_shape(u) #[img_num, 1152, 10, 16]
        with tf.variable_scope('route_'+str(layer)) as scope:
            b = tf.zeros(shape=u_shp[:-1]+[1], dtype=tf.float32) #(img_num, 1152, 10, 1)
            def cond(i, u, b):
                c = tf.less(i, r)
                return c
            def body(i, u, b):
                c = tf.nn.softmax(b, axis=-2) #(img_num, 1152, 10, 1) probability each input capsule assigns to each output capsule
                s = u * c #(img_num, 1152, 10, 16)
                s = tf.reduce_sum(s, axis=1, keepdims=True) #(img_num, 1, 10, 16)
                v = self.squash(s, 0) #(img_num, 1, 10, 16)
                b = b + tf.reduce_sum(u*v, axis=-1, keepdims=True) #(img_num, 1152, 10, 1)
                return [i+1, u, b]
            i = tf.constant(0)
            [i, u, b] = tf.while_loop(cond, body, loop_vars=[i, u, b], shape_invariants=None, \
                                      parallel_iterations=1, back_prop=True, swap_memory=True)
            c = tf.nn.softmax(b, axis=-2) #(img_num, 1152, 10, 1) probability each input capsule assigns to each output capsule
            s = u * c #(img_num, 1152, 10, 16)
            s = tf.reduce_sum(s, axis=1, keepdims=True) #(img_num, 1, 10, 16)
            v = self.squash(s, 1) #(img_num, 1, 10, 16)
            v = tf.squeeze(v, axis=[1]) #(img_num, 10, 16)
            print_activations(v)
            return v
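    # Added note (hedged): route implements routing-by-agreement -- the logits
    # b start at zero, c = softmax(b, axis=-2) distributes each of the 1152
    # input capsules over the 10 output capsules, and b is increased by the
    # agreement term reduce_sum(u*v) for r = 3 iterations before a final squash.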
    def margin_loss(self, v, y, layer=0, m_plus=0.9, m_minus=0.1, lambda_=0.5):
        with tf.variable_scope('margin_loss_'+str(layer)) as scope:
            y = tf.one_hot(y, depth=self.cls_num, dtype=tf.float32) #(img_num, 10)
            v = tf.norm(v, ord='euclidean', axis=-1, keepdims=False) #(img_num, 10)
            fp = tf.square(tf.maximum(0., m_plus-v ))
            fn = tf.square(tf.maximum(0., v-m_minus))
            L = y * fp + lambda_ * (1.0 - y) * fn
            L = tf.reduce_mean(tf.reduce_sum(L, axis=-1))
            print_activations(L)
            return L
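    # Added note (hedged): per class k this computes
    # L_k = y_k * max(0, m_plus - ||v_k||)^2
    #     + lambda_ * (1 - y_k) * max(0, ||v_k|| - m_minus)^2,
    # summed over classes and averaged over the batch.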
    def recons_loss(self, v, x, y, layer=0):
        x_shp = get_shape(x)
        with tf.variable_scope('recons_loss_'+str(layer)) as scope:
            x = tf.reshape(x, [x_shp[0], -1]) #(img_num, 784)
            x_shp = get_shape(x)
            y = tf.one_hot(y, depth=self.cls_num, dtype=tf.float32) #(img_num, 10)
            v = v * tf.expand_dims(y, axis=-1) #(img_num, 10, 16)
            v = tf.reshape(v, [x_shp[0], -1]) #(img_num, 160)
            p = {}
            p['com'] = {'reg':self.reg, 'wscale':0.01, 'dtype':self.typ, 'reuse':False, 'is_train':self.mod_tra, 'trainable':True}
            p['relu'] = {'alpha':-0.1}
            p['affine'] = {'dim':512, 'use_bias':True}
            v = affine_relu1(v, 0, p)
            p['affine'] = {'dim':1024, 'use_bias':True}
            v = affine_relu1(v, 1, p)
            p['affine'] = {'dim':x_shp[1], 'use_bias':True}
            v = affine_sigmoid1(v, 0, p)
            L = tf.reduce_sum(tf.square(x - v))
            print_activations(L)
            return L
    def total_loss(self, v, x, y, layer=0, alpha=0.0005):
        with tf.variable_scope('total_loss_'+str(layer)) as scope:
            L0 = self.margin_loss(v, y, 0)
            L1 = self.recons_loss(v, x, y, 0)
            L = L0 + alpha * L1
            print_activations(L)
            return L
    def accuracy(self, v, y, layer=0):
        with tf.variable_scope('accuracy_'+str(layer)) as scope:
            v = tf.norm(v, ord='euclidean', axis=-1, keepdims=False) #(img_num, 10)
            v = tf.cast(tf.argmax(v, axis=-1), dtype=tf.int32) #(img_num)
            acc = tf.cast(tf.equal(v, y), tf.float32) #(img_num)
            acc = tf.reduce_mean(acc, keepdims=False) #(1)
            print_activations(acc)
            return acc
    def forward(self, imgs=None, lbls=None, mtra=None, scp=None):
        img_shp = imgs.get_shape().as_list()
        img_num, img_hgt, img_wdh = img_shp[0], img_shp[1], img_shp[2]
        img_shp = np.stack([img_hgt, img_wdh], axis=0)
        #####################Common Parameters!############################
        com_pams = {
            'com': {'reg':self.reg, 'wscale':0.01, 'dtype':self.typ, 'reuse':False, 'is_train':self.mod_tra, 'trainable':True},
            'bn': {'eps':1e-5, 'decay':0.9997}, #0.9997
            'relu': {'alpha':-0.1},
            'conv': {'number':256,'shape':[9,9],'rate':[1,1],'stride':[1,1],'padding':'VALID','use_bias':True},
            'glb_pool': {'axis': [1, 2]},
            'reshape': {'shape': [img_num, -1]},
            'squeeze': {'axis': [1, 2]},
            'transpose': {'perm': [0, 3, 1, 2, 4]},
            'affine': {'dim': self.cls_num*self.v_dim, 'use_bias':False},
            'dropout': {'keep_p': 0.75, 'shape': None},
            #'bilstm': {'num_h': self.fet_dep//2, 'num_o': None, 'fbias': 1.0, 'tmajr': False},
            #'concat': {'axis': 0},
            #'split': {'axis': 0, 'number': img_num},
        }
        opas = {'op':[{'op':'conv_relu1', 'loop':1, 'params':{}},
                      {'op':'conv_relu1', 'loop':1, 'params':{'conv':{'stride':[2, 2]}}},
                      {'op':'reshape1', 'loop':1, 'params':{'reshape':{'shape':[img_num, -1, self.x_dim]}}},
                      ], 'loop':1}
        x = layers_module1(imgs, 0, com_pams, opas, mtra)
        x = self.squash(x, 0)
        u = self.project(x, 0)
        v = self.route(u, 0)
        accs = self.accuracy(v, lbls, 0)
        los_dat = self.total_loss(v, imgs, lbls, 0)
        los_reg = tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        los = los_dat + los_reg * 0.0
        loss = tf.stack([los, los_dat, los_reg], axis=0)
        return loss, accs
68a5d16053fa6388ce5a33a933e1de3b5b46a975 | 500 | py | Python | carp/config.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | 1 | 2021-03-02T15:48:57.000Z | 2021-03-02T15:48:57.000Z | carp/config.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | null | null | null | carp/config.py | lijielife/carp | 376e1a03da6594a567ddf15dde76008a4b126647 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from carp import util
class GlobalConfig(object):
    DEBUG = False
    DEBUG_PATH = 'debug'
    if not os.path.exists(DEBUG_PATH):
        os.makedirs(DEBUG_PATH)
    CACHE_PATH = os.path.join('cache')
    if not os.path.exists(CACHE_PATH):
        os.makedirs(CACHE_PATH)
    DATABASE_ADDR = 'localhost'
    DATABASE_PORT = 27017
class KlineConfig(object):
    SYNC_FREQS = [
        util.FREQ_DAY,
        util.FREQ_WEEK,
    ]
| 16.666667 | 38 | 0.6 | 62 | 500 | 4.66129 | 0.5 | 0.093426 | 0.048443 | 0.076125 | 0.117647 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016997 | 0.294 | 500 | 29 | 39 | 17.241379 | 0.8017 | 0.042 | 0 | 0 | 0 | 0 | 0.039916 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a61e3535e994efa7a5276510b5cdad10541e25 | 2,686 | py | Python | podnn/demos/diversity_2d_tensorflow.py | peymanmashhadi/podnn | b33a51ed044e8989328ab48d4eccd2f71088e43c | [
"Apache-2.0"
] | 2 | 2022-03-02T17:46:35.000Z | 2022-03-12T14:39:02.000Z | podnn/demos/diversity_2d_tensorflow.py | caisr-hh/podnn | 27b94aa3e1b35ab40b7cc84234ed7c44b9b0117d | [
"Apache-2.0"
] | null | null | null | podnn/demos/diversity_2d_tensorflow.py | caisr-hh/podnn | 27b94aa3e1b35ab40b7cc84234ed7c44b9b0117d | [
"Apache-2.0"
] | 2 | 2021-12-08T15:45:16.000Z | 2022-03-02T17:46:28.000Z | import numpy as np
from sklearn.datasets import make_circles
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import podnn_tensorflow
import tensorflow as tf
from tensorflow.keras import Model
tf.random.set_seed(4)
import utils
n_samples = 500
X, y = make_circles(noise=0.3, random_state=17, n_samples=n_samples,factor=0.2)
x_train,x_test,y_train,y_test = train_test_split(X,y,test_size=0.3)
X_train = tf.convert_to_tensor(x_train)
y_train = tf.convert_to_tensor(y_train.reshape(-1,1))
X_test = tf.convert_to_tensor(x_test)
y_test = tf.convert_to_tensor(y_test.reshape(-1,1))
unit_model_1 = [
    tf.keras.layers.Dense(12,activation='elu'),
    tf.keras.layers.Dense(10),
]
unit_model_2 = [
    tf.keras.layers.Dense(4)
]
class podnnModel(Model):
    def __init__(self):
        super(podnnModel, self).__init__()
        pass
    def build(self,input_shape):
        self.InputLayer = podnn_tensorflow.InputLayer(n_models=8)
        self.ParallelLayer1 = podnn_tensorflow.ParallelLayer(unit_model_1)
        self.OrthogonalLayer = podnn_tensorflow.OrthogonalLayer1D()
        self.AggregationLayer = podnn_tensorflow.AggregationLayer(stride=2)
        self.DenseLayer = tf.keras.layers.Dense(1, activation='sigmoid',name='last_dense')
    def call(self,x):
        x = self.InputLayer(x)
        x = self.ParallelLayer1(x)
        x = self.OrthogonalLayer(x)
        x_orth = self.AggregationLayer(x)
        x = self.DenseLayer(x_orth)
        return x,x_orth
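# Added note (hedged): the stack above fans the input out to 8 parallel
# sub-networks (unit_model_1), orthogonalizes their feature vectors to push
# the ensemble members apart, and AggregationLayer(stride=2) then combines
# them (apparently in groups of two) before the sigmoid head; call() also
# returns the orthogonal features x_orth for inspection.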
loss_object = tf.keras.losses.BinaryCrossentropy()
optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)
train_loss = tf.keras.metrics.Mean()
train_accuracy = tf.keras.metrics.BinaryAccuracy(name='train_accuracy')
model = podnnModel()
@tf.function
def train_step(x, labels):
    with tf.GradientTape() as tape:
        predictions,_ = model(x)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    train_loss(loss)
    train_accuracy(labels, tf.squeeze(predictions))
epochs = 200
for i in range(epochs):
    train_loss.reset_states()
    train_accuracy.reset_states()
    train_step(X_train, y_train)
    if np.mod(i,10)==0:
        print('epoch:'+str(i)+' train loss='+str(train_loss.result()))
        print('epoch:'+str(i) + ' train accuracy=' + str(train_accuracy.result()))
preds_test,_ = model(X_test)
test_acc = accuracy_score(y_test,np.round(preds_test))
print('=======> test accuracy=' + str(test_acc))
utils.plot_bounday_tensorflow(model,4,x_train,y_train,x_test,y_test) | 28.574468 | 90 | 0.715934 | 385 | 2,686 | 4.748052 | 0.314286 | 0.030635 | 0.02407 | 0.037199 | 0.070022 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017349 | 0.163068 | 2,686 | 94 | 91 | 28.574468 | 0.795819 | 0 | 0 | 0 | 0 | 0 | 0.037216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0.015152 | 0.121212 | 0 | 0.212121 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a6eaabbd59399077717fa621b94f6b9a07775e | 2,784 | py | Python | chap_02/exe_051_roots_quadratic_function.py | aleattene/python-workbook | bf26ba716c957316d1463fb25488384e319d5b91 | [
"MIT"
] | null | null | null | chap_02/exe_051_roots_quadratic_function.py | aleattene/python-workbook | bf26ba716c957316d1463fb25488384e319d5b91 | [
"MIT"
] | null | null | null | chap_02/exe_051_roots_quadratic_function.py | aleattene/python-workbook | bf26ba716c957316d1463fb25488384e319d5b91 | [
"MIT"
] | null | null | null | """
The Program receives from the USER THREE CONSTANTS ("a", "b" and "c")
of a SECOND DEGREE EQUATION of the type:
a(x^2) + bx + c (with "a" other than zero).
Afterwards, calculates any possible REAL SOLUTIONS.
"""
# IMPORT module MATH
import math
# START Definition of FUNCTIONS
def valutaFloat(numero):
countPoints = 0
for char in numero:
if ord(char) == 46:
countPoints += 1
if countPoints == 1 and numero != "." and valutaNumero(numero):
if isinstance(float(numero), float):
return True
else:
return False
def valutaNumero(numero):
if numero == "":
return False
countSigns = 0
for char in numero:
if ord(char) == 45 or ord(char) == 43:
countSigns += 1
if ((numero[0] == "+") or (numero[0] == "-")) and countSigns == 1 and \
numero != "-" and numero != "+" and numero != "-." and numero != "+.":
return True
elif numero[0].isdigit() and countSigns == 0:
return True
else:
return False
def valutaZero(numero):
if numero.isdigit():
if int(numero) == 0:
return True
elif len(numero) > 1:
if valutaNumero(numero) and float(numero) == 0:
return True
return False
def correctEntry(numero):
if valutaFloat(numero) or valutaNumero(numero):
return True
return False
def computesDiscriminant(a, b, c):
discriminant = (b ** 2) - (4 * a * c)
return discriminant
def computesRoots(a, b, c):
discriminant = computesDiscriminant(a, b, c)
if discriminant < 0:
return "NO REAL SOLUTION"
elif discriminant == 0:
x = (-b) / (2 * a)
return "ONE REAL SOLUTION -> x = %.2f" % x
else:
x1 = ((-b) + math.sqrt(discriminant)) / (2 * a)
x2 = ((-b) - math.sqrt(discriminant)) / (2 * a)
return "TWO REAL SOLUTION -> x1 = %.2f" % x1 + " and x2 = %.2f" % x2
# END Definition of FUNCTIONS
# Acquisition and Control of the DATA entered by the USER
print("Enter the value for a (not equal), b and c: ")
a = input("a (non-zero): ")
b = input("b: ")
c = input("c: ")
aValidated = correctEntry(a)
bValidated = correctEntry(b)
cValidated = correctEntry(c)
while not(aValidated and bValidated and cValidated) or valutaZero(a):
print("Incorrect entry. Try again.")
print("Enter the value for a, b and c: ")
a = input("a (non-zero): ")
b = input("b: ")
c = input("c: ")
aValidated = correctEntry(a)
bValidated = correctEntry(b)
cValidated = correctEntry(c)
# Conversion STR -> FLOAT
a = float(a)
b = float(b)
c = float(c)
# DISCRIMINANT evaluation and ROOTS computing
roots = computesRoots(a, b, c)
# Displaying the RESULTS
print("RESULTS QUADRATIC FUNCTION: " + roots)
| 24.637168 | 82 | 0.595546 | 361 | 2,784 | 4.592798 | 0.293629 | 0.008444 | 0.028951 | 0.032569 | 0.291918 | 0.262967 | 0.153197 | 0.153197 | 0.12304 | 0.12304 | 0 | 0.017751 | 0.271552 | 2,784 | 112 | 83 | 24.857143 | 0.799803 | 0.155172 | 0 | 0.388889 | 0 | 0 | 0.115056 | 0 | 0.013889 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.013889 | 0 | 0.305556 | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a87e9bc41e1355731a449f24454d89206c0867 | 5,895 | py | Python | cogs/imagecmds.py | miettee/tenko-bot | 42421fd1e5af5009a20e1d963a945dada2d32a91 | [
"MIT"
] | 2 | 2021-08-30T23:03:02.000Z | 2021-10-15T15:24:14.000Z | cogs/imagecmds.py | miettee/tenko-bot | 42421fd1e5af5009a20e1d963a945dada2d32a91 | [
"MIT"
] | null | null | null | cogs/imagecmds.py | miettee/tenko-bot | 42421fd1e5af5009a20e1d963a945dada2d32a91 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import random
class imagecmds(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.blessed_dir = "/root/tenko/resources/blessed/blessed"
self.girl_dir = "/root/tenko/resources/girl/girl"
self.cursed_dir = "/root/tenko/resources/cursed/cursed"
self.ronpa_dir = "/root/tenko/resources/dangans/dangan"
#make sure ur image files all start with the word at the end, and ascend in number for each file dir, eg blessed1.png, blessed2.png, blessed1.jpg etc
def log(self, logmsg, user, server, channel):
print(f"{logmsg} by `{user}` in `{server}`, in channel `{channel}`")
@commands.command()
async def cursed(self, ctx):
self.log(ctx.message.content, ctx.author.name, ctx.guild.name, ctx.channel.name)
my_files = []
for i in range(1, 162): #nb range limits cant go beyond how many files you compile
my_files.append(
discord.File(f"{self.cursed_dir}{i}.jpg"))
for i in range(1, 96):
my_files.append(
discord.File(f"{self.cursed_dir}{i}.png"))
for i in range(1, 6):
my_files.append(
discord.File(f"{self.cursed_dir}{i}.jpeg"))
await ctx.channel.send(files=[(random.choice(my_files))])
@commands.command()
async def girl(self, ctx):
self.log(ctx.message.content, ctx.author.name, ctx.guild.name, ctx.channel.name)
my_files = []
for i in range(1, 88):
my_files.append(
discord.File(f"{self.girl_dir}{i}.png"))
await ctx.message.channel.send(files=[(random.choice(my_files))])
@commands.command()
async def blessed(self, ctx):
self.log(ctx.message.content, ctx.author.name, ctx.guild.name, ctx.channel.name)
my_files = []
for i in range(1, 58):
my_files.append(
discord.File(f"{self.blessed_dir}{i}.jpg"))
for i in range(1, 202):
my_files.append(
discord.File(f"{self.blessed_dir}{i}.png"))
for i in range(1, 7):
my_files.append(
discord.File(f"{self.blessed_dir}{i}.jpeg"))
await ctx.message.channel.send(files=[(random.choice(my_files))])
@commands.command()
async def ronpa(self, ctx):
messages_list = ['extra'] # extra unused item in the list because my file numbers start with 1 not 0
danganss = ['Makoto Naegi', 'Sayaka Maizono', 'Leon Kuwata', 'Kyoko Kirigiri', 'Byakuya Togami',
'Hifumi Yamada',
'Mondo Owada', 'Toko Fukawa', 'Celestia Ludenberg', 'Aoi Asahina', 'Kiyotaka Ishimaru',
'Sakura Ogami', 'Yasuhiro Hagakure', 'Chihiro Fujisaki', 'Mukuro Ikusaba', 'Junko Enoshima',
'Hajime Hinata', 'Nagito Komaeda', 'Gundham Tanaka', 'Kazuichi Soda',
'Teruteru Hanamura', 'Nekomaru Nidai', 'Fuyuhiko Kuzuryu', 'Akane Owari', 'Chiaki Nanami',
'Sonia Nevermind', 'Hiyoko Saionji',
'Mahiru Koizumi', 'Mikan Tsumiki',
'Ibuki Mioda', 'Peko Pekoyama',
'Angie Yonaga', 'Gonta Gokuhara',
'Himiko Yumeno', 'Kaede Akamatsu', 'Kaito Momota', 'Kiibo',
'Kirumi Tojo', 'Kokichi Oma',
'Korekiyo Shinguuji', 'Maki Harukawa', 'Miu Iruma', 'Rantaro Amami', 'Ryoma Hoshi',
'Shuichi Saihara',
'Tenko Chabashira',
'Tsumugi Shirogane',
                    'Izuru Kamukura']  # your images will need to be in the same order as the characters
talents = ['Lucky Student', 'Pop Sensation', 'Baseball Player', 'Detective', 'Heir', 'Doujin Artist'
, 'Gang Leader', 'Author', 'Gambler', 'Swimmer', 'Moral Compass', 'Martial Artist',
'Fortune Teller', 'Programmer', 'Soldier', 'Despair/Fashionista', 'Reserve Student', 'Lucky Student',
'Animal Breeder', 'Mechanic', 'Cook', 'Team Manager', 'Yakuza', 'Gymnast', 'Gamer', 'Princess'
, 'Traditional Dancer', 'Photographer', 'Nurse', 'Musician', 'Swordswoman', 'Artist',
'Entomologist', 'Mage', 'Pianist', 'Astronaut', 'Robot', 'Maid', 'Supreme Leader', 'Anthropologist',
'Child Caregiver',
'Inventor', '???', 'Tennis Player', 'Detective', 'Aikido Master', 'Cosplayer'] # same for talents
        for i in range(0, 47):  # only the first 47 names are paired here; Izuru is covered by the specials below
messages_list.append(
f'Your assigned Danganronpa character is {danganss[i]}, the Super Highschool Level {talents[i]}!')
specials_ = ["Your assigned Danganronpa character is Monokuma, Hope\'s Peak\'s headmaster!",
"Your assigned Danganronpa character is Izuru Kamakura, the Super Highschool Level Hope!",
"Your assigned Danganronpa character is the Super Highschool Level Impostor!",
"Your assigned Danganronpa character is Usami, the Magical Girl teacher!"]
for e in specials_:
messages_list.append(e)
image_and_message = {}
for i in range(1, 51):
image_and_message.update({
discord.File(fr"{self.ronpa_dir}{str(i)}.png"):
messages_list[i]}) # adds image files and messages as pairs in a dict
sent_image = random.choice(list(image_and_message)) # gets a random image
sent_text = image_and_message.get(sent_image) # gets the messages from that image
await ctx.message.channel.send(sent_text, files=[sent_image]) # sends message and image
def setup(bot):
bot.add_cog(imagecmds(bot))
| 48.319672 | 158 | 0.577608 | 686 | 5,895 | 4.892128 | 0.430029 | 0.027116 | 0.016091 | 0.029499 | 0.304231 | 0.237485 | 0.237485 | 0.228844 | 0.210965 | 0.210965 | 0 | 0.007726 | 0.297371 | 5,895 | 121 | 159 | 48.719008 | 0.802511 | 0.082443 | 0 | 0.2 | 0 | 0 | 0.362379 | 0.064027 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0.010526 | 0.031579 | 0 | 0.073684 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a91007412bf0deff4094c45b11fec906c336a1 | 845 | py | Python | microbit/reaction.py | microbit-and-chips/reaction-timer | 93ab0163a74d02a805cc838d505fbc6df8edaf89 | [
"MIT"
] | 2 | 2017-02-01T16:55:43.000Z | 2018-09-03T17:42:51.000Z | microbit/reaction.py | microbit-and-chips/reaction-timer | 93ab0163a74d02a805cc838d505fbc6df8edaf89 | [
"MIT"
] | null | null | null | microbit/reaction.py | microbit-and-chips/reaction-timer | 93ab0163a74d02a805cc838d505fbc6df8edaf89 | [
"MIT"
] | null | null | null | from microbit import *
import random
# see the accompanying blog post for a walkthrough
def waiting():
return not(button_a.is_pressed() or button_b.is_pressed())
def clear_buttons():
button_a.was_pressed() # clear the button_a flag
button_b.was_pressed() # clear the button_b flag
def time():
# returns the time until waiting is over, in 1/10 secs
count = 0
while waiting():
sleep(100) # 100 ms = 1/10 secs
count = count + 1
return count
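# A quick worked example of the timing logic above: a 0.73 s reaction returns 8,
# since the count always rounds up to the next completed 100 ms tick, so scores
# are in tenths of a second with roughly one tick of quantisation error.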
while True:
clear_buttons()
display.show(Image.CLOCK1)
if button_b.was_pressed():
break
sleep(random.randint(500, 4500)) # delay in ms
if button_a.was_pressed():
print('naughty!')
continue
if button_b.was_pressed():
break
display.show(Image.HAPPY)
print(time())
print('bye for now')
while True:
display.scroll('bye! ') | 23.472222 | 62 | 0.63432 | 121 | 845 | 4.280992 | 0.446281 | 0.067568 | 0.057915 | 0.098456 | 0.185328 | 0.092664 | 0 | 0 | 0 | 0 | 0 | 0.035088 | 0.257988 | 845 | 36 | 63 | 23.472222 | 0.791069 | 0.16568 | 0 | 0.206897 | 0 | 0 | 0.034335 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.068966 | 0.034483 | 0.241379 | 0.103448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68a9873f8e0dc07b01318ce3311a3ae2b3541b34 | 2,613 | py | Python | blag/rest.py | ChickenNuggers/blag | 61b48106342e7e682d30e92f4cc74c502f12815b | [
"MIT"
] | null | null | null | blag/rest.py | ChickenNuggers/blag | 61b48106342e7e682d30e92f4cc74c502f12815b | [
"MIT"
] | 1 | 2016-11-19T20:47:28.000Z | 2016-11-24T08:14:29.000Z | blag/rest.py | ChickenNuggers/blag | 61b48106342e7e682d30e92f4cc74c502f12815b | [
"MIT"
] | null | null | null | import base64
from flask import jsonify, abort, request
from functools import wraps
from . import util
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
        # Reject requests that lack an Authorization header with an explicit 400.
        try:
            auth = request.headers['Authorization']
        except KeyError:
            raise util.InvalidUsage("Missing Authorization field", 400)
auth = base64.b64decode(auth.split(' ')[1])
if not util.check_auth(auth):
raise util.InvalidUsage("Invalid authorization", 401)
return f(*args, **kwargs)
return decorated
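# Hedged usage sketch: `requires_auth` assumes a Basic-style header of the form
# "Authorization: Basic <base64 credentials>", since it splits on the space and
# base64-decodes the second token before passing it to util.check_auth. Applied
# to a route (the endpoint below is illustrative):
#
#   @app.route('/api/v1/secret')
#   @requires_auth
#   def secret():
#       return jsonify({'ok': True})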
def add_routes(add_route, app):
@app.errorhandler(util.InvalidUsage)
def handle_invalid_usage(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@add_route('/api/v1/config')
def get_config():
return jsonify({
key: getattr(app.config['config_module'], key)
for key in dir(app.config['config_module']) if key[0] != '_'
})
@add_route('/api/v1/posts', methods=['GET'])
def get_post_list():
if request.args.get('start_eid'):
return jsonify([
post for post in util.get_post_list(start=int(
request.args.get('start_eid')))
])
else:
return jsonify([post for post in util.get_post_list()])
@add_route('/api/v1/posts/reverse', methods=['GET'])
def get_reverse_post_list():
if request.args.get('start_eid'):
return jsonify([
post for post in util.get_reverse_post_list(start=int(
request.args.get('start_eid')))
])
else:
return jsonify([post for post in util.get_reverse_post_list()])
@add_route('/api/v1/post/<int:eid>', methods=['GET'])
def get_post(eid):
post = util.get_post(eid)
if post is None:
return abort(404)
else:
return jsonify(post)
@add_route('/api/v1/new', methods=['POST'])
@requires_auth
def make_post():
return jsonify({"eid": util.add_post(request)})
@add_route('/api/v1/posts/<int:eid>', methods=['PUT', 'POST'])
@requires_auth
def amend_post(eid):
return jsonify({"eid": util.update_post(eid, request)})
@add_route('/api/v1/posts/<int:eid>', methods=['DELETE'])
@requires_auth
def delete_post(eid):
return jsonify({'eid': util.delete_post(eid)})
| 31.865854 | 75 | 0.594336 | 322 | 2,613 | 4.649068 | 0.251553 | 0.078156 | 0.051436 | 0.060788 | 0.350701 | 0.311289 | 0.257849 | 0.257849 | 0.257849 | 0.207081 | 0 | 0.012658 | 0.274397 | 2,613 | 81 | 76 | 32.259259 | 0.776899 | 0 | 0 | 0.2 | 0 | 0 | 0.109835 | 0.03406 | 0 | 0 | 0 | 0 | 0 | 1 | 0.157143 | false | 0 | 0.057143 | 0.057143 | 0.414286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68aa85825647368a79502e99a19d03d3aa4b5c4f | 1,270 | py | Python | var/spack/repos/builtin/packages/brltty/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2,360 | 2017-11-06T08:47:01.000Z | 2022-03-31T14:45:33.000Z | var/spack/repos/builtin/packages/brltty/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 13,838 | 2017-11-04T07:49:45.000Z | 2022-03-31T23:38:39.000Z | var/spack/repos/builtin/packages/brltty/package.py | LiamBindle/spack | e90d5ad6cfff2ba3de7b537d6511adccd9d5fcf1 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1,793 | 2017-11-04T07:45:50.000Z | 2022-03-30T14:31:53.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Brltty(AutotoolsPackage):
"""BRLTTY is a background process (daemon) providing access to the
Linux/Unix console (when in text mode) for a blind person using
a refreshable braille display."""
homepage = "https://brltty.app/"
url = "https://github.com/brltty/brltty/archive/BRLTTY-6.0.tar.gz"
version('6.0', sha256='acfea5274bdc9230b0ea1a87f8796e241615d4d2c1ba08d87601b9d116c7804c')
version('5.6', sha256='74f35043943525396b340b9f65f0d73c3cc4054a8f63d1c685f27ccf59f46c5d')
version('5.5', sha256='cd80a0d225f13779791dc3a72d7f137c06c48e5f2c9600e80a565d2378422207')
version('5.4', sha256='9ad5a540d29438a755f8b8f1f1534e0eba601c604f3d8223fa00b802959ec636')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('expat')
depends_on('alsa-lib', when='platform=linux', type='link')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('autogen')
| 39.6875 | 93 | 0.724409 | 139 | 1,270 | 6.57554 | 0.669065 | 0.059081 | 0.070022 | 0.078775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.191628 | 0.153543 | 1,270 | 31 | 94 | 40.967742 | 0.658605 | 0.274016 | 0 | 0 | 0 | 0.058824 | 0.477348 | 0.282873 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68ac83983e3d9d15067e964394d04a70cb5595ad | 1,583 | py | Python | webserver/handlers/status.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 70 | 2015-11-16T18:04:01.000Z | 2022-03-05T09:04:02.000Z | webserver/handlers/status.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 1 | 2016-08-03T05:13:19.000Z | 2016-08-03T06:19:39.000Z | webserver/handlers/status.py | cuauv/software | 5ad4d52d603f81a7f254f365d9b0fe636d03a260 | [
"BSD-3-Clause"
] | 34 | 2015-12-15T17:29:23.000Z | 2021-11-18T14:15:12.000Z | import json
import threading
import tornado.websocket
import shm
class SwitchWatcherThread(threading.Thread):
def __init__(self, lock, watchers, *args, **kwargs):
threading.Thread.__init__(self, *args, **kwargs)
self.lock = lock
self.watchers = watchers
def run(self):
switch_watcher = shm.watchers.watcher()
switch_watcher.watch(shm.switches)
while True:
with self.lock:
if len(self.watchers) == 0:
break
switch_watcher.wait()
msg = json.dumps({
"soft_kill": shm.switches.soft_kill.get(),
"hard_kill": shm.switches.hard_kill.get(),
})
with self.lock:
for ws in self.watchers:
ws.write_message(msg)
class StatusHandler(tornado.websocket.WebSocketHandler):
ws_clients_lock = threading.Lock()
ws_clients = set()
ws_updater = None
def open(self):
with self.ws_clients_lock:
self.ws_clients.add(self)
            if StatusHandler.ws_updater is None or not StatusHandler.ws_updater.is_alive():
                # Assign the watcher thread on the class so it is shared by all
                # handler instances rather than re-spawned per connection.
                StatusHandler.ws_updater = SwitchWatcherThread(self.ws_clients_lock, self.ws_clients, daemon=True)
                StatusHandler.ws_updater.start()
def on_message(self, message):
msg = json.dumps({
"soft_kill": shm.switches.soft_kill.get(),
"hard_kill": shm.switches.hard_kill.get(),
})
self.write_message(msg)
def on_close(self):
with self.ws_clients_lock:
self.ws_clients.remove(self)
| 31.039216 | 101 | 0.596336 | 186 | 1,583 | 4.854839 | 0.306452 | 0.066445 | 0.086379 | 0.056478 | 0.276855 | 0.276855 | 0.276855 | 0.243632 | 0.243632 | 0.159468 | 0 | 0.000901 | 0.2988 | 1,583 | 50 | 102 | 31.66 | 0.812613 | 0 | 0 | 0.27907 | 0 | 0 | 0.022742 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116279 | false | 0 | 0.093023 | 0 | 0.325581 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68b150f1ef1796ef781ab93463cbedc4334d539d | 37,499 | py | Python | tanjun/dependencies/limiters.py | A5rocks/Tanjun | 06a6c9208ace51c5b32e7c407b65ce9e1da06b18 | [
"BSD-3-Clause"
] | null | null | null | tanjun/dependencies/limiters.py | A5rocks/Tanjun | 06a6c9208ace51c5b32e7c407b65ce9e1da06b18 | [
"BSD-3-Clause"
] | 7 | 2021-10-17T15:15:44.000Z | 2022-02-02T02:19:10.000Z | tanjun/dependencies/limiters.py | patchwork-systems/Tanjun | 638e8bfe3132513cddf3882b704c0db376db5e9b | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# cython: language_level=3
# BSD 3-Clause License
#
# Copyright (c) 2020-2022, Faster Speeding
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Command cooldown and concurrency limiters."""
from __future__ import annotations
__all__: list[str] = [
"AbstractConcurrencyLimiter",
"AbstractCooldownManager",
"BucketResource",
"ConcurrencyPreExecution",
"ConcurrencyPostExecution",
"CooldownPreExecution",
"InMemoryConcurrencyLimiter",
"InMemoryCooldownManager",
"with_concurrency_limit",
"with_cooldown",
]
import abc
import asyncio
import datetime
import enum
import logging
import time
import typing
from collections import abc as collections
import hikari
from .. import abc as tanjun_abc
from .. import errors
from .. import hooks
from .. import injecting
from . import async_cache
from . import owners
if typing.TYPE_CHECKING:
_InMemoryCooldownManagerT = typing.TypeVar("_InMemoryCooldownManagerT", bound="InMemoryCooldownManager")
_InMemoryConcurrencyLimiterT = typing.TypeVar("_InMemoryConcurrencyLimiterT", bound="InMemoryConcurrencyLimiter")
_LOGGER: typing.Final[logging.Logger] = logging.getLogger("hikari.tanjun")
CommandT = typing.TypeVar("CommandT", bound="tanjun_abc.ExecutableCommand[typing.Any]")
"""Type variable indicating either `BaseSlashCommand` or `MessageCommand`."""
class AbstractCooldownManager(abc.ABC):
"""Interface used for managing command calldowns."""
__slots__ = ()
@abc.abstractmethod
async def check_cooldown(
self, bucket_id: str, ctx: tanjun_abc.Context, /, *, increment: bool = False
) -> typing.Optional[float]:
"""Check if a bucket is on cooldown for the provided context.
Parameters
----------
bucket_id : str
The cooldown bucket to check.
ctx : tanjun.abc.Context
The context of the command.
Other Parameters
----------------
increment : bool
Whether this call should increment the bucket's use counter if
it isn't depleted.
Returns
-------
float | None
When this command will next be usable for the provided context
if it's in cooldown else `None`.
"""
@abc.abstractmethod
async def increment_cooldown(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> None:
"""Increment the cooldown of a cooldown bucket.
Parameters
----------
bucket_id : str
The cooldown bucket's ID.
ctx : tanjun.abc.Context
The context of the command.
"""
class AbstractConcurrencyLimiter(abc.ABC):
"""Interface used for limiting command concurrent usage."""
__slots__ = ()
@abc.abstractmethod
async def try_acquire(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> bool:
"""Try to acquire a concurrency lock on a bucket.
Parameters
----------
bucket_id : str
The concurrency bucket to acquire.
ctx : tanjun.abc.Context
The context to acquire this resource lock with.
Returns
-------
bool
Whether the lock was acquired.
"""
@abc.abstractmethod
async def release(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> None:
"""Release a concurrency lock on a bucket."""
class BucketResource(int, enum.Enum):
"""Resource target types used within command calldowns and concurrency limiters."""
USER = 0
"""A per-user resource bucket."""
MEMBER = 1
"""A per-guild member resource bucket.
.. note::
When executed in a DM this will be per-DM.
"""
CHANNEL = 2
"""A per-channel resource bucket."""
PARENT_CHANNEL = 3
"""A per-parent channel resource bucket.
.. note::
For DM channels this will be per-DM, for guild channels with no parents
this'll be per-guild.
"""
# CATEGORY = 4
# """A per-category resource bucket.
# .. note::
# For DM channels this will be per-DM, for guild channels with no parent
# category this'll be per-guild.
# """
TOP_ROLE = 5
"""A per-highest role resource bucket.
.. note::
When executed in a DM this will be per-DM, with this defaulting to
targeting the @everyone role if they have no real roles.
"""
GUILD = 6
"""A per-guild resource bucket.
.. note::
When executed in a DM this will be per-DM.
"""
GLOBAL = 7
"""A global resource bucket."""
async def _try_get_role(
cache: async_cache.SfCache[hikari.Role], role_id: hikari.Snowflake
) -> typing.Optional[hikari.Role]:
try:
return await cache.get(role_id)
except async_cache.EntryNotFound:
pass
async def _get_ctx_target(ctx: tanjun_abc.Context, type_: BucketResource, /) -> hikari.Snowflake:
if type_ is BucketResource.USER:
return ctx.author.id
if type_ is BucketResource.CHANNEL:
return ctx.channel_id
if type_ is BucketResource.PARENT_CHANNEL:
if ctx.guild_id is None:
return ctx.channel_id
if cached_channel := ctx.get_channel():
return cached_channel.parent_id or ctx.guild_id
# TODO: upgrade this to the standard interface
assert isinstance(ctx, injecting.AbstractInjectionContext)
channel_cache = ctx.get_type_dependency(async_cache.SfCache[hikari.GuildChannel])
if channel_cache and (channel_ := await channel_cache.get(ctx.channel_id, default=None)):
return channel_.parent_id or ctx.guild_id
channel = await ctx.fetch_channel()
assert isinstance(channel, hikari.TextableGuildChannel)
return channel.parent_id or ctx.guild_id
# if type_ is BucketResource.CATEGORY:
# if ctx.guild_id is None:
# return ctx.channel_id
# # This resource doesn't include threads so we can safely assume that the parent is a category
# if channel := ctx.get_channel():
# return channel.parent_id or channel.guild_id
# # TODO: threads
# channel = await ctx.fetch_channel() # TODO: couldn't this lead to two requests per command? seems bad
# assert isinstance(channel, hikari.TextableGuildChannel)
# return channel.parent_id or channel.guild_id
if type_ is BucketResource.TOP_ROLE:
if not ctx.guild_id:
return ctx.channel_id
# If they don't have a member object but this is in a guild context then we'll have to assume they
# only have @everyone since they might be a webhook or something.
if not ctx.member or len(ctx.member.role_ids) <= 1: # If they only have 1 role ID then this is @everyone.
return ctx.guild_id
roles = ctx.member.get_roles()
try_rest = not roles
# TODO: upgrade this to the standard interface
assert isinstance(ctx, injecting.AbstractInjectionContext)
if try_rest and (role_cache := ctx.get_type_dependency(async_cache.SfCache[hikari.Role])):
try:
roles = filter(None, [await _try_get_role(role_cache, role_id) for role_id in ctx.member.role_ids])
try_rest = False
except async_cache.CacheMissError:
pass
if try_rest:
roles = await ctx.member.fetch_roles()
return next(iter(sorted(roles, key=lambda r: r.position, reverse=True))).id
if type_ is BucketResource.GUILD:
return ctx.guild_id or ctx.channel_id
raise ValueError(f"Unexpected type {type_}")
_CooldownT = typing.TypeVar("_CooldownT", bound="_Cooldown")
class _Cooldown:
__slots__ = ("counter", "limit", "reset_after", "resets_at")
def __init__(self, *, limit: int, reset_after: float) -> None:
self.counter = 0
self.limit = limit
self.reset_after = reset_after
self.resets_at = time.monotonic() + reset_after
def has_expired(self) -> bool:
# Expiration doesn't actually matter for cases where the limit is -1.
return time.monotonic() >= self.resets_at
def increment(self: _CooldownT) -> _CooldownT:
# A limit of -1 is special cased to mean no limit, so there's no need to increment the counter.
if self.limit == -1:
return self
if self.counter == 0:
self.resets_at = time.monotonic() + self.reset_after
elif (current_time := time.monotonic()) >= self.resets_at:
self.counter = 0
self.resets_at = current_time + self.reset_after
if self.counter < self.limit:
self.counter += 1
return self
def must_wait_for(self) -> typing.Optional[float]:
# A limit of -1 is special cased to mean no limit, so we don't need to wait.
if self.limit == -1:
return None
if self.counter >= self.limit and (time_left := self.resets_at - time.monotonic()) > 0:
return time_left
class _InnerResourceProto(typing.Protocol):
def has_expired(self) -> bool:
raise NotImplementedError
_InnerResourceT = typing.TypeVar("_InnerResourceT", bound=_InnerResourceProto)
class _BaseResource(abc.ABC, typing.Generic[_InnerResourceT]):
__slots__ = ("make_resource",)
def __init__(self, make_resource: _InnerResourceSig[_InnerResourceT]) -> None:
self.make_resource = make_resource
@abc.abstractmethod
def cleanup(self) -> None:
raise NotImplementedError
@abc.abstractmethod
def copy(self) -> _BaseResource[_InnerResourceT]:
raise NotImplementedError
@abc.abstractmethod
async def into_inner(self, ctx: tanjun_abc.Context, /) -> _InnerResourceT:
raise NotImplementedError
@abc.abstractmethod
async def try_into_inner(self, ctx: tanjun_abc.Context, /) -> typing.Optional[_InnerResourceT]:
raise NotImplementedError
_InnerResourceSig = collections.Callable[[], _InnerResourceT]
class _FlatResource(_BaseResource[_InnerResourceT]):
__slots__ = ("mapping", "resource")
def __init__(self, resource: BucketResource, make_resource: _InnerResourceSig[_InnerResourceT]) -> None:
super().__init__(make_resource)
self.mapping: dict[hikari.Snowflake, _InnerResourceT] = {}
self.resource = resource
async def try_into_inner(self, ctx: tanjun_abc.Context, /) -> typing.Optional[_InnerResourceT]:
return self.mapping.get(await _get_ctx_target(ctx, self.resource))
async def into_inner(self, ctx: tanjun_abc.Context, /) -> _InnerResourceT:
target = await _get_ctx_target(ctx, self.resource)
if resource := self.mapping.get(target):
return resource
resource = self.mapping[target] = self.make_resource()
return resource
def cleanup(self) -> None:
for target_id, resource in self.mapping.copy().items():
if resource.has_expired():
del self.mapping[target_id]
def copy(self) -> _FlatResource[_InnerResourceT]:
return _FlatResource(self.resource, self.make_resource)
class _MemberResource(_BaseResource[_InnerResourceT]):
__slots__ = ("dm_fallback", "mapping")
def __init__(self, make_resource: _InnerResourceSig[_InnerResourceT]) -> None:
super().__init__(make_resource)
self.dm_fallback: dict[hikari.Snowflake, _InnerResourceT] = {}
self.mapping: dict[hikari.Snowflake, dict[hikari.Snowflake, _InnerResourceT]] = {}
async def into_inner(self, ctx: tanjun_abc.Context, /) -> _InnerResourceT:
if not ctx.guild_id:
if resource := self.dm_fallback.get(ctx.channel_id):
return resource
resource = self.dm_fallback[ctx.channel_id] = self.make_resource()
return resource
if (guild_mapping := self.mapping.get(ctx.guild_id)) is not None:
if resource := guild_mapping.get(ctx.author.id):
return resource
resource = guild_mapping[ctx.author.id] = self.make_resource()
return resource
resource = self.make_resource()
self.mapping[ctx.guild_id] = {ctx.author.id: resource}
return resource
async def try_into_inner(self, ctx: tanjun_abc.Context, /) -> typing.Optional[_InnerResourceT]:
if not ctx.guild_id:
return self.dm_fallback.get(ctx.channel_id)
if guild_mapping := self.mapping.get(ctx.guild_id):
return guild_mapping.get(ctx.author.id)
def cleanup(self) -> None:
for guild_id, mapping in self.mapping.copy().items():
for bucket_id, resource in mapping.copy().items():
if resource.has_expired():
del mapping[bucket_id]
if not mapping:
del self.mapping[guild_id]
for bucket_id, resource in self.dm_fallback.copy().items():
if resource.has_expired():
del self.dm_fallback[bucket_id]
def copy(self) -> _MemberResource[_InnerResourceT]:
return _MemberResource(self.make_resource)
class _GlobalResource(_BaseResource[_InnerResourceT]):
__slots__ = ("bucket",)
def __init__(self, make_resource: _InnerResourceSig[_InnerResourceT]) -> None:
super().__init__(make_resource)
self.bucket = make_resource()
async def try_into_inner(self, _: tanjun_abc.Context, /) -> typing.Optional[_InnerResourceT]:
return self.bucket
async def into_inner(self, _: tanjun_abc.Context, /) -> _InnerResourceT:
return self.bucket
def cleanup(self) -> None:
pass
def copy(self) -> _GlobalResource[_InnerResourceT]:
return _GlobalResource(self.make_resource)
def _to_bucket(
resource: BucketResource, make_resource: _InnerResourceSig[_InnerResourceT]
) -> _BaseResource[_InnerResourceT]:
if resource is BucketResource.MEMBER:
return _MemberResource(make_resource)
if resource is BucketResource.GLOBAL:
return _GlobalResource(make_resource)
return _FlatResource(resource, make_resource)
class InMemoryCooldownManager(AbstractCooldownManager):
"""In-memory standard implementation of `AbstractCooldownManager`.
Examples
--------
`InMemoryCooldownManager.set_bucket` may be used to set the cooldown for a
specific bucket:
```py
(
InMemoryCooldownManager()
# Set the default bucket template to a per-user 10 uses per-60 seconds cooldown.
.set_bucket("default", tanjun.BucketResource.USER, 10, 60)
# Set the "moderation" bucket to a per-guild 100 uses per-5 minutes cooldown.
.set_bucket("moderation", tanjun.BucketResource.GUILD, 100, datetime.timedelta(minutes=5))
        # add_to_client will set up the cooldown manager (setting it as an
# injected dependency and registering callbacks to manage it).
.add_to_client(client)
)
```
"""
__slots__ = ("_buckets", "_default_bucket_template", "_gc_task")
def __init__(self) -> None:
self._buckets: dict[str, _BaseResource[_Cooldown]] = {}
self._default_bucket_template: _BaseResource[_Cooldown] = _FlatResource(
BucketResource.USER, lambda: _Cooldown(limit=2, reset_after=5)
)
self._gc_task: typing.Optional[asyncio.Task[None]] = None
def _get_or_default(self, bucket_id: str, /) -> _BaseResource[_Cooldown]:
if bucket := self._buckets.get(bucket_id):
return bucket
_LOGGER.info("No cooldown found for %r, falling back to 'default' bucket", bucket_id)
bucket = self._buckets[bucket_id] = self._default_bucket_template.copy()
return bucket
async def _gc(self) -> None:
while True:
await asyncio.sleep(10)
for bucket in self._buckets.values():
bucket.cleanup()
def add_to_client(self, client: injecting.InjectorClient, /) -> None:
"""Add this cooldown manager to a tanjun client.
.. note::
This registers the manager as a type dependency and manages opening
and closing the manager based on the client's life cycle.
Parameters
----------
client : tanjun.abc.Client
The client to add this cooldown manager to.
"""
client.set_type_dependency(AbstractCooldownManager, self)
# TODO: the injection client should be upgraded to the abstract Client.
assert isinstance(client, tanjun_abc.Client)
client.add_client_callback(tanjun_abc.ClientCallbackNames.STARTING, self.open)
client.add_client_callback(tanjun_abc.ClientCallbackNames.CLOSING, self.close)
if client.is_alive:
assert client.loop is not None
self.open(_loop=client.loop)
async def check_cooldown(
self, bucket_id: str, ctx: tanjun_abc.Context, /, *, increment: bool = False
) -> typing.Optional[float]:
# <<inherited docstring from AbstractCooldownManager>>.
if increment:
bucket = await self._get_or_default(bucket_id).into_inner(ctx)
if cooldown := bucket.must_wait_for():
return cooldown
bucket.increment()
return None
if (bucket := self._buckets.get(bucket_id)) and (cooldown := await bucket.try_into_inner(ctx)):
return cooldown.must_wait_for()
async def increment_cooldown(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> None:
# <<inherited docstring from AbstractCooldownManager>>.
(await self._get_or_default(bucket_id).into_inner(ctx)).increment()
def close(self) -> None:
"""Stop the cooldown manager.
Raises
------
RuntimeError
If the cooldown manager is not running.
"""
if not self._gc_task:
raise RuntimeError("Cooldown manager is not active")
self._gc_task.cancel()
self._gc_task = None
def open(self, *, _loop: typing.Optional[asyncio.AbstractEventLoop] = None) -> None:
"""Start the cooldown manager.
Raises
------
RuntimeError
If the cooldown manager is already running.
If called in a thread with no running event loop.
"""
if self._gc_task:
raise RuntimeError("Cooldown manager is already running")
self._gc_task = (_loop or asyncio.get_running_loop()).create_task(self._gc())
def disable_bucket(self: _InMemoryCooldownManagerT, bucket_id: str, /) -> _InMemoryCooldownManagerT:
"""Disable a cooldown bucket.
This will stop the bucket from ever hitting a cooldown and also
prevents the bucket from defaulting.
Parameters
----------
bucket_id : str
The bucket to disable.
.. note::
"default" is a special bucket which is used as a template
for unknown bucket IDs.
Returns
-------
Self
This cooldown manager to allow for chaining.
"""
# A limit of -1 is special cased to mean no limit and reset_after is ignored in this scenario.
bucket = self._buckets[bucket_id] = _GlobalResource(lambda: _Cooldown(limit=-1, reset_after=-1))
if bucket_id == "default":
self._default_bucket_template = bucket.copy()
return self
def set_bucket(
self: _InMemoryCooldownManagerT,
bucket_id: str,
resource: BucketResource,
limit: int,
reset_after: typing.Union[int, float, datetime.timedelta],
/,
) -> _InMemoryCooldownManagerT:
"""Set the cooldown for a specific bucket.
Parameters
----------
bucket_id : str
The ID of the bucket to set the cooldown for.
.. note::
"default" is a special bucket which is used as a template
for unknown bucket IDs.
resource : tanjun.BucketResource
The type of resource to target for the cooldown.
limit : int
The number of uses per cooldown period.
reset_after : int | float | datetime.timedelta
The cooldown period.
Returns
-------
Self
The cooldown manager to allow call chaining.
Raises
------
ValueError
If an invalid resource type is given.
            If `reset_after` or `limit` are 0 or negative.
"""
if isinstance(reset_after, datetime.timedelta):
reset_after_seconds = reset_after.total_seconds()
else:
reset_after_seconds = float(reset_after)
if reset_after_seconds <= 0:
raise ValueError("reset_after must be greater than 0 seconds")
if limit <= 0:
raise ValueError("limit must be greater than 0")
bucket = self._buckets[bucket_id] = _to_bucket(
BucketResource(resource), lambda: _Cooldown(limit=limit, reset_after=reset_after_seconds)
)
if bucket_id == "default":
self._default_bucket_template = bucket.copy()
return self
class CooldownPreExecution:
"""Pre-execution hook used to manage a command's cooldowns.
    To avoid race conditions, this hook both raises an error when the bucket
    is depleted and increments the bucket's use counter in a single call.
"""
__slots__ = ("_bucket_id", "_error_message", "_owners_exempt")
def __init__(
self,
bucket_id: str,
/,
*,
error_message: str = "Please wait {cooldown:0.2f} seconds before using this command again.",
owners_exempt: bool = True,
) -> None:
"""Initialise a pre-execution cooldown command hook.
Parameters
----------
bucket_id : str
The cooldown bucket's ID.
Other Parameters
----------------
error_message : str
The error message to send in response as a command error if the check fails.
Defaults to f"Please wait {cooldown:0.2f} seconds before using this command again.".
owners_exempt : bool
Whether owners should be exempt from the cooldown.
Defaults to `True`.
"""
self._bucket_id = bucket_id
self._error_message = error_message
self._owners_exempt = owners_exempt
async def __call__(
self,
ctx: tanjun_abc.Context,
cooldowns: AbstractCooldownManager = injecting.inject(type=AbstractCooldownManager),
owner_check: typing.Optional[owners.AbstractOwners] = injecting.inject(
type=typing.Optional[owners.AbstractOwners]
),
) -> None:
if self._owners_exempt:
if not owner_check:
_LOGGER.info("No `AbstractOwners` dependency found, disabling owner exemption for cooldown check")
self._owners_exempt = False
elif await owner_check.check_ownership(ctx.client, ctx.author):
return
if wait_for := await cooldowns.check_cooldown(self._bucket_id, ctx, increment=True):
raise errors.CommandError(self._error_message.format(cooldown=wait_for))
def with_cooldown(
bucket_id: str,
/,
*,
error_message: str = "Please wait {cooldown:0.2f} seconds before using this command again.",
owners_exempt: bool = True,
) -> collections.Callable[[CommandT], CommandT]:
"""Add a pre-execution hook used to manage a command's cooldown through a decorator call.
.. warning::
        Cooldowns will only work if an `AbstractCooldownManager` dependency has
        been set up (injected), with `InMemoryCooldownManager` usable as a
        standard in-memory cooldown manager.
Parameters
----------
bucket_id : str
The cooldown bucket's ID.
Other Parameters
----------------
error_message : str
The error message to send in response as a command error if the check fails.
Defaults to f"Please wait {cooldown:0.2f} seconds before using this command again.".
owners_exempt : bool
Whether owners should be exempt from the cooldown.
Defaults to `True`.
Returns
-------
collections.abc.Callable[[CommandT], CommandT]
A decorator that adds a `CooldownPreExecution` hook to the command.
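    Examples
    --------
    A minimal sketch; the bucket ID and command below are illustrative, not
    from this module:
    ```py
    @with_cooldown("main_commands")
    @tanjun.as_message_command("ping")
    async def ping(ctx: tanjun.abc.MessageContext) -> None:
        await ctx.respond("pong")
    ```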
"""
def decorator(command: CommandT, /) -> CommandT:
hooks_ = command.hooks
if not hooks_:
hooks_ = hooks.AnyHooks()
command.set_hooks(hooks_)
hooks_.add_pre_execution(
CooldownPreExecution(bucket_id, error_message=error_message, owners_exempt=owners_exempt)
)
return command
return decorator
class _ConcurrencyLimit:
__slots__ = ("counter", "limit")
def __init__(self, limit: int) -> None:
self.counter = 0
self.limit = limit
def acquire(self) -> bool:
if self.counter < self.limit:
self.counter += 1
return True
# A limit of -1 means unlimited so we don't need to keep count.
if self.limit == -1:
return True
return False
def release(self) -> None:
if self.counter > 0:
self.counter -= 1
return
# A limit of -1 means unlimited so we don't need to keep count.
if self.limit == -1:
return
raise RuntimeError("Cannot release a limit that has not been acquired, this should never happen")
def has_expired(self) -> bool:
# Expiration doesn't actually matter for cases where the limit is -1.
return self.counter == 0
class InMemoryConcurrencyLimiter(AbstractConcurrencyLimiter):
"""In-memory standard implementation of `AbstractConcurrencyLimiter`.
Examples
--------
`InMemoryConcurrencyLimiter.set_bucket` may be used to set the concurrency
limits for a specific bucket:
```py
(
InMemoryConcurrencyLimiter()
# Set the default bucket template to 10 concurrent uses of the command per-user.
.set_bucket("default", tanjun.BucketResource.USER, 10)
# Set the "moderation" bucket with a limit of 5 concurrent uses per-guild.
.set_bucket("moderation", tanjun.BucketResource.GUILD, 5)
        # add_to_client will set up the concurrency manager (setting it as an
# injected dependency and registering callbacks to manage it).
.add_to_client(client)
)
```
"""
__slots__ = ("_acquiring_ctxs", "_buckets", "_default_bucket_template", "_gc_task")
def __init__(self) -> None:
self._acquiring_ctxs: dict[tuple[str, tanjun_abc.Context], _ConcurrencyLimit] = {}
self._buckets: dict[str, _BaseResource[_ConcurrencyLimit]] = {}
self._default_bucket_template: _BaseResource[_ConcurrencyLimit] = _FlatResource(
BucketResource.USER, lambda: _ConcurrencyLimit(limit=1)
)
self._gc_task: typing.Optional[asyncio.Task[None]] = None
async def _gc(self) -> None:
while True:
await asyncio.sleep(10)
for bucket in self._buckets.values():
bucket.cleanup()
def add_to_client(self, client: injecting.InjectorClient, /) -> None:
"""Add this concurrency manager to a tanjun client.
.. note::
This registers the manager as a type dependency and manages opening
and closing the manager based on the client's life cycle.
Parameters
----------
client : tanjun.abc.Client
The client to add this concurrency manager to.
"""
client.set_type_dependency(AbstractConcurrencyLimiter, self)
# TODO: the injection client should be upgraded to the abstract Client.
assert isinstance(client, tanjun_abc.Client)
client.add_client_callback(tanjun_abc.ClientCallbackNames.STARTING, self.open)
client.add_client_callback(tanjun_abc.ClientCallbackNames.CLOSING, self.close)
if client.is_alive:
assert client.loop is not None
self.open(_loop=client.loop)
def close(self) -> None:
"""Stop the concurrency manager.
Raises
------
RuntimeError
If the concurrency manager is not running.
"""
if not self._gc_task:
raise RuntimeError("Concurrency manager is not active")
self._gc_task.cancel()
self._gc_task = None
def open(self, *, _loop: typing.Optional[asyncio.AbstractEventLoop] = None) -> None:
"""Start the concurrency manager.
Raises
------
RuntimeError
If the concurrency manager is already running.
If called in a thread with no running event loop.
"""
if self._gc_task:
raise RuntimeError("Concurrency manager is already running")
self._gc_task = (_loop or asyncio.get_running_loop()).create_task(self._gc())
async def try_acquire(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> bool:
# <<inherited docstring from AbstractConcurrencyLimiter>>.
bucket = self._buckets.get(bucket_id)
if not bucket:
_LOGGER.info("No concurrency limit found for %r, falling back to 'default' bucket", bucket_id)
bucket = self._buckets[bucket_id] = self._default_bucket_template.copy()
# incrementing a bucket multiple times for the same context could lead
# to weird edge cases based on how we internally track this, so we
# internally de-duplicate this.
elif (bucket_id, ctx) in self._acquiring_ctxs:
return True # This won't ever be the case if it just had to make a new bucket, hence the elif.
if result := (limit := await bucket.into_inner(ctx)).acquire():
self._acquiring_ctxs[(bucket_id, ctx)] = limit
return result
async def release(self, bucket_id: str, ctx: tanjun_abc.Context, /) -> None:
# <<inherited docstring from AbstractConcurrencyLimiter>>.
if limit := self._acquiring_ctxs.pop((bucket_id, ctx), None):
limit.release()
def disable_bucket(self: _InMemoryConcurrencyLimiterT, bucket_id: str, /) -> _InMemoryConcurrencyLimiterT:
"""Disable a concurrency limit bucket.
This will stop the bucket from ever hitting a concurrency limit
and also prevents the bucket from defaulting.
Parameters
----------
bucket_id : str
The bucket to disable.
.. note::
"default" is a special bucket which is used as a template
for unknown bucket IDs.
Returns
-------
Self
This concurrency manager to allow for chaining.
"""
bucket = self._buckets[bucket_id] = _GlobalResource(lambda: _ConcurrencyLimit(limit=-1))
if bucket_id == "default":
self._default_bucket_template = bucket.copy()
return self
def set_bucket(
self: _InMemoryConcurrencyLimiterT, bucket_id: str, resource: BucketResource, limit: int, /
) -> _InMemoryConcurrencyLimiterT:
"""Set the concurrency limit for a specific bucket.
Parameters
----------
bucket_id : str
The ID of the bucket to set the concurrency limit for.
.. note::
"default" is a special bucket which is used as a template
for unknown bucket IDs.
resource : tanjun.BucketResource
The type of resource to target for the concurrency limit.
limit : int
The maximum number of concurrent uses to allow.
Returns
-------
Self
The concurrency manager to allow call chaining.
Raises
------
ValueError
If an invalid resource type is given.
            If `limit` is 0 or negative.
"""
if limit <= 0:
raise ValueError("limit must be greater than 0")
bucket = self._buckets[bucket_id] = _to_bucket(BucketResource(resource), lambda: _ConcurrencyLimit(limit=limit))
if bucket_id == "default":
self._default_bucket_template = bucket.copy()
return self
class ConcurrencyPreExecution:
"""Pre-execution hook used to acquire a bucket concurrency limiter.
.. note::
For a concurrency limiter to work properly, both `ConcurrencyPreExecution`
and `ConcurrencyPostExecution` hooks must be registered for a command scope.
"""
__slots__ = ("_bucket_id", "_error_message")
def __init__(
self,
bucket_id: str,
/,
*,
error_message: str = "This resource is currently busy; please try again later.",
) -> None:
"""Initialise a concurrency pre-execution hook.
Parameters
----------
bucket_id : str
The concurrency limit bucket's ID.
Other Parameters
----------------
error_message : str
The error message to send in response as a command error if this fails
to acquire the concurrency limit.
Defaults to "This resource is currently busy; please try again later.".
"""
self._bucket_id = bucket_id
self._error_message = error_message
async def __call__(
self,
ctx: tanjun_abc.Context,
limiter: AbstractConcurrencyLimiter = injecting.inject(type=AbstractConcurrencyLimiter),
) -> None:
if not await limiter.try_acquire(self._bucket_id, ctx):
raise errors.CommandError(self._error_message)
class ConcurrencyPostExecution:
"""Post-execution hook used to release a bucket concurrency limiter.
.. note::
For a concurrency limiter to work properly, both `ConcurrencyPreExecution`
and `ConcurrencyPostExecution` hooks must be registered for a command scope.
"""
__slots__ = ("_bucket_id",)
def __init__(self, bucket_id: str, /) -> None:
"""Initialise a concurrency post-execution hook.
Parameters
----------
bucket_id : str
The concurrency limit bucket's ID.
"""
self._bucket_id = bucket_id
async def __call__(
self,
ctx: tanjun_abc.Context,
limiter: AbstractConcurrencyLimiter = injecting.inject(type=AbstractConcurrencyLimiter),
) -> None:
await limiter.release(self._bucket_id, ctx)
def with_concurrency_limit(
bucket_id: str,
/,
*,
error_message: str = "This resource is currently busy; please try again later.",
) -> collections.Callable[[CommandT], CommandT]:
"""Add the hooks used to manage a command's concurrency limit through a decorator call.
.. warning::
        Concurrency limiters will only work if an `AbstractConcurrencyLimiter`
        dependency has been set up (injected), with `InMemoryConcurrencyLimiter`
        usable as a standard in-memory concurrency manager.
Parameters
----------
bucket_id : str
The concurrency limit bucket's ID.
Other Parameters
----------------
error_message : str
The error message to send in response as a command error if this fails
to acquire the concurrency limit.
Defaults to "This resource is currently busy; please try again later.".
Returns
-------
collections.abc.Callable[[CommandT], CommandT]
A decorator that adds the concurrency limiter hooks to a command.
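    Examples
    --------
    A minimal sketch; the bucket ID and command below are illustrative, not
    from this module:
    ```py
    @with_concurrency_limit("expensive_commands")
    @tanjun.as_message_command("export")
    async def export(ctx: tanjun.abc.MessageContext) -> None:
        await ctx.respond("Export started")
    ```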
"""
def decorator(command: CommandT, /) -> CommandT:
hooks_ = command.hooks
if not hooks_:
hooks_ = hooks.AnyHooks()
command.set_hooks(hooks_)
hooks_.add_pre_execution(ConcurrencyPreExecution(bucket_id, error_message=error_message)).add_post_execution(
ConcurrencyPostExecution(bucket_id)
)
return command
return decorator
| 34.214416 | 120 | 0.645511 | 4,358 | 37,499 | 5.379073 | 0.122533 | 0.023547 | 0.014077 | 0.017021 | 0.583824 | 0.516594 | 0.468091 | 0.434349 | 0.412763 | 0.383286 | 0 | 0.003304 | 0.265607 | 37,499 | 1,095 | 121 | 34.245662 | 0.847925 | 0.311822 | 0 | 0.466951 | 0 | 0 | 0.067315 | 0.016139 | 0 | 0 | 0 | 0.00274 | 0.014925 | 1 | 0.089552 | false | 0.006397 | 0.034115 | 0.010661 | 0.317697 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
68b5b425e5861b77448bbe09dd3f02098c19b2a6 | 3,112 | py | Python | Paciente_App/navigation_screen.py | JoseFernandez16/mecatronicaUNT_Prog2_SMTCPCSOS | 4d6cfe16d5936d2064a9c4cc6e644b70cd1fdafc | [
"MIT"
] | null | null | null | Paciente_App/navigation_screen.py | JoseFernandez16/mecatronicaUNT_Prog2_SMTCPCSOS | 4d6cfe16d5936d2064a9c4cc6e644b70cd1fdafc | [
"MIT"
] | null | null | null | Paciente_App/navigation_screen.py | JoseFernandez16/mecatronicaUNT_Prog2_SMTCPCSOS | 4d6cfe16d5936d2064a9c4cc6e644b70cd1fdafc | [
"MIT"
] | null | null | null | from kivymd.uix.screen import MDScreen
from kivy.lang import Builder
from kivymd.uix.list import OneLineIconListItem,IconLeftWidget
from kivymd.app import MDApp
from functools import partial
import sys
class ListIcon(OneLineIconListItem):
def __init__(self,**kw):
super().__init__()
self.text=kw['text']
self.icon=IconLeftWidget(icon=kw['icon'])
self.add_widget(self.icon)
self.on_release=kw['on_release']
kv="""
<NavigationScreen>
name:'navigation_screen'
NavigationLayout:
id:nav_layout
ScreenManager:
MDScreen:
MDBoxLayout:
orientation:'vertical'
MDToolbar:
id:tool_bar
title:'Pacient-App'
left_action_items:[["menu",lambda x: nav_drawer.set_state()]]
ScreenManager:
id:screen_manager
MDNavigationDrawer:
id:nav_drawer
MDBoxLayout:
orientation:'vertical'
padding: "8dp"
spacing: "8dp"
Image:
size_hint_y: .3
source:'recursos/imagenes/logo1.jpg'
ScrollView:
MDList:
id:nav_list
OneLineIconListItem:
text:'Cerrar Sesión'
on_release:root.cerrar_sesion()
IconLeftWidget:
icon:"close-circle"
"""
class NavigationScreen(MDScreen):
Builder.load_string(kv)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.app=MDApp.get_running_app()
        # list of screens (id, title[text], icon)
from pacientes_screen import PacientesScreen
from informacion_screen import InformacionScreen
from historial_screen import HistorialScreen
self.list_screen = {
            PacientesScreen:('pacientes_screen','Enviar datos','file-send'),  # only the label was changed, due to time constraints
InformacionScreen:('information_screen','Información','information'),
HistorialScreen:('historial_screen','Historial','history')
}
def on_enter(self, *args):
for screen,details in self.list_screen.items():
identification,text,icon=details
self.ids.screen_manager.add_widget(screen(name=identification))
self.ids.nav_list.add_widget(ListIcon(text=text,icon=icon,on_release=partial(self.button_list_actions,text,identification)))
def button_list_actions(self,title,identification):
self.ids.tool_bar.title=title
self.ids.screen_manager.current=identification
self.ids.nav_drawer.set_state()
def cerrar_sesion(self):
archivo_texto=open('info_paciente.txt','w')
archivo_texto.write('')
archivo_texto.close()
sys.exit()
| 33.826087 | 137 | 0.567481 | 294 | 3,112 | 5.792517 | 0.442177 | 0.020552 | 0.036994 | 0.019965 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00196 | 0.344152 | 3,112 | 91 | 138 | 34.197802 | 0.832435 | 0.027956 | 0 | 0.082192 | 0 | 0 | 0.436562 | 0.065484 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.123288 | 0 | 0.219178 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d799c2d00d7e1721289382d7e9eec0c49f69185f | 24,182 | py | Python | myapp/views/base.py | tencentmusic/fab | e4fa8f505f7c78614b0e63601bb499373aa91a33 | [
"MIT"
] | 19 | 2021-08-05T05:08:54.000Z | 2022-03-17T06:18:48.000Z | myapp/views/base.py | tencentmusic/fab | e4fa8f505f7c78614b0e63601bb499373aa91a33 | [
"MIT"
] | null | null | null | myapp/views/base.py | tencentmusic/fab | e4fa8f505f7c78614b0e63601bb499373aa91a33 | [
"MIT"
] | 5 | 2021-08-05T06:54:45.000Z | 2022-03-01T12:43:27.000Z |
import datetime
import os
import functools
import logging
import traceback
from typing import Any, Dict
import pysnooper
from flask_appbuilder.forms import GeneralModelConverter
from flask import abort, flash, g, get_flashed_messages, redirect, Response
from flask_appbuilder import BaseView, ModelView,urltools
from flask_appbuilder.actions import action
from flask_appbuilder.forms import DynamicForm
from flask_appbuilder.models.sqla.filters import BaseFilter
from flask_appbuilder.widgets import ListWidget
from myapp.forms import MySearchWidget
from flask_babel import get_locale
from flask_babel import gettext as __
from flask_babel import lazy_gettext as _
from flask_wtf.form import FlaskForm
import simplejson as json
from werkzeug.exceptions import HTTPException
from wtforms.fields.core import Field, UnboundField
from flask_appbuilder import ModelView, ModelRestApi
import yaml
from flask_appbuilder.security.decorators import has_access, has_access_api, permission_name
from flask_appbuilder.baseviews import BaseCRUDView, BaseFormView, BaseView, expose, expose_api
from myapp import conf, db, get_feature_flags, security_manager,event_logger
from myapp.exceptions import MyappException, MyappSecurityException
from myapp.translations.utils import get_language_pack
from myapp.utils import core
from sqlalchemy import or_
from flask_appbuilder.urltools import (
get_filter_args,
get_order_args,
get_page_args,
get_page_size_args,
Stack,
)
from flask import (
current_app,
abort,
flash,
g,
Markup,
make_response,
redirect,
render_template,
request,
send_from_directory,
Response,
url_for,
)
from flask import Flask, jsonify
from apispec import yaml_utils
from flask import Blueprint, current_app, jsonify, make_response, request
from flask_babel import lazy_gettext as _
import yaml
FRONTEND_CONF_KEYS = (
"MYAPP_WEBSERVER_TIMEOUT",
"ENABLE_JAVASCRIPT_CONTROLS",
"MYAPP_WEBSERVER_DOMAINS",
)
def get_error_msg():
if conf.get("SHOW_STACKTRACE"):
error_msg = traceback.format_exc()
else:
error_msg = "FATAL ERROR \n"
error_msg += (
"Stacktrace is hidden. Change the SHOW_STACKTRACE "
"configuration setting to enable it"
)
return error_msg
def json_error_response(msg=None, status=500, stacktrace=None, payload=None, link=None):
if not payload:
payload = {"error": "{}".format(msg)}
payload["stacktrace"] = core.get_stacktrace()
if link:
payload["link"] = link
return Response(
json.dumps(payload, default=core.json_iso_dttm_ser, ignore_nan=True),
status=status,
mimetype="application/json",
)
def json_success(json_msg, status=200):
return Response(json_msg, status=status, mimetype="application/json")
def data_payload_response(payload_json, has_error=False):
status = 400 if has_error else 200
return json_success(payload_json, status=status)
# Generate the response headers for a CSV download
def generate_download_headers(extension, filename=None):
filename = filename if filename else datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
content_disp = "attachment; filename={}.{}".format(filename, extension)
headers = {"Content-Disposition": content_disp}
return headers
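# Hedged usage sketch (the CSV payload below is illustrative):
#   csv_data = "col_a,col_b\n1,2\n"
#   return Response(csv_data, headers=generate_download_headers("csv"),
#                   mimetype="text/csv")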
def api(f):
"""
A decorator to label an endpoint as an API. Catches uncaught exceptions and
    returns the response in JSON format.
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except Exception as e:
logging.exception(e)
return json_error_response(get_error_msg())
return functools.update_wrapper(wraps, f)
def handle_api_exception(f):
"""
A decorator to catch myapp exceptions. Use it after the @api decorator above
    so the myapp exception handler is triggered before the handler for generic
exceptions.
"""
def wraps(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
except MyappSecurityException as e:
logging.exception(e)
return json_error_response(
core.error_msg_from_exception(e),
status=e.status,
stacktrace=core.get_stacktrace(),
link=e.link,
)
except MyappException as e:
logging.exception(e)
return json_error_response(
core.error_msg_from_exception(e),
stacktrace=core.get_stacktrace(),
status=e.status,
)
except HTTPException as e:
logging.exception(e)
return json_error_response(
core.error_msg_from_exception(e),
stacktrace=traceback.format_exc(),
status=e.code,
)
except Exception as e:
logging.exception(e)
return json_error_response(
core.error_msg_from_exception(e), stacktrace=core.get_stacktrace()
)
return functools.update_wrapper(wraps, f)
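# Hedged usage sketch: stack the two decorators so myapp-specific errors are
# handled before the generic fallback (the view class and route below are
# illustrative):
#
#   class ExampleApi(BaseMyappView):
#       @expose("/example")
#       @api
#       @handle_api_exception
#       def example(self):
#           return json_success(json.dumps({"ok": True}))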
# Get the current user's roles
def get_user_roles():
if g.user.is_anonymous:
public_role = conf.get("AUTH_ROLE_PUBLIC")
return [security_manager.find_role(public_role)] if public_role else []
return g.user.roles
class BaseMyappView(BaseView):
    # JSON response
def json_response(self, obj, status=200):
return Response(
json.dumps(obj, default=core.json_int_dttm_ser, ignore_nan=True),
status=status,
mimetype="application/json",
)
    # Data sent to the frontend
def common_bootstrap_payload(self):
"""Common data always sent to the client"""
messages = get_flashed_messages(with_categories=True)
locale = str(get_locale())
return {
"flash_messages": messages,
"conf": {k: conf.get(k) for k in FRONTEND_CONF_KEYS},
"locale": locale,
"language_pack": get_language_pack(locale),
"feature_flags": get_feature_flags(),
}
# Custom list page widget
class MyappListWidget(ListWidget):
template = "myapp/fab_overrides/list.html"
# Base view for model pages
class MyappModelView(ModelView):
api_type='web'
datamodel=None
page_size = 100
list_widget = MyappListWidget
    src_item_object = None  # the original model object
    src_item_json = {}  # JSON form of the original model object
check_redirect_list_url=None
search_widget = MySearchWidget
help_url=''
pre_add_get = None
pre_update_get = None
pre_list = None
post_list = None
pre_show = None
post_show = None
label_title = ''
    # Configure the CRUD page titles
def _init_titles(self):
"""
Init Titles if not defined
"""
class_name = self.datamodel.model_name
if not self.list_title:
if not self.label_title:
self.list_title = "List " + self._prettify_name(class_name)
else:
self.list_title = self.label_title + " 列表"
if not self.add_title:
if not self.label_title:
self.add_title = "Add " + self._prettify_name(class_name)
else:
self.add_title = '添加 ' + self.label_title
if not self.edit_title:
if not self.label_title:
self.edit_title = "Edit " + self._prettify_name(class_name)
else:
self.edit_title ='修改 ' + self.label_title
if not self.show_title:
if not self.label_title:
self.show_title = "Show " + self._prettify_name(class_name)
else:
self.show_title = self.label_title+" 详情"
self.title = self.list_title
    # Configure the localized column labels
# @pysnooper.snoop()
def _gen_labels_columns(self, list_columns):
"""
Auto generates pretty label_columns from list of columns
"""
if hasattr(self.datamodel.obj,'label_columns') and self.datamodel.obj.label_columns:
for col in self.datamodel.obj.label_columns:
self.label_columns[col] = self.datamodel.obj.label_columns[col]
for col in list_columns:
if not self.label_columns.get(col):
self.label_columns[col] = self._prettify_column(col)
    # Get the localized display name for a column
def lab(self,col):
if col in self.label_columns:
return _(self.label_columns[col])
return _(self._prettify_column(col))
def pre_delete(self, item):
pass
def _get_search_widget(self, form=None, exclude_cols=None, widgets=None):
exclude_cols = exclude_cols or []
widgets = widgets or {}
widgets["search"] = self.search_widget(
route_base=self.route_base,
form=form,
include_cols=self.search_columns,
exclude_cols=exclude_cols,
filters=self._filters,
help_url = self.help_url
)
return widgets
def _get_list_widget(
self,
filters,
actions=None,
order_column="",
order_direction="",
page=None,
page_size=None,
widgets=None,
**args,
):
""" get joined base filter and current active filter for query """
widgets = widgets or {}
actions = actions or self.actions
page_size = page_size or self.page_size
if not order_column and self.base_order:
order_column, order_direction = self.base_order
joined_filters = filters.get_joined_filters(self._base_filters)
count, lst = self.datamodel.query(
joined_filters,
order_column,
order_direction,
page=page,
page_size=page_size,
)
if self.post_list:
lst = self.post_list(lst)
pks = self.datamodel.get_keys(lst)
# serialize composite pks
pks = [self._serialize_pk_if_composite(pk) for pk in pks]
widgets["list"] = self.list_widget(
label_columns=self.label_columns,
include_columns=self.list_columns,
value_columns=self.datamodel.get_values(lst, self.list_columns),
order_columns=self.order_columns,
formatters_columns=self.formatters_columns,
page=page,
page_size=page_size,
count=count,
pks=pks,
actions=actions,
filters=filters,
modelview_name=self.__class__.__name__,
)
return widgets
@event_logger.log_this
@expose("/list/")
@has_access
def list(self):
if self.pre_list:
self.pre_list()
widgets = self._list()
res = self.render_template(
self.list_template, title=self.list_title, widgets=widgets
)
return res
@event_logger.log_this
@expose("/show/<pk>", methods=["GET"])
@has_access
def show(self, pk):
pk = self._deserialize_pk_if_composite(pk)
if self.pre_show:
src_item_object = self.datamodel.get(pk, self._base_filters)
self.pre_show(src_item_object)
widgets = self._show(pk)
return self.render_template(
self.show_template,
pk=pk,
title=self.show_title,
widgets=widgets,
related_views=self._related_views,
)
@event_logger.log_this
@expose("/add", methods=["GET", "POST"])
@has_access
def add(self):
self.src_item_json = {}
if request.method=='GET' and self.pre_add_get:
try:
self.pre_add_get()
self.conv = GeneralModelConverter(self.datamodel)
self.add_form = self.conv.create_form(
self.label_columns,
self.add_columns,
self.description_columns,
self.validators_columns,
self.add_form_extra_fields,
self.add_form_query_rel_fields,
)
except Exception as e:
print(e)
return redirect(self.get_redirect())
widget = self._add()
if not widget:
return self.post_add_redirect()
else:
return self.render_template(
self.add_template, title=self.add_title, widgets=widget
)
    # check edit permission: only the creator and admins may edit
def check_edit_permission(self, item):
user_roles = [role.name.lower() for role in list(get_user_roles())]
if "admin" in user_roles:
return
if g.user and g.user.username and hasattr(item,'created_by'):
            if g.user.username != item.created_by.username:
                raise MyappException('only the creator can edit/delete')
def _edit(self, pk):
"""
Edit function logic, override to implement different logic
returns Edit widget and related list or None
"""
is_valid_form = True
pages = get_page_args()
page_sizes = get_page_size_args()
orders = get_order_args()
get_filter_args(self._filters)
exclude_cols = self._filters.get_relation_cols()
        # fetch the model record
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
# convert pk to correct type, if pk is non string type.
pk = self.datamodel.get_pk_value(item)
        # POST: update the record
if request.method == "POST":
form = self.edit_form.refresh(request.form)
# fill the form with the suppressed cols, generated from exclude_cols
self._fill_form_exclude_cols(exclude_cols, form)
# trick to pass unique validation
form._id = pk
if form.validate():
self.process_form(form, False)
try:
form.populate_obj(item)
self.pre_update(item)
except Exception as e:
flash(str(e), "danger")
else:
if self.datamodel.edit(item):
self.post_update(item)
flash(*self.datamodel.message)
finally:
return None
else:
is_valid_form = False
        # GET: render the edit form
else:
# Only force form refresh for select cascade events
form = self.edit_form.refresh(obj=item)
# Perform additional actions to pre-fill the edit form.
self.prefill_form(form, pk)
widgets = self._get_edit_widget(form=form, exclude_cols=exclude_cols)
widgets = self._get_related_views_widgets(
item,
filters={},
orders=orders,
pages=pages,
page_sizes=page_sizes,
widgets=widgets,
)
if is_valid_form:
self.update_redirect()
return widgets
@event_logger.log_this
@expose("/edit/<pk>", methods=["GET", "POST"])
@has_access
def edit(self, pk):
pk = self._deserialize_pk_if_composite(pk)
self.src_item_object = self.datamodel.get(pk, self._base_filters)
if request.method=='GET' and self.pre_update_get:
try:
self.pre_update_get(self.src_item_object)
self.conv = GeneralModelConverter(self.datamodel)
                # rebuild the form on every request, not only at init time
self.edit_form = self.conv.create_form(
self.label_columns,
self.edit_columns,
self.description_columns,
self.validators_columns,
self.edit_form_extra_fields,
self.edit_form_query_rel_fields,
)
except Exception as e:
print(e)
self.update_redirect()
return redirect(self.get_redirect())
self.src_item_json = self.src_item_object.to_json()
if self.check_redirect_list_url:
try:
self.check_edit_permission(self.src_item_object)
except Exception as e:
print(e)
flash(str(e), 'warning')
return redirect(self.check_redirect_list_url)
widgets = self._edit(pk)
if not widgets:
return self.post_edit_redirect()
else:
return self.render_template(
self.edit_template,
title=self.edit_title,
widgets=widgets,
related_views=self._related_views,
)
@event_logger.log_this
@expose("/delete/<pk>")
@has_access
def delete(self, pk):
pk = self._deserialize_pk_if_composite(pk)
src_item_object = self.datamodel.get(pk, self._base_filters)
self.src_item_json = src_item_object.to_json()
if self.check_redirect_list_url:
try:
self.check_edit_permission(src_item_object)
except Exception as e:
print(e)
flash(str(e), 'warning')
return redirect(self.check_redirect_list_url)
self._delete(pk)
url = url_for(f"{self.endpoint}.list")
return redirect(url)
# return self.post_delete_redirect()
# list page that supports multi-select
class ListWidgetWithCheckboxes(ListWidget):
"""An alternative to list view that renders Boolean fields as checkboxes
Works in conjunction with the `checkbox` view."""
template = "myapp/fab_overrides/list_with_checkboxes.html"
def validate_json(form, field): # noqa
try:
json.loads(field.data)
except Exception as e:
logging.exception(e)
raise Exception(_("json isn't valid"))
class YamlExportMixin(object):
@action("yaml_export", __("Export to YAML"), __("Export to YAML?"), "fa-download")
def yaml_export(self, items):
if not isinstance(items, list):
items = [items]
data = [t.export_to_dict() for t in items]
return Response(
yaml.safe_dump(data),
headers=generate_download_headers("yaml"),
mimetype="application/text",
)
# delete / bulk-delete actions on the list page
class DeleteMixin(object):
def _delete(self, pk):
"""
        Delete function logic, override to implement different logic
deletes the record with primary_key = pk
:param pk:
record primary key to delete
"""
item = self.datamodel.get(pk, self._base_filters)
if not item:
abort(404)
try:
self.pre_delete(item)
except Exception as e:
flash(str(e), "danger")
else:
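            # collect the permission-view records tied to this item so they can
            # be purged from the security tables together with the item itself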
view_menu = security_manager.find_view_menu(item.get_perm())
pvs = (
security_manager.get_session.query(
security_manager.permissionview_model
)
.filter_by(view_menu=view_menu)
.all()
)
schema_view_menu = None
if hasattr(item, "schema_perm"):
schema_view_menu = security_manager.find_view_menu(item.schema_perm)
pvs.extend(
security_manager.get_session.query(
security_manager.permissionview_model
)
.filter_by(view_menu=schema_view_menu)
.all()
)
if self.datamodel.delete(item):
self.post_delete(item)
for pv in pvs:
security_manager.get_session.delete(pv)
if view_menu:
security_manager.get_session.delete(view_menu)
if schema_view_menu:
security_manager.get_session.delete(schema_view_menu)
security_manager.get_session.commit()
flash(*self.datamodel.message)
self.update_redirect()
@action(
"muldelete", __("Delete"), __("Delete all Really?"), "fa-trash", single=False
)
def muldelete(self, items):
if not items:
abort(404)
for item in items:
try:
self.pre_delete(item)
except Exception as e:
flash(str(e), "danger")
else:
self._delete(item.id)
self.update_redirect()
return redirect(self.get_redirect())
# filters for model list views
class MyappFilter(BaseFilter):
"""Add utility function to make BaseFilter easy and fast
These utility function exist in the SecurityManager, but would do
a database round trip at every check. Here we cache the role objects
to be able to make multiple checks but query the db only once
"""
def get_user_roles(self):
return get_user_roles()
def get_all_permissions(self):
"""Returns a set of tuples with the perm name and view menu name"""
perms = set()
for role in self.get_user_roles():
for perm_view in role.permissions:
t = (perm_view.permission.name, perm_view.view_menu.name)
perms.add(t)
return perms
def has_role(self, role_name_or_list):
"""Whether the user has this role name"""
if not isinstance(role_name_or_list, list):
role_name_or_list = [role_name_or_list]
return any([r.name in role_name_or_list for r in self.get_user_roles()])
def has_perm(self, permission_name, view_menu_name):
"""Whether the user has this perm"""
return (permission_name, view_menu_name) in self.get_all_permissions()
    # get all view menus bound to the given permission
def get_view_menus(self, permission_name):
"""Returns the details of view_menus for a perm name"""
vm = set()
for perm_name, vm_name in self.get_all_permissions():
if perm_name == permission_name:
vm.add(vm_name)
return vm
# CSV download response
class CsvResponse(Response):
"""
Override Response to take into account csv encoding from config.py
"""
charset = conf.get("CSV_EXPORT").get("encoding", "utf-8")
# ownership / permission check
def check_ownership(obj, raise_if_false=True):
"""Meant to be used in `pre_update` hooks on models to enforce ownership
Admin have all access, and other users need to be referenced on either
the created_by field that comes with the ``AuditMixin``, or in a field
named ``owners`` which is expected to be a one-to-many with the User
model. It is meant to be used in the ModelView's pre_update hook in
which raising will abort the update.
"""
if not obj:
return False
security_exception = MyappSecurityException(
"You don't have the rights to alter [{}]".format(obj)
)
if g.user.is_anonymous:
if raise_if_false:
raise security_exception
return False
roles = [r.name for r in get_user_roles()]
if "Admin" in roles:
return True
session = db.create_scoped_session()
orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()
# Making a list of owners that works across ORM models
owners = []
if hasattr(orig_obj, "owners"):
owners += orig_obj.owners
if hasattr(orig_obj, "owner"):
owners += [orig_obj.owner]
if hasattr(orig_obj, "created_by"):
owners += [orig_obj.created_by]
owner_names = [o.username for o in owners if o]
if g.user and hasattr(g.user, "username") and g.user.username in owner_names:
return True
if raise_if_false:
raise security_exception
else:
return False
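# A minimal usage sketch (the view and model names here are illustrative only,
# not part of this module): call check_ownership from a ModelView's
# pre_update/pre_delete hook so that a non-owner change raises
# MyappSecurityException and aborts the save.
#
#     class PipelineModelView(MyappModelView):
#         def pre_update(self, item):
#             check_ownership(item)
#
#         def pre_delete(self, item):
#             check_ownership(item)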
# field binding: strip surrounding whitespace from every bound string field
def bind_field(
self, form: DynamicForm, unbound_field: UnboundField, options: Dict[Any, Any]
) -> Field:
"""
Customize how fields are bound by stripping all whitespace.
:param form: The form
:param unbound_field: The unbound field
:param options: The field options
:returns: The bound field
"""
filters = unbound_field.kwargs.get("filters", [])
filters.append(lambda x: x.strip() if isinstance(x, str) else x)
return unbound_field.bind(form=form, filters=filters, **options)
FlaskForm.Meta.bind_field = bind_field
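# A minimal illustration of the effect (the form and field names are
# hypothetical, not part of this module): once FlaskForm.Meta.bind_field is
# patched as above, every string field strips surrounding whitespace before
# validation runs.
#
#     class NoteForm(FlaskForm):
#         title = StringField('title')
#
#     # A submitted value of '  hello  ' validates and binds as 'hello'.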
| 31.860343 | 95 | 0.612274 | 2,884 | 24,182 | 4.898752 | 0.175798 | 0.007078 | 0.013448 | 0.012741 | 0.312642 | 0.237118 | 0.210079 | 0.157135 | 0.137174 | 0.12316 | 0 | 0.001665 | 0.304483 | 24,182 | 758 | 96 | 31.902375 | 0.838338 | 0.10574 | 0 | 0.283422 | 0 | 0 | 0.043049 | 0.006869 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064171 | false | 0.001783 | 0.067736 | 0.005348 | 0.26738 | 0.008913 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d799efd5de3e4090edd52b56624f0c2d0eab4cca | 5,959 | py | Python | ggshield/scannable.py | pitoukiller/my-shield | 53b6b1a207f4ef9759ebf5771431949ace0f3b90 | [
"MIT"
] | null | null | null | ggshield/scannable.py | pitoukiller/my-shield | 53b6b1a207f4ef9759ebf5771431949ace0f3b90 | [
"MIT"
] | null | null | null | ggshield/scannable.py | pitoukiller/my-shield | 53b6b1a207f4ef9759ebf5771431949ace0f3b90 | [
"MIT"
] | null | null | null | import os
import re
from typing import Dict, Iterable, List, NamedTuple, Optional, Set
import click
from pygitguardian import GGClient
from pygitguardian.config import MULTI_DOCUMENT_LIMIT
from pygitguardian.models import ScanResult
from .filter import remove_ignored_from_result
from .git_shell import shell
from .scannable_errors import handle_scan_error
from .utils import MAX_FILE_SIZE, Filemode
class Result(NamedTuple):
"""
Return model for a scan which zips the information
    between the Scan result and its input content.
"""
content: str # Text content scanned
filemode: Filemode # Filemode (useful for commits)
filename: str # Filename of content scanned
scan: ScanResult # Result of content scan
class File:
""" Class representing a simple file. """
def __init__(self, document: str, filename: str, filesize: Optional[int] = None):
self.document = document
self.filename = filename
self.filemode = Filemode.FILE
self.filesize = filesize if filesize else len(self.document.encode("utf-8"))
@property
def scan_dict(self) -> Dict[str, str]:
""" Return a payload compatible with the scanning API. """
return {
"filename": self.filename
if len(self.filename) <= 256
else self.filename[-255:],
"document": self.document,
"filemode": self.filemode,
}
class CommitFile(File):
""" Class representing a commit file. """
def __init__(
self,
document: str,
filename: str,
filemode: Filemode,
filesize: Optional[int] = None,
):
super().__init__(document, filename, filesize)
self.filemode = filemode
class Files:
"""
Files is a list of files. Useful for directory scanning.
"""
def __init__(self, files: List[File]):
self._files = {entry.filename: entry for entry in files}
@property
def files(self) -> Dict[str, File]:
return self._files
@property
def scannable_list(self) -> List[Dict[str, str]]:
return [entry.scan_dict for entry in self.files.values()]
def scan(
self, client: GGClient, matches_ignore: Iterable[str], verbose: bool
) -> List[Result]:
scannable_list = self.scannable_list
results = []
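        # The scanning API accepts at most MULTI_DOCUMENT_LIMIT documents per
        # call, so the payload is scanned in chunks of that size.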
for i in range(0, len(scannable_list), MULTI_DOCUMENT_LIMIT):
chunk = scannable_list[i : i + MULTI_DOCUMENT_LIMIT]
scan = client.multi_content_scan(chunk)
if not scan.success:
handle_scan_error()
continue
for index, scanned in enumerate(scan.scan_results):
remove_ignored_from_result(scanned, matches_ignore)
if scanned.has_secrets:
results.append(
Result(
content=chunk[index]["document"],
scan=scanned,
filemode=chunk[index]["filemode"],
filename=chunk[index]["filename"],
)
)
return results
class Commit(Files):
"""
Commit represents a commit which is a list of commit files.
"""
    def __init__(self, sha: Optional[str] = None, filter_set: Optional[Set[str]] = None):
self.sha = sha
self._patch = None
self._files = None
        self.filter_set = filter_set or set()
@property
def patch(self):
""" Get the change patch for the commit. """
if not self._patch:
if self.sha:
self._patch = "\n".join(shell(["git", "show", self.sha]))
else:
self._patch = "\n".join(shell(["git", "diff", "--cached"]))
return self._patch
@property
def files(self):
if not self._files:
self._files = {entry.filename: entry for entry in list(self.get_files())}
return self._files
@staticmethod
def get_filename(line: str) -> str:
"""
Get the file path from the line patch
Example: line = "a/filename.txt b/filename.txt"
"""
return line.split(" ")[1][2:]
@staticmethod
def get_filemode(line: str) -> str:
"""
Get the file mode from the line patch (new, modified or deleted)
:raise: Exception if filemode is not detected
"""
if line.startswith("index"):
return Filemode.MODIFY
elif line.startswith("similarity"):
return Filemode.RENAME
elif line.startswith("new"):
return Filemode.NEW
elif line.startswith("deleted"):
return Filemode.DELETE
elif line.startswith("old"):
return Filemode.PERMISSION_CHANGE
else:
            raise click.ClickException(f"Filemode is not detected: {line}")
def get_files(self) -> Iterable[CommitFile]:
"""
Format the diff into files and extract the patch for each one of them.
Example :
diff --git a/test.txt b/test.txt\n
new file mode 100644\n
index 0000000..b80e3df\n
--- /dev/null\n
+++ b/test\n
@@ -0,0 +1,28 @@\n
+this is a test patch\n
"""
list_diff = re.split(r"^diff --git ", self.patch, flags=re.MULTILINE)[1:]
work_dir = os.getcwd()
for diff in list_diff:
lines = diff.split("\n")
filename = self.get_filename(lines[0])
if os.path.join(work_dir, filename) in self.filter_set:
continue
filemode = self.get_filemode(lines[1])
document = "\n".join(lines[filemode.start :])
file_size = len(document.encode("utf-8"))
if file_size > MAX_FILE_SIZE:
continue
if document:
yield CommitFile(document, filename, filemode, file_size)
| 30.716495 | 87 | 0.57946 | 687 | 5,959 | 4.90393 | 0.24163 | 0.021371 | 0.01306 | 0.013654 | 0.068863 | 0.068863 | 0.04393 | 0.04393 | 0 | 0 | 0 | 0.008623 | 0.318845 | 5,959 | 193 | 88 | 30.875648 | 0.821385 | 0.156402 | 0 | 0.112903 | 0 | 0 | 0.033382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.104839 | false | 0 | 0.08871 | 0.016129 | 0.362903 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d79bef2d302ed63ef3f6fc3b30a3dd1edd2d1a25 | 1,281 | py | Python | aoc/day24.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | aoc/day24.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | aoc/day24.py | martinhenstridge/adventofcode2021 | f9fa76fd91f13abab9307794e30461033a470eca | [
"MIT"
] | null | null | null | from . import util
def get_constants(lines):
consts = []
chunklen = len(lines) // 14
for i in range(14):
start = i * chunklen
a = int(lines[start + 4].split()[-1])
b = int(lines[start + 5].split()[-1])
c = int(lines[start + 15].split()[-1])
consts.append((a, b, c))
return consts
def derive_constraints(constants):
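    # Each block with a == 1 pushes (digit index, its c constant) onto a stack;
    # each block with a == 26 pops and pairs with it, yielding the constraint
    # digit[i] == digit[j] + (popped c + this block's b).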
constraints = []
stack = []
for i, (a, b, c) in enumerate(constants):
if a == 1:
stack.append((i, c))
elif a == 26:
_i, _c = stack.pop()
constraints.append((i, _i, _c + b))
return constraints
def solve(goal, constraints):
digits = [goal] * 14
for i, j, delta in constraints:
digits[i] = digits[j] + delta
return int("".join(str(digit) for digit in digits))
def run():
inputlines = util.get_input_lines("24.txt")
constants = get_constants(inputlines)
constraints = derive_constraints(constants)
# Credit for this goes to:
# https://github.com/dphilipson/advent-of-code-2021/blob/master/src/days/day24.rs
hi = solve(9, [(i, j, d) if d < 0 else (j, i, -d) for i, j, d in constraints])
lo = solve(1, [(i, j, d) if d > 0 else (j, i, -d) for i, j, d in constraints])
return hi, lo
| 25.117647 | 85 | 0.565964 | 188 | 1,281 | 3.803191 | 0.382979 | 0.027972 | 0.016783 | 0.013986 | 0.092308 | 0.092308 | 0.092308 | 0.092308 | 0.092308 | 0.092308 | 0 | 0.030303 | 0.278689 | 1,281 | 50 | 86 | 25.62 | 0.743506 | 0.081187 | 0 | 0 | 0 | 0 | 0.005111 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.030303 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d79c3fc1088e22a097bfce7eea15802cb90df485 | 4,366 | py | Python | hutch_python/tests/test_load_conf.py | tangkong/hutch-python | 7127985182c56fa0ecd70efb9679621fe8f47702 | [
"BSD-3-Clause-LBNL"
] | null | null | null | hutch_python/tests/test_load_conf.py | tangkong/hutch-python | 7127985182c56fa0ecd70efb9679621fe8f47702 | [
"BSD-3-Clause-LBNL"
] | 282 | 2017-12-08T19:55:40.000Z | 2022-03-31T22:56:46.000Z | hutch_python/tests/test_load_conf.py | tangkong/hutch-python | 7127985182c56fa0ecd70efb9679621fe8f47702 | [
"BSD-3-Clause-LBNL"
] | 11 | 2018-01-12T21:57:02.000Z | 2020-11-26T00:29:34.000Z | import logging
import os.path
from socket import gethostname
from types import SimpleNamespace
import pytest
from pcdsdaq.daq import Daq
from pcdsdaq.sim import set_sim_mode
from pcdsdaq.sim.pydaq import Control as SimControl
from pcdsdevices.interface import Presets
import hutch_python.qs_load
from hutch_python.load_conf import load, load_conf
from .conftest import (TST_CAM_CFG, BlueskyScan, ELog, QSBackend, lightpath,
requires_elog, requires_psdaq, skip_if_win32_generic,
skip_if_win32_pcdsdaq)
logger = logging.getLogger(__name__)
@skip_if_win32_pcdsdaq
def test_file_load():
logger.debug('test_file_load')
set_sim_mode(True)
objs = load(os.path.join(os.path.dirname(__file__), 'conf.yaml'))
should_have = ('x', 'unique_device', 'calc_thing', 'daq', 'scan_pvs')
if lightpath is not None:
should_have += ('tst_beampath',)
err = '{} was overriden by a namespace'
for elem in should_have:
assert not isinstance(objs[elem], SimpleNamespace), err.format(elem)
assert 'tst' not in objs # Tree namespace should be disabled
assert len(Presets._paths) == 2
def test_exp_override():
logger.debug('test_exp_override')
set_sim_mode(True)
# Should work with or without hutch name
objs = load(os.path.join(os.path.dirname(__file__), 'conf.yaml'),
SimpleNamespace(exp='x011'))
assert hasattr(objs['x'], 'cats')
objs = load(os.path.join(os.path.dirname(__file__), 'conf.yaml'),
SimpleNamespace(exp='tstx011'))
assert hasattr(objs['x'], 'cats')
def test_no_file():
logger.debug('test_no_file')
objs = load()
assert len(objs) > 1
def test_conf_empty():
logger.debug('test_conf_empty')
objs = load_conf({})
assert len(objs) > 1
@requires_elog
def test_elog(monkeypatch, temporary_config):
logger.debug('test_elog')
monkeypatch.setattr(hutch_python.load_conf, 'HutchELog', ELog)
# No platform
objs = load_conf({'hutch': 'TST'})
assert objs['elog'].station is None
# Check authentication worked correctly
assert objs['elog'].user == 'user'
assert objs['elog'].pw == 'pw'
# Define default platform
objs = load_conf({'daq_platform': {'default': 1},
'hutch': 'TST'})
assert objs['elog'].station is None
# Define host platform
hostname = gethostname()
objs = load_conf({'daq_platform': {'default': 3,
hostname: 4},
'hutch': 'TST'})
assert objs['elog'].station == '1'
@requires_psdaq
def test_lcls2_daq_config(dummy_zmq_lcls2):
logger.debug('test_lcls2_daq')
host = 'fake-hostname-drp'
platform = 1
config = {
'daq_type': 'lcls2',
'daq_host': host,
'daq_platform': {'default': platform},
}
objs = load_conf(config)
daq = objs['daq']
assert isinstance(daq, BlueskyScan)
assert daq.control.host == host
assert daq.control.platform == platform
@skip_if_win32_pcdsdaq
def test_simdaq_config():
logger.debug('test_simdaq_config')
objs = load_conf({'daq_type': 'lcls1-sim'})
daq = objs['daq']
assert isinstance(daq, Daq)
daq.connect()
assert isinstance(daq._control, SimControl)
def test_nodaq_config():
logger.debug('test_nodaq_config')
objs = load_conf({'daq_type': 'nodaq'})
with pytest.raises(KeyError):
objs['daq']
def test_camviewer_load(monkeypatch):
logger.debug('test_camviewer_load')
monkeypatch.setattr(hutch_python.load_conf, 'CAMVIEWER_CFG', TST_CAM_CFG)
objs = load_conf({'hutch': ''})
assert 'camviewer' in objs
assert 'my_cam' in dir(objs['camviewer'])
def test_skip_failures():
logger.debug('test_skip_failures')
# Should not raise
load_conf(dict(hutch=345243, db=12351324, experiment=2341234, load=123454,
bananas='dole'))
@skip_if_win32_generic
def test_auto_experiment(fake_curexp_script):
logger.debug('test_auto_experiment')
hutch_python.qs_load.QSBackend = QSBackend
objs = load_conf(dict(hutch='tst'))
assert objs['inj_x'].run == '15'
assert objs['inj_x'].proposal == 'LR12'
assert objs['x'].inj_x == objs['inj_x']
def test_cannot_auto():
logger.debug('test_cannot_auto')
# Fail silently
load_conf(dict(hutch='tst'))
| 29.70068 | 78 | 0.667201 | 570 | 4,366 | 4.863158 | 0.268421 | 0.04329 | 0.064935 | 0.025974 | 0.246032 | 0.200577 | 0.084776 | 0.084776 | 0.059524 | 0.059524 | 0 | 0.017311 | 0.206138 | 4,366 | 146 | 79 | 29.90411 | 0.782458 | 0.04535 | 0 | 0.145455 | 0 | 0 | 0.140899 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.109091 | false | 0 | 0.109091 | 0 | 0.218182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7a2932502dd681c890f213423a6b586e82c32ba | 1,386 | py | Python | tests/test_pre_trained_embedding.py | IntheGrass/citeomatic_learning | b0fee3c2c9b6462e1878eb5aa3060bee8c86f923 | [
"Apache-2.0"
] | 162 | 2018-02-26T18:13:55.000Z | 2022-02-25T05:14:06.000Z | tests/test_pre_trained_embedding.py | zhoufn/citeomatic | aa0f5add68000232db299e340d114bd03586752f | [
"Apache-2.0"
] | 9 | 2019-03-14T16:16:31.000Z | 2021-03-15T19:50:21.000Z | tests/test_pre_trained_embedding.py | zhoufn/citeomatic | aa0f5add68000232db299e340d114bd03586752f | [
"Apache-2.0"
] | 24 | 2018-06-30T10:37:01.000Z | 2022-02-15T08:34:25.000Z | import random
import unittest
import os
import h5py
from sklearn.preprocessing import normalize
from citeomatic.models.options import ModelOptions
from citeomatic.models.text_embeddings import TextEmbeddingSum
import numpy as np
FIXTURES = os.path.join('tests', 'fixtures')
EMBEDDINGS_FILE = os.path.join(FIXTURES, 'weights.h5')
def almost_equal(x, y, threshold=0.0001):
return abs(x-y) < threshold
class TestPreTrainedEmbedding(unittest.TestCase):
def test_pre_trained_layer(self):
with h5py.File(EMBEDDINGS_FILE, 'r') as f:
pretrained_embeddings = f['embedding'][...]
options = ModelOptions()
options.use_pretrained = True
options.dense_dim = 300
options.n_features = 200
t_embedding_sum = TextEmbeddingSum(options=options,
pretrained_embeddings=pretrained_embeddings,
magnitudes_initializer='ones'
)
embedding_model, outputs = t_embedding_sum.create_text_embedding_model(
prefix='test', final_l2_norm=False)
idx = random.randint(0, 200)
pred = embedding_model.predict(np.asarray([idx + 1]))[0]
input_embedding = normalize(pretrained_embeddings[idx].reshape(1, -1))[0]
assert all(map(almost_equal, pred, input_embedding))
| 31.5 | 87 | 0.655123 | 154 | 1,386 | 5.707792 | 0.525974 | 0.091013 | 0.045506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023278 | 0.256133 | 1,386 | 43 | 88 | 32.232558 | 0.829292 | 0 | 0 | 0 | 0 | 0 | 0.029582 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.066667 | false | 0 | 0.266667 | 0.033333 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7a3e5636e0832d379197e1d5f2d6d6bcdb0f31f | 2,080 | py | Python | 2019-2020/Lato/Sztuczna Inteligencja/ListaZ1/hetmany.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | 2019-2020/Lato/Sztuczna Inteligencja/ListaZ1/hetmany.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | 2019-2020/Lato/Sztuczna Inteligencja/ListaZ1/hetmany.py | ldept/University | f5ec29dd1daa1c9dc2d1592c0ddab575146e80ee | [
"FTL"
] | null | null | null | import random
import time
N = 6
def place_queens():
# board NxN filled with zeroes - later 1 means that we can't put a queen in this place
board = [ [0 for col in range(N)] for row in range(N)]
current_row = 0
    # for every queen (one queen per row)
    for i in range(N):
        empty_in_row = board[current_row].count(0)
        # if there is no empty place left in this row, this attempt failed
        if empty_in_row == 0:
            return False
        # pick one of the remaining free positions at random
        random_free_pos = random.randint(1, empty_in_row)
nth_free_pos = 0
for position_in_row in range(N):
if board[current_row][position_in_row] == 0:
nth_free_pos += 1
if nth_free_pos == random_free_pos:
# col and row ain't free
for n in range(N):
board[n][position_in_row] = 1
board[current_row][n] = 1
# diagonals
# diag_max = max( N - 1 - current_row, N - 1 - position_in_row )
diag_max = N
for diag_down_right in range(1, diag_max):
if current_row + diag_down_right >= N or position_in_row + diag_down_right >= N:
break
board[current_row + diag_down_right][position_in_row + diag_down_right] = 1
for diag_down_left in range(1, diag_max):
if current_row + diag_down_left >= N or position_in_row - diag_down_left < 0:
break
board[current_row + diag_down_left][position_in_row - diag_down_left] = 1
break
current_row+=1
return True
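# place_queens is a Las Vegas-style randomized algorithm: each call either
# places all N queens or reports failure, so the loop below estimates its
# empirical success rate over repeated batches of 100 attempts.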
total = 0  # total number of successful placements across all batches
# for i in range(50):
#     start = time.time()
#     place_queens()
#     end = time.time()
#     print(end - start)
for i in range(50):
    c = 0
    for _ in range(100):
        if place_queens():
            c += 1
    total += c
    print(c, "%")  # c successes out of 100 attempts, i.e. the success percentage
print("average", total / 50, "%") | 28.888889 | 104 | 0.507692 | 283 | 2,080 | 3.498233 | 0.240283 | 0.070707 | 0.105051 | 0.085859 | 0.294949 | 0.236364 | 0.119192 | 0.070707 | 0.070707 | 0.070707 | 0 | 0.036585 | 0.408654 | 2,080 | 72 | 105 | 28.888889 | 0.768293 | 0.175962 | 0 | 0.075 | 0 | 0 | 0.0047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025 | false | 0 | 0.05 | 0 | 0.125 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
d7a5d8046a9fa04afb2ab301fe207eff7e6ce01f | 14,991 | py | Python | pystitchy/app.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | [
"MIT"
] | 1 | 2021-02-28T17:27:16.000Z | 2021-02-28T17:27:16.000Z | pystitchy/app.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | [
"MIT"
] | null | null | null | pystitchy/app.py | iht/Stitchy-Studio | f7faf846d7ce498ef5945caaff2b09f9108e2919 | [
"MIT"
] | null | null | null | # Copyright (c) 2012 Israel Herraiz <isra@herraiz.org>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import wx
from wx import xrc
from grid import Grid
from image_importer import ImageImporter
from math import sqrt
class MyApp(wx.App):
def __init__ (self, xrcfn, colorsfn):
self._xrcfn = xrcfn
self._colorsfn = colorsfn
self._scroll_rate = 10
self._erase_tool = False
self._grid = Grid()
self._operations = []
self._current_operation = None
self._max_undo = 100
self._timer = None
self._current_mouse_pos = (-1, -1)
wx.App.__init__ (self)
def OnInit (self):
# Colors must be imported before creating the frame
self._import_colors ()
self._current_color = None
# Create main frame
self._res = xrc.XmlResource (self._xrcfn)
self._init_frame()
# Load palette selector dialog
self._palette_dialog = self._res.LoadDialog(self._frame, "SelectColorPaletteDialog")
self._avlb_id = 1234
self._available_listbox = wx.SimpleHtmlListBox(
self._palette_dialog,
self._avlb_id,
style=wx.HLB_MULTIPLE,
size=(230,460))
self._selb_id = 1235
self._select_listbox = wx.SimpleHtmlListBox(
self._palette_dialog,
self._selb_id,
style=wx.HLB_MULTIPLE,
size=(230,460))
for dmc in self._colors.keys():
code, name = self._colors[dmc]
self._available_listbox.Append('<table><tr><td bgcolor="%s" colspan="15" nowrap> </td><td>%s #%s</td></tr></table>' % (code[0:7], name, dmc))
self._res.AttachUnknownControl(
'AvailableColorListUnknown',
self._available_listbox,
self._palette_dialog)
self._res.AttachUnknownControl(
'SelectedColorListUnknown',
self._select_listbox,
self._palette_dialog)
self._palette_dialog.Bind(wx.EVT_BUTTON, self._add_colors_to_palette, id = xrc.XRCID ('AddColorBtn'))
self._palette_dialog.Bind(wx.EVT_BUTTON, self._remove_colors_from_palette, id = xrc.XRCID ('RemoveColorBtn'))
self._palette_dialog.Bind(wx.EVT_BUTTON, self._set_current_palette, id = xrc.XRCID ('PaletteAcceptBtn'))
return True
def _set_current_palette (self, event):
dmcs = [x.split("#")[2].split("<")[0] for x in self._select_listbox.GetStrings()]
self._current_palette = {}
for d in dmcs:
self._current_palette[d] = self._colors[d]
event.Skip()
self._palette_dialog.Show(False)
def _add_colors_to_palette (self, event):
remove = []
item, cookie = self._available_listbox.GetFirstSelected()
while wx.NOT_FOUND != item:
s = self._available_listbox.GetString(item)
self._select_listbox.Append(s)
remove.append(item)
item, cookie = self._available_listbox.GetNextSelected(cookie)
for r in remove:
self._available_listbox.Delete(r)
self._available_listbox.DeselectAll()
self._select_listbox.DeselectAll()
event.Skip()
def _remove_colors_from_palette (self, event):
remove = []
item, cookie = self._select_listbox.GetFirstSelected()
while wx.NOT_FOUND != item:
s = self._select_listbox.GetString(item)
self._available_listbox.Append(s)
remove.append(item)
item, cookie = self._select_listbox.GetNextSelected(cookie)
for r in remove:
self._select_listbox.Delete(r)
self._select_listbox.DeselectAll()
self._available_listbox.DeselectAll()
event.Skip()
def _import_colors (self):
f = open(self._colorsfn, 'r')
ls = f.readlines()
f.close()
self._colors = {}
for l in ls:
dmc, name, code = l.split(',')
self._colors[dmc] = (code, name)
self._current_palette = self._colors
def _find_dmc_color (self, color):
for dmc in self._colors.keys():
code, name = self._colors[dmc]
red = int(code[1:3], 16)
green = int(code[3:5], 16)
blue = int(code[5:7], 16)
if red == color.Red() and green == color.Green() and blue == color.Blue():
return '%s #%s' % (name, dmc)
return 'None'
def _find_closest_dmc_color (self, color):
distance = 1000
red = color.Red()
green = color.Green()
blue = color.Blue()
bestred, bestgreen, bestblue = (None, None, None)
for dmc in self._current_palette.keys():
code, name = self._current_palette[dmc]
dred = int(code[1:3], 16)
dgreen = int(code[3:5], 16)
dblue = int(code[5:7], 16)
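            # weighted Euclidean ("redmean") distance: a cheap approximation of
            # perceptual color difference whose channel weights depend on the
            # mean red level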
r = (dred + red) / 2
dr = dred - red
dg = dgreen - green
db = dblue - blue
ndistance = (2+r/256)*dr**2 + 4*dg**2 + (2+(255-r)/256)*db**2
ndistance = sqrt(ndistance)
if ndistance < distance:
distance = ndistance
bestred, bestgreen, bestblue = (dred, dgreen, dblue)
return wx.Colour (bestred, bestgreen, bestblue)
def OnPaint (self, event):
dc = wx.PaintDC (event.GetEventObject())
dc.Clear()
self._panel.DoPrepareDC(dc)
self._grid.draw_grid (dc)
event.Skip()
def _init_frame (self):
self._frame = self._res.LoadFrame (None,'MyMainFrame')
self._panel = xrc.XRCCTRL (self._frame, 'MainPanel')
self._panel.SetScrollRate (self._scroll_rate, self._scroll_rate)
self._panel.SetVirtualSize (self._grid.get_size ())
self._toolbar = self._frame.GetToolBar ()
self._toolbar.ToggleTool (xrc.XRCID('editgrid'), not self._erase_tool)
self._toolbar.ToggleTool (xrc.XRCID('erase'), self._erase_tool)
        color_choice_id = 54  # arbitrary widget id for the color Choice control
color_list = []
for k in self._colors.keys():
dmc = k
code, name = self._colors[k]
color_list.append('%s (%s)' % (name, dmc))
self._color_choice = wx.Choice (self._toolbar, color_choice_id, (-1,-1), (-1,-1), color_list )
self._toolbar.AddControl (self._color_choice)
self._change_color(None)
self._menubar = self._frame.GetMenuBar()
self._statusbar = self._frame.GetStatusBar()
self._panel.Bind(wx.EVT_PAINT, self.OnPaint)
self._panel.Bind(wx.EVT_MOUSE_EVENTS, self._print_cell)
self._toolbar.Bind(wx.EVT_TOOL, self._undo, id = xrc.XRCID('undo'))
self._toolbar.Bind(wx.EVT_TOOL, self._redo, id = xrc.XRCID('redo'))
self._toolbar.Bind(wx.EVT_TOOL, self._set_zoom, id = xrc.XRCID('zoomout'))
self._toolbar.Bind(wx.EVT_TOOL, self._set_zoom, id = xrc.XRCID('zoomin'))
self._toolbar.Bind(wx.EVT_TOOL, self._set_edit, id = xrc.XRCID('editgrid'))
self._toolbar.Bind(wx.EVT_TOOL, self._set_edit, id = xrc.XRCID('erase'))
        self._toolbar.Bind(wx.EVT_CHOICE, self._change_color, id=color_choice_id)
self._frame.Bind(wx.EVT_MENU, self._import_image, id = xrc.XRCID('importimage'))
self._timer = wx.Timer()
self._timer.Bind(wx.EVT_TIMER, self._show_tooltip)
self._frame.SetSize ((800,600))
self._panel.FitInside()
self._frame.SetTitle ("Stitchy Studio")
self._timer.Start(3000,True)
self.SetTopWindow (self._frame)
self._frame.Show()
def _import_image (self, event):
path = wx.FileSelector ('Choose an image',
wildcard = "BMP|*.bmp|GIF|*.gif|JPEG|*.jp*g|PNG|*.png|PCX|*.pcx|TIFF|*.tiff|Other|*",
flags = wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,
parent = self._frame)
if path:
self._palette_dialog.Fit()
self._palette_dialog.ShowModal()
importer = ImageImporter ()
importer.load_image (path)
importer.scale_image()
height, width = importer.get_size ()
dc = wx.ClientDC (self._panel)
self._panel.DoPrepareDC (dc)
for x in range (0, width):
for y in range (0, height):
color = importer.get_color (x, y)
bestcolor = self._find_closest_dmc_color (color)
self._grid.add_cell (x, y, dc, bestcolor, False)
event.Skip()
def _change_color (self, event):
selection = self._color_choice.GetStringSelection()
dmc = selection.split("(")[1].split(")")[0]
color, _name = self._colors[dmc]
red = int(color[1:3], 16)
green = int(color[3:5], 16)
blue = int(color[5:7], 16)
self._current_color = wx.Colour (red=red, green=green, blue=blue)
if event:
event.Skip()
def _print_cell (self, event):
mousex, mousey = self._panel.CalcUnscrolledPosition(event.GetX(), event.GetY())
self._current_mouse_pos = (mousex, mousey)
color = self._grid.get_color_by_mouse (mousex, mousey)
if not color:
color_name = 'None'
else:
color_name = self._find_dmc_color (color)
self._statusbar.SetStatusText('Color: %s' % str(color_name))
if event.GetButton() == wx.MOUSE_BTN_LEFT or event.Dragging():
dc = wx.ClientDC (event.GetEventObject())
self._panel.DoPrepareDC (dc)
xcell, ycell = self._grid.mouse2cell (mousex, mousey)
color_index = self._grid.add_cell (xcell, ycell, dc, self._current_color, self._erase_tool)
# Add operation for undo and redo
op = (xcell, ycell, color_index, self._erase_tool)
            if len(self._operations) == 0 or op not in self._operations:
self._operations.append (op)
self._current_operation = len(self._operations) - 1
elif event.Moving():
self._timer.Start(3000,True)
event.Skip()
def _undo (self, event):
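        # Each operation stores (cell x, cell y, index into that cell's color
        # history, erase flag). Undo re-applies the previous color from the
        # cell's history, or clears the cell if this was its first color.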
        if self._current_operation is not None:
op = self._operations[self._current_operation]
xcell, ycell, color_index, erase = op
dc = wx.ClientDC (self._panel)
self._panel.DoPrepareDC (dc)
if erase:
if color_index > 0:
cur_color = self._grid.get_color_by_index (xcell, ycell, color_index-1)
self._grid.add_cell (xcell, ycell, dc, cur_color, False)
else:
if color_index > 0:
cur_color = self._grid.get_color_by_index (xcell, ycell, color_index-1)
self._grid.add_cell (xcell, ycell, dc, cur_color, False)
else:
self._grid.add_cell (xcell, ycell, dc, None, True)
self._current_operation = self._current_operation - 1
if self._current_operation < 0:
self._current_operation = None
def _redo (self, event):
        if self._current_operation is None:
            # everything was undone, so redo should re-apply the first operation
            self._current_operation = -1
try:
op = self._operations[self._current_operation+1]
xcell, ycell, color_index, erase = op
cur_color = self._grid.get_color_by_index (xcell, ycell, color_index)
dc = wx.ClientDC (self._panel)
self._panel.DoPrepareDC (dc)
if erase:
self._grid.add_cell (xcell, ycell, dc, None, True)
else:
self._grid.add_cell (xcell, ycell, dc, cur_color, False)
self._current_operation += 1
        except IndexError:
            # No actions to redo
            if self._current_operation < 0:
                self._current_operation = None
def _set_zoom (self, event):
if event.GetId() == xrc.XRCID('zoomout'):
self._grid.decrease_zoom()
elif event.GetId() == xrc.XRCID('zoomin'):
self._grid.increase_zoom()
size = self._grid.get_size()
self._panel.SetVirtualSize(size)
self._panel.FitInside()
self._panel.SetScrollRate(size[0]/10, size[1]/10)
self._panel.Refresh()
event.Skip()
def _set_edit (self, event):
if event.GetId() == xrc.XRCID('editgrid'):
self._erase_tool = False
elif event.GetId() == xrc.XRCID('erase'):
self._erase_tool = True
self._toolbar.ToggleTool (xrc.XRCID('editgrid'), not self._erase_tool)
self._toolbar.ToggleTool (xrc.XRCID('erase'), self._erase_tool)
event.Skip()
def _show_tooltip (self, event):
x, y = self._current_mouse_pos
color = self._grid.get_color_by_mouse (x, y)
if color:
red = color.Red()
green = color.Green()
blue = color.Blue()
color = self._find_dmc_color (color)
tip = wx.TipWindow (self._frame, "Color: %s\n\nRGB: (%s, %s, %s)\n\nClick tooltip to close" % (str(color),red,green,blue))
event.Skip()
| 34.621247 | 153 | 0.565406 | 1,732 | 14,991 | 4.648383 | 0.207852 | 0.031425 | 0.01565 | 0.014781 | 0.335114 | 0.264812 | 0.232393 | 0.187306 | 0.155012 | 0.108682 | 0 | 0.013421 | 0.328997 | 14,991 | 432 | 154 | 34.701389 | 0.786957 | 0.082183 | 0 | 0.281139 | 0 | 0.010676 | 0.036767 | 0.012013 | 0 | 0 | 0 | 0 | 0 | 1 | 0.064057 | false | 0.003559 | 0.049822 | 0 | 0.131673 | 0.007117 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7a5e7e41e29240a6927c2e2f20a7583a8409a13 | 3,437 | py | Python | prepare-data.py | Riscue/MalConv-Pytorch | 3cad94eb44d0e30af413fd883a35b528a29a32f1 | [
"MIT"
] | null | null | null | prepare-data.py | Riscue/MalConv-Pytorch | 3cad94eb44d0e30af413fd883a35b528a29a32f1 | [
"MIT"
] | null | null | null | prepare-data.py | Riscue/MalConv-Pytorch | 3cad94eb44d0e30af413fd883a35b528a29a32f1 | [
"MIT"
] | null | null | null | import hashlib
import os
import random
import zipfile
from utils import ProgressBar, Chrono, malware_path, benign_path, train_path, valid_path, train_csv, valid_csv, Utils
def md5(fname):
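    # stream the file in 4 KiB chunks so large APKs are hashed without loading
    # them fully into memory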
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def extract_dex(source, target):
    # use context managers so both archives are closed even if extraction fails
    with zipfile.ZipFile(source, 'r') as apk_zip, \
            zipfile.ZipFile(target, mode='w') as dex_zip:
        dex_files = [f for f in apk_zip.namelist() if f.endswith('.dex')]
        for dex_file in dex_files:
            apk_zip.extract(dex_file)
            dex_zip.write(dex_file)
            os.remove(dex_file)
def build_dataset_split(csv_file_name, path, malwares, benigns):
if not os.path.isdir(path):
os.makedirs(path)
csv_file = open(csv_file_name, "w")
total_malwares = len(malwares)
progress_bar.newbar(total_malwares, 'Malware')
for i in range(total_malwares):
with chrono.measure('step'):
try:
malware_hash = md5('%s/%s' % (malware_path, malwares[i]))
extract_dex('%s/%s' % (malware_path, malwares[i]), '%s/%s' % (path, malware_hash))
csv_file.write('%s,1\n' % malware_hash)
except zipfile.BadZipFile as e:
print(malwares[i])
print(e)
progress_bar.update(i, 'Malware | Time: %s' % Utils.format_time(chrono.last('step')))
total_benigns = len(benigns)
progress_bar.newbar(total_benigns, 'Benign')
for i in range(total_benigns):
with chrono.measure('step'):
try:
benign_hash = md5('%s/%s' % (benign_path, benigns[i]))
extract_dex('%s/%s' % (benign_path, benigns[i]), '%s/%s' % (path, benign_hash))
csv_file.write('%s,0\n' % benign_hash)
except zipfile.BadZipFile as e:
print(benigns[i])
print(e)
progress_bar.update(i, 'Benign | Time: %s' % Utils.format_time(chrono.last('step')))
csv_file.close()
if __name__ == '__main__':
progress_bar = ProgressBar()
chrono = Chrono()
if not os.path.isdir(train_path):
os.makedirs(train_path)
if not os.path.isdir(valid_path):
os.makedirs(valid_path)
malware_files = [f for f in os.listdir(malware_path) if os.path.isfile(os.path.join(malware_path, f))]
benign_files = [f for f in os.listdir(benign_path) if os.path.isfile(os.path.join(benign_path, f))]
random.shuffle(malware_files)
random.shuffle(benign_files)
malwares_split_index = int(0.8 * len(malware_files))
malwares_train = malware_files[:malwares_split_index]
malwares_valid = malware_files[malwares_split_index:]
benigns_split_index = int(0.8 * len(benign_files))
benigns_train = benign_files[:benigns_split_index]
benigns_valid = benign_files[benigns_split_index:]
print('Processing training dataset')
with chrono.measure('process'):
        build_dataset_split(train_csv, train_path, malwares_train, benigns_train)
print('Completed in: %s' % Utils.format_time(chrono.last('process')))
print('Processing validation dataset')
with chrono.measure('process'):
        build_dataset_split(valid_csv, valid_path, malwares_valid, benigns_valid)
print('Completed in: %s' % Utils.format_time(chrono.last('process')))
print('Total time: %s' % Utils.format_time(chrono.total('process')))
| 36.178947 | 117 | 0.650276 | 469 | 3,437 | 4.539446 | 0.204691 | 0.019728 | 0.028182 | 0.037576 | 0.437764 | 0.292156 | 0.223579 | 0.108971 | 0.050728 | 0.050728 | 0 | 0.006329 | 0.218505 | 3,437 | 94 | 118 | 36.56383 | 0.786299 | 0 | 0 | 0.162162 | 0 | 0 | 0.075647 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040541 | false | 0 | 0.067568 | 0 | 0.121622 | 0.121622 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7a93b63207e8cb080d9e8928d39f7d826b1853e | 2,845 | py | Python | pyzombie/handlers/HandlerHelp.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | null | null | null | pyzombie/handlers/HandlerHelp.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | 1 | 2019-12-30T19:30:01.000Z | 2019-12-30T19:30:29.000Z | pyzombie/handlers/HandlerHelp.py | lanhel/pyzombie | dba35d98152e5d99d4231ab9124727ae47b3bf72 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#-------------------------------------------------------------------------------
"""pyzombie HTTP RESTful server handler for root resource."""
__author__ = ('Lance Finn Helsten',)
__version__ = '1.0.1'
__copyright__ = """Copyright 2009 Lance Finn Helsten (helsten@acm.org)"""
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "reStructuredText en"
__all__ = ['HandlerHelp']
import sys
import os
import logging
import http.client
import http.server
from ..Handler import Handler
HELPDIR = os.path.normpath(os.path.join(os.path.dirname(__file__), "../httphelp"))
INDEX_HTML = """<!DOCTYPE html>
<html lang='en'>
<head>
<title>pyzombie Help Contents</title>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
<link rel="Contents" href="/add"/>
<link rel="stylesheet" href="/help/help.css" type="text/css" media="screen"/>
</head>
<body>
<h1>pyzombie Help</h1>
<ol>
{0}
</ol>
</body>
</html>
"""
INDEX_ROW = """ <li><a href="help/{0}">{0}</a></li>"""
class HandlerHelp(Handler):
"""Handle the root resource."""
@classmethod
def dispatch(cls):
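        # The regex accepts /help, /help/, and /help/<name> where <name> may
        # carry a single extension, e.g. /help/RESTful or /help/help.css.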
cls.initdispatch(r"""^/help(/(?P<helpfile>\w+(\.\w+)?)?)?$""",
"GET,OPTIONS,TRACE", "/help/RESTful")
return cls
def head(self):
self.content = "Headers"
self.get()
def get(self):
html = None
if self.urlargs["helpfile"] is None:
files = [os.path.splitext(f) for f in os.listdir(HELPDIR)]
files = [INDEX_ROW.format(f[0]) for f in files if f[1] == '.html']
body = os.linesep.join(files)
html = INDEX_HTML.format(body)
self.status = http.client.OK
self["Cache-Control"] = "public"
self["Last-Modified"] = self.startstamprfc850
self["Content-type"] = "text/html;UTF-8"
self.writelines(html)
elif os.path.splitext(self.urlargs["helpfile"])[1] == '':
file = os.path.join(HELPDIR, self.urlargs["helpfile"] + '.html')
file = os.path.normpath(file)
self.writefile(file)
else:
file = os.path.join(HELPDIR, self.urlargs["helpfile"])
file = os.path.normpath(file)
self.writefile(file)
self.flush()
| 30.265957 | 82 | 0.606327 | 362 | 2,845 | 4.676796 | 0.450276 | 0.031896 | 0.044891 | 0.018901 | 0.093325 | 0.093325 | 0.093325 | 0.093325 | 0 | 0 | 0 | 0.011221 | 0.216872 | 2,845 | 93 | 83 | 30.591398 | 0.748654 | 0.071353 | 0 | 0.085714 | 0 | 0.028571 | 0.459048 | 0.042286 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042857 | false | 0 | 0.085714 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ac9d879f2ea91801d78b718bd2e9874071f787 | 763 | py | Python | opentech/apply/funds/widgets.py | stdevteam/opentech.fund | 6888dc5aa1a8c60f17629dff03877412275e08a5 | [
"BSD-3-Clause"
] | null | null | null | opentech/apply/funds/widgets.py | stdevteam/opentech.fund | 6888dc5aa1a8c60f17629dff03877412275e08a5 | [
"BSD-3-Clause"
] | null | null | null | opentech/apply/funds/widgets.py | stdevteam/opentech.fund | 6888dc5aa1a8c60f17629dff03877412275e08a5 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.staticfiles.templatetags.staticfiles import static
from django_select2.forms import Select2MultipleWidget
class Select2MultiCheckboxesWidget(Select2MultipleWidget):
class Media:
js = (
static('js/select2.multi-checkboxes.js'),
static('js/django_select2-checkboxes.js'),
)
def __init__(self, *args, **kwargs):
attrs = kwargs.get('attrs', {})
attrs.setdefault('data-placeholder', 'items')
kwargs['attrs'] = attrs
super().__init__(*args, **kwargs)
def build_attrs(self, *args, **kwargs):
attrs = super().build_attrs(*args, **kwargs)
attrs['class'] = attrs['class'].replace('django-select2', 'django-select2-checkboxes')
return attrs
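# A minimal usage sketch (the form, field, and choices below are illustrative
# only, not part of this module):
#
#     class FundFilterForm(forms.Form):
#         rounds = forms.MultipleChoiceField(
#             choices=ROUND_CHOICES,
#             widget=Select2MultiCheckboxesWidget(attrs={'data-placeholder': 'rounds'}),
#         )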
| 33.173913 | 94 | 0.650066 | 77 | 763 | 6.285714 | 0.402597 | 0.107438 | 0.092975 | 0.078512 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013267 | 0.209699 | 763 | 22 | 95 | 34.681818 | 0.789386 | 0 | 0 | 0 | 0 | 0 | 0.184797 | 0.112713 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.117647 | 0 | 0.411765 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7adbb7a2fc072a56e0fc8427c9bf5bedd8af41b | 6,295 | py | Python | examples/representation/extract_stellar_graph_from_ir.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | 8 | 2022-02-03T16:41:01.000Z | 2022-02-09T11:29:20.000Z | examples/representation/extract_stellar_graph_from_ir.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | null | null | null | examples/representation/extract_stellar_graph_from_ir.py | ComputerSystemsLaboratory/YaCoS | abd5d3c6e227e5c7a563493f7855ebf58ba3de05 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python3
"""
Copyright 2021 Anderson Faustino da Silva.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Classify applications into 104 classes given their raw code.
#
# The representation (graph) is created from IR.
#
import os
import sys
import glob
import pandas as pd
import pickle as pk
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from stellargraph import StellarDiGraph
from absl import app, flags, logging
from yacos.info import compy as R
from yacos.info.compy.extractors import LLVMDriver
def extract_graph_data(graph, graph_type):
"""Extract edges, nodes and embeddings."""
nodes = {}
#nodes['word2vec'] = graph.get_nodes_word2vec_embeddings('ir')
nodes['histogram'] = graph.get_nodes_histogram_embeddings('ir')
nodes['inst2vec'] = graph.get_nodes_inst2vec_embeddings()
nodes['ir2vec'] = graph.get_nodes_ir2vec_embeddings()
nodes['opcode'] = graph.get_nodes_opcode_embeddings()
edges = graph.get_edges_str_dataFrame()
return edges, nodes
def execute(argv):
"""Extract a graph representation."""
del argv
FLAGS = flags.FLAGS
# Verify datset directory.
if not os.path.isdir(FLAGS.dataset_directory):
logging.error('Dataset directory {} does not exist.'.format(
FLAGS.dataset_directory)
)
sys.exit(1)
"""Extract the representation from the source code."""
# Instantiate the LLVM driver.
driver = LLVMDriver()
# Define the builder
builder = R.LLVMGraphBuilder(driver)
# Define the visitor
visitors = {
# CFG
'cfg_call': R.LLVMCFGCallVisitor,
'cfg_call_nr': R.LLVMCFGCallNoRootVisitor,
'cfg_call_compact_me': R.LLVMCFGCallCompactMultipleEdgesVisitor,
'cfg_call_compact_se': R.LLVMCFGCallCompactSingleEdgeVisitor,
'cfg_call_compact_me_nr': R.LLVMCFGCallCompactMultipleEdgesNoRootVisitor,
'cfg_call_compact_se_nr': R.LLVMCFGCallCompactSingleEdgeNoRootVisitor,
# CDFG
'cdfg_call': R.LLVMCDFGCallVisitor,
'cdfg_call_nr': R.LLVMCDFGCallNoRootVisitor,
'cdfg_call_compact_me': R.LLVMCDFGCallCompactMultipleEdgesVisitor,
'cdfg_call_compact_se': R.LLVMCDFGCallCompactSingleEdgeVisitor,
'cdfg_call_compact_me_nr': R.LLVMCDFGCallCompactMultipleEdgesNoRootVisitor,
'cdfg_call_compact_se_nr': R.LLVMCDFGCallCompactSingleEdgeNoRootVisitor,
# CDFG PLUS
'cdfg_plus': R.LLVMCDFGPlusVisitor,
'cdfg_plus_nr': R.LLVMCDFGPlusNoRootVisitor,
# PROGRAML
'programl': R.LLVMProGraMLVisitor,
'programl_nr': R.LLVMProGraMLNoRootVisitor
}
folders = [
os.path.join(FLAGS.dataset_directory, subdir)
for subdir in os.listdir(FLAGS.dataset_directory)
if os.path.isdir(os.path.join(FLAGS.dataset_directory, subdir))
]
idx = FLAGS.dataset_directory.rfind('/')
last_folder = FLAGS.dataset_directory[idx+1:]
# Load data from all folders
for folder in folders:
sources = glob.glob('{}/*.ll'.format(folder))
for source in sources:
try:
extractionInfo = builder.ir_to_info(source)
graph = builder.info_to_representation(extractionInfo,
visitors[FLAGS.graph])
edges, nodes_data = extract_graph_data(graph, FLAGS.graph)
except Exception:
logging.error('Error {}.'.format(source))
continue
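            # one StellarDiGraph is pickled per (program, node-feature) pair,
            # each into its own "<dataset>_<graph>_<feature>" output directory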
for feat, feat_data in nodes_data.items():
indexes = []
embeddings = []
for idx, _, emb in feat_data:
indexes.append(idx)
embeddings.append(emb)
nodes = pd.DataFrame(embeddings, index=indexes)
graph = StellarDiGraph(nodes=nodes,
edges=edges,
edge_type_column="type")
outdir = os.path.join(
folder.replace(
last_folder,
'{}_{}_{}'.format(last_folder, FLAGS.graph, feat)
)
)
os.makedirs(outdir, exist_ok=True)
filename = source.replace('{}/'.format(folder), '')
filename = filename.replace('.ll', '.pk')
filename = '{}/{}'.format(outdir, filename)
fout = open(filename, 'wb')
pk.dump(graph, fout)
fout.close()
# Execute
if __name__ == '__main__':
# app
flags.DEFINE_string('dataset_directory',
None,
'Dataset directory')
flags.DEFINE_enum('graph',
'cdfg_call',
[
# CFG
'cfg_call',
'cfg_call_nr',
'cfg_call_compact_me',
'cfg_call_compact_se',
'cfg_call_compact_me_nr',
'cfg_call_compact_se_nr',
# CDFG
'cdfg_call',
'cdfg_call_nr',
'cdfg_call_compact_me',
'cdfg_call_compact_se',
'cdfg_call_compact_me_nr',
'cdfg_call_compact_se_nr',
# CDFG PLUS
'cdfg_plus',
'cdfg_plus_nr',
# PROGRAML
'programl',
'programl_nr'
],
'The type of the graph')
flags.mark_flag_as_required('dataset_directory')
app.run(execute)
| 34.027027 | 83 | 0.580302 | 635 | 6,295 | 5.527559 | 0.344882 | 0.050142 | 0.031909 | 0.018234 | 0.066667 | 0.021083 | 0.021083 | 0 | 0 | 0 | 0 | 0.004994 | 0.33201 | 6,295 | 184 | 84 | 34.211957 | 0.829727 | 0.160604 | 0 | 0.017544 | 0 | 0 | 0.13928 | 0.034675 | 0 | 0 | 0 | 0 | 0 | 1 | 0.017544 | false | 0 | 0.078947 | 0 | 0.105263 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7b00020f5741932b4f5e60f62f61202af6ea8d5 | 1,377 | py | Python | taribot/database/database.py | Tarinu/taribot | a1ec2aef58ff1619678e29a7c1bbbb59acb26b23 | [
"MIT"
] | null | null | null | taribot/database/database.py | Tarinu/taribot | a1ec2aef58ff1619678e29a7c1bbbb59acb26b23 | [
"MIT"
] | 10 | 2018-08-26T01:40:27.000Z | 2020-07-28T22:23:37.000Z | taribot/database/database.py | Tarinu/taribot | a1ec2aef58ff1619678e29a7c1bbbb59acb26b23 | [
"MIT"
] | 1 | 2020-07-23T20:15:45.000Z | 2020-07-23T20:15:45.000Z | # -*- coding: utf-8 -*-
import aiosqlite
class Database:
def __init__(self, database: str):
self.database = database
async def create_table(self, table_name: str, columns: dict):
column_list = []
for column in columns:
column_list.append("{} {}".format(column, columns.get(column)))
await self.execute('CREATE TABLE IF NOT EXISTS {} ( {} )'.format(table_name, ','.join(column_list)))
async def drop_table(self, table_name: str):
await self.execute('DROP TABLE {}'.format(table_name))
async def execute(self, sql: str, *args):
async with aiosqlite.connect(self.database) as connection: # type: aiosqlite.Connection
await connection.execute(sql, args)
await connection.commit()
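    # A minimal usage sketch (the table and column names are illustrative only):
    #
    #     db = Database('taribot.db')
    #     await db.create_table('quotes', {'id': 'INTEGER PRIMARY KEY', 'text': 'TEXT'})
    #     await db.execute('INSERT INTO quotes (text) VALUES (?)', 'hello')
    #     row = await db.fetch_one('SELECT text FROM quotes WHERE id = ?', 1)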
async def fetch_one(self, sql: str, *args):
async with aiosqlite.connect(self.database) as connection: # type: aiosqlite.Connection
connection.row_factory = aiosqlite.Row
cursor = await connection.execute(sql, args)
return await cursor.fetchone()
async def fetch_all(self, sql: str, *args):
async with aiosqlite.connect(self.database) as connection: # type: aiosqlite.Connection
connection.row_factory = aiosqlite.Row
cursor = await connection.execute(sql, args)
return await cursor.fetchall()
| 39.342857 | 108 | 0.649237 | 163 | 1,377 | 5.380368 | 0.294479 | 0.068415 | 0.034208 | 0.047891 | 0.566705 | 0.485747 | 0.485747 | 0.485747 | 0.485747 | 0.485747 | 0 | 0.000954 | 0.238925 | 1,377 | 34 | 109 | 40.5 | 0.835878 | 0.074074 | 0 | 0.28 | 0 | 0 | 0.043307 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7b234755c0105486801e40b6d65fd9ca923e07c | 3,654 | py | Python | 2020/23/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | 2020/23/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | 2020/23/solve.py | lamperi/aoc | 1781dcbac0be18a086c10a9b76fb6a2d3595523c | [
"MIT"
] | null | null | null | import os.path
from timeit import default_timer as timer
INPUT=os.path.join(os.path.dirname(__file__), "input.txt")
with open(INPUT) as f:
data = f.read()
class SingleLinkedList(object):
class Node(object):
def __init__(self, value, next=None):
self.value = value
self.next = next
def __repr__(self):
return f"Node(value={self.value}, next={self.next.value})"
@staticmethod
def from_iterable(iterable):
it=iter(iterable)
cur = head = SingleLinkedList.Node(next(it))
for v in it:
node = SingleLinkedList.Node(v)
cur.next, cur = node, node
cur.next = head
return SingleLinkedList(head)
def __init__(self, head):
self.head = head
def nodes(self):
class Nodes(object):
def __init__(self, node):
self.node = node
def __iter__(self):
return NodeIterator(self.node)
class NodeIterator(object):
def __init__(self, node):
self.node = node
self.initial = node
self.iterated_first = False
def __next__(self):
if not self.iterated_first:
self.iterated_first = True
return self.node
self.node = self.node.next
if self.node.value == self.initial.value:
raise StopIteration()
return self.node
return Nodes(self.head)
def get_node_mapping(self):
mapping = {}
for node in self.nodes():
mapping[node.value] = node
return mapping
def remove_next(self, count):
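        # Detach the count nodes that follow head; return (first, last) of the
        # removed segment, which remains linked as its own circle.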
assert count > 0
new_head = node = self.head.next
for _ in range(count):
new_tail = node
node = node.next
self.head.next = node
new_tail.next = new_head
return new_head, new_tail
def append_after(self, node, first_added, last_added):
last_added.next = node.next
node.next = first_added
def advance(self):
self.head = self.head.next
def solve(data, moves=100, padding=None):
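    # Crab cups (AoC 2020 day 23): each move removes the three cups after the
    # current cup, picks destination = current label - 1 (wrapping past the
    # minimum and skipping the just-removed labels), reinserts the three cups
    # after the destination, then advances to the next cup.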
cups = [int(c) for c in data.strip()]
if padding:
cups.extend(range(max(cups)+1, padding+1))
min_cup = min(cups)
max_cup = max(cups)
cups = SingleLinkedList.from_iterable(cups)
node_mapping = cups.get_node_mapping()
current_cup = cups.head.value
move = 0
while move < moves:
move += 1
first, last = cups.remove_next(3)
clockwise_values = set([first.value, first.next.value, last.value])
dest_cup = current_cup
while True:
dest_cup -= 1
if dest_cup < min_cup:
dest_cup = max_cup
if dest_cup not in clockwise_values:
break
cups.append_after(node_mapping[dest_cup], first, last)
cups.advance()
current_cup = cups.head.value
if padding is None:
cups = "".join(str(i.value) for i in cups.nodes())
return cups[cups.index("1")+1:] + cups[:cups.index("1")]
else:
node1 = node_mapping[1]
nodea = node1.next
nodeb = nodea.next
a, b = nodea.value, nodeb.value
return a*b
print(solve("32415"))
print(solve("389125467", moves=10))
print(solve("389125467"))
start = timer()
print(solve(data))
end = timer()
print("Part 1:", end - start)
start = timer()
print(solve(data, padding=1000000,moves=10000000))
end = timer()
print("Part 2:", end - start) | 29 | 75 | 0.576628 | 460 | 3,654 | 4.417391 | 0.241304 | 0.043307 | 0.021654 | 0.031496 | 0.07874 | 0.03248 | 0.03248 | 0.03248 | 0 | 0 | 0 | 0.023284 | 0.318281 | 3,654 | 126 | 76 | 29 | 0.792453 | 0 | 0 | 0.108108 | 0 | 0 | 0.026265 | 0.012859 | 0 | 0 | 0 | 0 | 0.009009 | 1 | 0.126126 | false | 0 | 0.063063 | 0.018018 | 0.315315 | 0.063063 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7b89f3b000877ecd02181e8498b793ce4d147c3 | 643 | py | Python | python/CodeJam/2016/p2.py | gineer01/programming-challenges | 9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7 | [
"MIT"
] | null | null | null | python/CodeJam/2016/p2.py | gineer01/programming-challenges | 9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7 | [
"MIT"
] | null | null | null | python/CodeJam/2016/p2.py | gineer01/programming-challenges | 9f0bbaab5b85423b5671ee3cfc2d0fd62cea4cc7 | [
"MIT"
] | null | null | null | from functools import lru_cache
file = open("B-large.in")
no_test = int(file.readline())
def opposite(side):
if side == '+':
return '-'
elif side == '-':
return '+'
else:
raise Exception("WTF")
@lru_cache(maxsize=None)
def get_flips(pancake, side):
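    # Count the flips needed until every pancake shows `side`, working from the
    # bottom of the stack: each face change relative to the target costs one flip.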
if len(pancake) == 0:
return 0
if pancake[-1] == side:
return get_flips(pancake[:-1], side)
else:
return get_flips(pancake[:-1], opposite(side)) + 1
def get_output(s):
return get_flips(s, '+')
for i in range(0, no_test):
line = file.readline().strip()
print("Case #%s: %s" % (i + 1, get_output(line))) | 20.09375 | 58 | 0.573872 | 90 | 643 | 3.988889 | 0.455556 | 0.089136 | 0.125348 | 0.116992 | 0.122563 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016667 | 0.253499 | 643 | 32 | 59 | 20.09375 | 0.73125 | 0 | 0 | 0.086957 | 0 | 0 | 0.046584 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.043478 | 0.043478 | 0.434783 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7b8c5dc735378516da0cc29c48100053b063d46 | 3,712 | py | Python | src/hg/makeDb/genbank/src/lib/py/genbank/GenomeSeqs.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 171 | 2015-04-22T15:16:02.000Z | 2022-03-18T20:21:53.000Z | src/hg/makeDb/genbank/src/lib/py/genbank/GenomeSeqs.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 60 | 2016-10-03T15:15:06.000Z | 2022-03-30T15:21:52.000Z | src/hg/makeDb/genbank/src/lib/py/genbank/GenomeSeqs.py | andypohl/kent | af7a004c8f3fa909cd8c2cfc2e5bea60e3421cd1 | [
"MIT"
] | 80 | 2015-04-16T10:39:48.000Z | 2022-03-29T16:36:30.000Z | "Module to store information about genome sequences"
import os, glob
from genbank.fileOps import prLine, prRow
from genbank import procOps
class GenomeSeq(object):
"information about a genome sequence"
__slots__ = ("genomeSeqs", "path", "id", "size", "unplaced", "regions")
def __init__(self, genomeSeqs, path, id, size):
self.genomeSeqs = genomeSeqs
self.path = path
self.id = id
self.size = size
self.unplaced = False # unplaced seq, gaps not spanned if set.
# regions without gaps, starts as whole chr, maybe rebuilt from lift
self.regions = [(0, size)]
class GenomeSeqs(dict):
"table of genome sequences and sizes for a database"
def __init__(self, db, genomeFileSpec):
"""Build from all sequences in a genome.
- db - genome database or other name used to identify the genome.
- genomeFileSpec - either a glob pattern or file specification for 2bit
or nib genome seq files.
"""
self.db = db
paths = glob.glob(genomeFileSpec)
if len(paths) == 0:
raise Exception("no files matching: " + genomeFileSpec)
if (len(paths) == 1) and paths[0].endswith(".2bit"):
self.__addTwoBit(paths[0])
else:
self.__addNibs(paths)
def __addNibs(self, paths):
"add nib sequences to object"
# /cluster/data/hg17/nib/chrX.nib chrX 154824264
lines = procOps.callProcLines(["nibSize"] + paths)
for line in lines:
row = line.split("\t")
self[row[1]] = GenomeSeq(self, row[0], row[1], int(row[2]))
def __addTwoBit(self, path):
"add twoBit sequences to object"
self.genomeDb = path[0]
# chrX 154824264
lines = procOps.callProcLines(["twoBitInfo", path, "stdout"])
for line in lines:
row = line.split("\t")
self[row[0]] = GenomeSeq(self, path, row[0], int(row[1]))
def __loadLift(self, liftFile):
"load lift into dict of lists of (start end), ensuring they are sorted"
fh = open(liftFile)
lifts = {}
# offset oldName oldSize newName newSize strand
for line in fh:
row = line[0:-1].split("\t")
if row[1] != "gap":
start = int(row[0])
end = start + int(row[2])
if not row[3] in lifts:
lifts[row[3]] = []
lifts[row[3]].append((start, end))
        fh.close()
for l in lifts.itervalues():
l.sort()
return lifts
def __addSeqRegionsFromLifts(self, seq, lifts):
"add ungapped regions for a sequence"
seq.regions = []
start = lifts[0][0]
end = lifts[0][1]
for lift in lifts[1:]:
if ((lift[0] - end) > 0) or seq.unplaced:
seq.regions.append((start, end))
start = lift[0]
end = lift[1]
seq.regions.append((start, end))
def defineSeqRegionsFromLifts(self, liftFile):
"""define regions without gaps from a lift file. If a sequence
is flagged as unplaced, adjacent lift entries are not joined"""
lifts = self.__loadLift(liftFile)
for id in lifts.iterkeys():
self.__addSeqRegionsFromLifts(self[id], lifts[id])
def dump(self, fh):
"print contents for debugging purposes"
ids = self.keys()
ids.sort()
for id in ids:
seq = self[id]
prRow(fh, (seq.id, seq.size, seq.path))
if seq.regions != None:
for reg in seq.regions:
fh.write("\t")
prRow(fh, reg)
| 35.692308 | 79 | 0.561422 | 455 | 3,712 | 4.518681 | 0.32967 | 0.024319 | 0.013132 | 0.019455 | 0.093385 | 0.033074 | 0.033074 | 0.033074 | 0.033074 | 0.033074 | 0 | 0.020783 | 0.32597 | 3,712 | 103 | 80 | 36.038835 | 0.800959 | 0.237069 | 0 | 0.075 | 0 | 0 | 0.136714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.0375 | 0 | 0.1875 | 0.0125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ba989813244768340b684b3ce5324c8228ee03 | 3,054 | py | Python | orodja/vrni_kvote_iz_html.py | majbc1999/Predictor-UCL-2021-22 | 35618ce88f710310db125731ed36bd5891f9238f | [
"MIT"
] | 1 | 2021-10-21T10:43:44.000Z | 2021-10-21T10:43:44.000Z | orodja/vrni_kvote_iz_html.py | majbc1999/Predictor-UCL-2021-22 | 35618ce88f710310db125731ed36bd5891f9238f | [
"MIT"
] | null | null | null | orodja/vrni_kvote_iz_html.py | majbc1999/Predictor-UCL-2021-22 | 35618ce88f710310db125731ed36bd5891f9238f | [
"MIT"
] | null | null | null | import re
from orodja import vsebina_datoteke, convert_to_float
def vrni_kvote_iz_html(datoteka, matchday):
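    # Parse one match's saved HTML page: pull the home/away team names from the
    # page title and build a {"home_goals:away_goals": odds} dict of correct-score prices.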
tekma = vsebina_datoteke('html/' + matchday + '/' + datoteka + '.html')
vzorec_za_rezulat = re.compile(
r'<span class="float-wrap name-wrap"><span class="tcell"><div class="top-row"><a class="popup selTxt" target="_blank" title="View odds history for .*?" '
r'href=".*?" data-name=".*?">'
r'(?P<ekipa_rezultat>.*?)'
r'</a></div></span></span></td>.*?<td class="bc bs o.*?(\n)?.*?" data-bk="B3" data-odig=".*?" data-o=".*?" data-hcap=".*?" data-fodds=".*?" data-best-ew=".*?" data-best-wo=".*?"><p>'
r'(?P<kvote>.*?)'
r'</p></td>',
flags=re.DOTALL)
vzorec_za_rezulat2 = re.compile(
r'<p class="body-text-3 MarketExpanderBetName_m1m6ixsu">'
r'(?P<goli1>\d*?)'
r'-'
r'(?P<goli2>\d*?)'
r'</p>(.*?)<button type="button" class="button_b1oycxy6">'
r'(?P<kvote>.*?)'
r'</button>'
)
#vzorec_za_rezultat_vscode = <span class="float-wrap name-wrap"><span class="tcell"><div class="top-row"><a class="popup selTxt" target="_blank" title="View odds history for .*?" href=".*?" data-name=".*?".*?</a></div></span></span></td>.*?<td class="bc bs o.*?(\n)?.*?" data-bk="B3" data-odig=".*?" data-o=".*?" data-hcap=".*?" data-fodds=".*?" data-best-ew=".*?" data-best-wo=".*?"><p>.*?</p></td>"
#vzorec2 = <p class="body-text-3 MarketExpanderBetName_m1m6ixsu">((\d*?)-(\d*?))</p>(.*?)<button type="button" class="button_b1oycxy6">(.*?)</button>
vzorec_za_ekipe = re.compile(
r'</style><title>'
r'(?P<domaca_ekipa>[a-zA-Z\s]*?)'
r' v '
r'(?P<gostujoca_ekipa>[a-zA-Z\s]*?)'
r' Correct Score',
flags=re.DOTALL)
vzorec_za_ekipe2 = re.compile(
r'<title>'
r'(?P<domaca_ekipa>[a-zA-Z\s]*?)'
r' vs '
r'(?P<gostujoca_ekipa>[a-zA-Z\s]*?)'
r'Betting Odds'
)
slovar = {}
obrni = False
#for razplet in re.finditer(vzorec_za_rezulat, tekma):
# rezultat = razplet['ekipa_rezultat']
# kvote = convert_to_float(razplet['kvote'])
# slovar[rezultat] = kvote
i = 0
for razplet in re.finditer(vzorec_za_rezulat2, tekma):
if (razplet['goli1'] + ':' + razplet['goli2']) in slovar:
obrni = True
if obrni:
rezultat = razplet['goli2'] + ':' + razplet['goli1']
else:
rezultat = razplet['goli1'] + ':' + razplet['goli2']
kvote = convert_to_float(razplet['kvote'])
slovar[rezultat] = kvote
i = i+1
#for ekipa in re.finditer(vzorec_za_ekipe, tekma):
# domaca_ekipa = ekipa['domaca_ekipa']
# gostujoca_ekipa = ekipa['gostujoca_ekipa']
for ekipa in re.finditer(vzorec_za_ekipe2, tekma):
domaca_ekipa = ekipa['domaca_ekipa']
gostujoca_ekipa = ekipa['gostujoca_ekipa']
return([domaca_ekipa, gostujoca_ekipa, slovar]) | 39.662338 | 404 | 0.550753 | 390 | 3,054 | 4.179487 | 0.253846 | 0.014724 | 0.058282 | 0.022086 | 0.628221 | 0.602454 | 0.602454 | 0.433129 | 0.433129 | 0.406135 | 0 | 0.011504 | 0.2315 | 3,054 | 77 | 405 | 39.662338 | 0.683 | 0.281925 | 0 | 0.153846 | 0 | 0.038462 | 0.376201 | 0.119908 | 0 | 0 | 0 | 0 | 0 | 1 | 0.019231 | false | 0 | 0.038462 | 0 | 0.057692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7bcbfdb1ae81af2e1f7b492b6b8aec8b7e96b9c | 1,532 | py | Python | cyberdyne/settings/dev_settings.py | jsbUSMC/api | 931c53be2f368c35571f47ff83a4393276ce7e63 | [
"MIT"
] | null | null | null | cyberdyne/settings/dev_settings.py | jsbUSMC/api | 931c53be2f368c35571f47ff83a4393276ce7e63 | [
"MIT"
] | 5 | 2020-06-05T17:27:41.000Z | 2022-01-13T00:39:54.000Z | cyberdyne/settings/dev_settings.py | jsbUSMC/api | 931c53be2f368c35571f47ff83a4393276ce7e63 | [
"MIT"
] | null | null | null | import logging.config
# pylint: disable=W0401,W0614
from .settings import *
LOGFILE_ROOT = join(dirname(BASE_DIR), 'logs')
# Reset logging
# pylint: disable=C0301
# (see http://www.caktusgroup.com/blog/2015/01/27/Django-Logging-Configuration-logging_config-default-settings-logger/)
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(pathname)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'django_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'django.log'),
'formatter': 'verbose'
},
'proj_log_file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': join(LOGFILE_ROOT, 'project.log'),
'formatter': 'verbose'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'loggers': {
'django': {
'handlers': ['django_log_file'],
'propagate': True,
'level': 'DEBUG',
},
'project': {
'handlers': ['proj_log_file'],
'level': 'DEBUG',
},
}
}
logging.config.dictConfig(LOGGING)
| 27.357143 | 119 | 0.50718 | 137 | 1,532 | 5.554745 | 0.50365 | 0.065703 | 0.047306 | 0.067017 | 0.198423 | 0.165572 | 0.165572 | 0.165572 | 0.165572 | 0.165572 | 0 | 0.020038 | 0.315927 | 1,532 | 55 | 120 | 27.854545 | 0.706107 | 0.118146 | 0 | 0.1875 | 0 | 0.020833 | 0.377415 | 0.052006 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.041667 | 0 | 0.041667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7bf0d0070200a276391a4378ab0dfd730c9fe2c | 3,106 | py | Python | tests/unit/test_my_app.py | azam-a/domini | ea151cd3803e9500c22c342eba959de0b94613c6 | [
"MIT"
] | 1 | 2017-08-20T04:06:52.000Z | 2017-08-20T04:06:52.000Z | tests/unit/test_my_app.py | azam-a/domini | ea151cd3803e9500c22c342eba959de0b94613c6 | [
"MIT"
] | 1 | 2019-10-26T11:44:25.000Z | 2020-01-18T13:47:42.000Z | tests/unit/test_my_app.py | azam-a/domini | ea151cd3803e9500c22c342eba959de0b94613c6 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import Mock, patch
from my_app import app, scheduled
class AppTests(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
self.valid_url = "https://api.dominos.com.my/api/GPSTracker/CartId/8"
self.invalid_url = "https://api.unknown.domain/api/GPSTracker/CartId/9"
def test_index_view_should_render_introduction_page(self):
response = self.app.get("/")
self.assertIn(b"what is this", response.data.lower())
def test_how_to_view_should_render_how_to_page(self):
response = self.app.get("/how-to")
self.assertIn(b"how-to", response.data.lower())
def test_add_form_view_should_use_correct_template(self):
response = self.app.get("/add-form")
self.assertIn(b"track an order", response.data.lower())
@patch('my_app.controllers')
def test_add_post_view_should_call_model_controller(self, mock_module):
data = {"url": self.valid_url, "phone": "+60123", "token": "mytoken1"}
self.app.post("/add-post", data=data)
mock_module.ItemController.assert_called()
mock_module.ItemController().add.assert_called_with(
self.valid_url, "mytoken1", "+60123")
@patch('my_app.controllers')
def test_add_post_view_should_return_success_message(self, mock_module):
data = {"url": self.valid_url, "phone": "+60123", "token": "mytoken1"}
response = self.app.post("/add-post", data=data)
self.assertIn(b"great success!", response.data.lower())
@patch('my_app.controllers')
def test_add_post_view_should_return_failed_message(self, mock_module):
response = self.app.post("/add-post", data={})
self.assertIn(b"failed", response.data.lower())
@patch('my_app.controllers')
def test_add_post_view_should_accept_valid_url_pattern(self, mock_module):
data = {"url": self.valid_url, "phone": "+60123", "token": "mytoken1"}
response = self.app.post("/add-post", data=data)
self.assertIn(b"great success!", response.data.lower())
@patch('my_app.controllers')
def test_add_post_view_should_fail_invalid_url_pattern(self, mock_module):
data = {"url": self.invalid_url, "phone": "+60123", "token": "mytoken1"}
response = self.app.post("/add-post", data=data)
self.assertIn(b"failed", response.data.lower())
@patch('my_app.controllers')
class ScheduledFunctionTests(unittest.TestCase):
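    # The class-level @patch means every test method here receives the mocked
    # my_app.controllers module as its extra argument.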
def test_scheduled_should_return_scheduled_string(self, _):
self.assertIn("schedule triggered on", scheduled())
def test_scheduled_should_call_model_controller(self, mock_module):
mock_items = []
mock_controller_instance = Mock()
mock_controller_instance.get_active_items.return_value = []
mock_module.ItemController.return_value = mock_controller_instance
scheduled()
mock_module.ItemController.assert_called_once()
mock_controller_instance.get_active_items.assert_called_once()
mock_controller_instance.process_items.assert_called_once_with(mock_items)
| 40.337662 | 82 | 0.699614 | 405 | 3,106 | 5.061728 | 0.214815 | 0.034146 | 0.05122 | 0.061463 | 0.582439 | 0.519024 | 0.432195 | 0.378537 | 0.356585 | 0.356585 | 0 | 0.012393 | 0.168706 | 3,106 | 76 | 83 | 40.868421 | 0.781565 | 0 | 0 | 0.285714 | 0 | 0 | 0.156149 | 0 | 0 | 0 | 0 | 0 | 0.232143 | 1 | 0.196429 | false | 0 | 0.053571 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7bf0d9dc64412a9c3037b745c63a25bb7857ec1 | 1,283 | py | Python | core/user/client/__init__.py | geetesh-gupta/replication-of-tcp-ip-model | 12ae5b12b9dc8cf1bd6d025fd664e2d68765ebb4 | [
"MIT"
] | null | null | null | core/user/client/__init__.py | geetesh-gupta/replication-of-tcp-ip-model | 12ae5b12b9dc8cf1bd6d025fd664e2d68765ebb4 | [
"MIT"
] | null | null | null | core/user/client/__init__.py | geetesh-gupta/replication-of-tcp-ip-model | 12ae5b12b9dc8cf1bd6d025fd664e2d68765ebb4 | [
"MIT"
] | null | null | null | import sys
import settings
from core.utils import log
from core.user.client.connection import create_connection
from core.utils.str_byte_conversion import str2bytes
from core.device.datalink.client import client_dll
def run_client():
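    # Connect to the configured server, read messages from stdin, frame each one
    # at the data-link layer, and send the frames over the socket.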
host = settings.SERVER_HOST
port = settings.SERVER_PORT
sock = create_connection((host, port))
# Send data to server
try:
while True:
# Input data to send
orig_data = input("Enter data you want to send: ")
# Convert data to list of frames
enc_frames = client_dll(orig_data)
log("Frames to be send: ", 2, end="")
log(enc_frames, 2)
# Send number of frames
# num_of_frames = str(len(enc_frames))
# encode_num_of_frames = client_dll(num_of_frames)[0]
# sock.sendall(str2bytes(encode_num_of_frames))
# Send the frames
for frame in enc_frames:
sock.sendall(str2bytes(frame))
# receive data from the server
# recv_data = sock.recv(settings.PACKET_SIZE)
# print(("Received message from the server: " + bytes2str(recv_data)))
except (KeyboardInterrupt, EOFError):
sock.close()
print()
sys.exit(0)
| 30.547619 | 82 | 0.620421 | 162 | 1,283 | 4.734568 | 0.407407 | 0.062581 | 0.057366 | 0.044329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008879 | 0.29774 | 1,283 | 41 | 83 | 31.292683 | 0.842397 | 0.299299 | 0 | 0 | 0 | 0 | 0.054115 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.272727 | 0 | 0.318182 | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7c1747e42ca5a5ba8178c135cb134eb262c4f5a | 21,338 | py | Python | mappertrac/subscripts/s3_probtrackx.py | LLNL/MaPPeRTrac | 6112b8741fa96a540bbf572f841d6ccae02f2aea | [
"BSD-3-Clause"
] | 2 | 2020-09-05T13:12:35.000Z | 2021-09-21T19:32:47.000Z | mappertrac/subscripts/s3_probtrackx.py | LLNL/MaPPeRTrac | 6112b8741fa96a540bbf572f841d6ccae02f2aea | [
"BSD-3-Clause"
] | 9 | 2020-08-25T15:02:20.000Z | 2022-02-01T04:39:48.000Z | mappertrac/subscripts/s3_probtrackx.py | LLNL/MaPPeRTrac | 6112b8741fa96a540bbf572f841d6ccae02f2aea | [
"BSD-3-Clause"
] | 2 | 2020-08-13T19:46:30.000Z | 2020-09-13T01:46:22.000Z | #!/usr/bin/env python3
import os,sys,glob,multiprocessing,time,csv,math,pprint,shutil,platform,fcntl,errno,tempfile,json,psutil,random
import scipy.io
import numpy as np
from parsl.app.app import python_app
from os.path import *
from mappertrac.subscripts import *
EDGE_LIST = 'data/lists/list_edges_reduced.txt'
def run_probtrackx(params):
sdir = params['work_dir']
    assert exists(join(sdir, 'S1_COMPLETE')), f'Subject {sdir} must first run --freesurfer'
    assert exists(join(sdir, 'S2_COMPLETE')), f'Subject {sdir} must first run --bedpostx'
pbtx_edges = get_edges_from_file(join(params['script_dir'], EDGE_LIST))
edges_per_chunk = 4
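    # Fan the edge list out in chunks of four so each Parsl worker task runs a
    # handful of probtrackx calls rather than one task per edge.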
n = edges_per_chunk
edge_chunks = [pbtx_edges[i * n:(i + 1) * n] for i in range((len(pbtx_edges) + n - 1) // n )]
start_future = start(params)
process_futures = []
for edge_chunk in edge_chunks:
process_futures.append(process(params, edge_chunk, inputs=[start_future]))
return combine(params, inputs=process_futures)
@python_app(executors=['worker'])
def start(params, inputs=[]):
sdir = params['work_dir']
stdout = params['stdout']
start_time = time.time()
start_str = f'''
=====================================
{get_time_date()}
Started step 3: probtrackx
Arguments:
{pprint.pformat(params, width=1)}
=====================================
'''
write(stdout, start_str)
print(start_str)
time_log = join(sdir, 'start_time_s3.txt')
smart_remove(time_log)
write(time_log, start_time)
sdir = params['work_dir']
output_dir = params['output_dir']
pbtk_dir = join(sdir,"EDI","PBTKresults")
connectome_dir = join(sdir,"EDI","CNTMresults")
derivatives_dir_tmp = join(output_dir, 'derivatives', "tmp")
sdir_tmp = join(sdir, "tmp")
smart_remove(pbtk_dir)
smart_remove(connectome_dir)
smart_remove(sdir_tmp)
smart_mkdir(pbtk_dir)
smart_mkdir(connectome_dir)
smart_mkdir(sdir_tmp)
time.sleep(random.randrange(0, 10)) # random sleep to avoid parallel collision
smart_mkdir(derivatives_dir_tmp)
@python_app(executors=['worker'])
def process(params, edges, inputs=[]):
sdir = params['work_dir']
stdout = params['stdout']
output_dir = params['output_dir']
pbtx_sample_count = params['pbtx_sample_count']
derivatives_dir_tmp = join(output_dir, 'derivatives', "tmp")
sdir_tmp = join(sdir, "tmp")
EDI_allvols = join(sdir,"EDI","allvols")
pbtk_dir = join(sdir,"EDI","PBTKresults")
connectome_dir = join(sdir,"EDI","CNTMresults")
bedpostxResults = join(sdir,"bedpostx_b1000.bedpostX")
merged = join(bedpostxResults,"merged")
nodif_brain_mask = join(bedpostxResults,"nodif_brain_mask.nii.gz")
allvoxelscortsubcort = join(sdir,"allvoxelscortsubcort.nii.gz")
terminationmask = join(sdir,"terminationmask.nii.gz")
bs = join(sdir,"bs.nii.gz")
##################################
# Memory Management
##################################
pbtx_max_memory = psutil.virtual_memory().total * 1.0E-9
node_name = platform.uname().node.strip()
assert node_name and ' ' not in node_name, "Invalid node name {}".format(node_name)
mem_record = join(derivatives_dir_tmp, node_name + '.json') # Keep record to avoid overusing node memory
# Only access mem_record with file locking to avoid outdated data
def open_mem_record(mode = 'r'):
f = None
while True:
try:
f = open(mem_record, mode, newline='')
fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
# raise on unrelated IOErrors
if e.errno != errno.EAGAIN:
raise
else:
time.sleep(0.1)
assert f is not None, "Failed to open mem_record {}".format(mem_record)
return f
def estimate_total_memory_usage():
f = open_mem_record('r')
mem_dict = json.load(f)
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
mem_sum = 0.0
for task_mem in mem_dict.values():
mem_sum += float(task_mem)
return mem_sum
def estimate_task_mem_usage():
total_size = 0
total_size += os.path.getsize(allvoxelscortsubcort)
total_size += os.path.getsize(terminationmask)
total_size += os.path.getsize(bs)
for dirpath, dirnames, filenames in os.walk(bedpostxResults):
for f in filenames:
fp = os.path.join(dirpath, f)
if not os.path.islink(fp):
total_size += os.path.getsize(fp)
max_region_size = 0
for edge in edges:
a, b = edge
a_file = join(EDI_allvols, a + "_s2fa.nii.gz")
b_file = join(EDI_allvols, b + "_s2fa.nii.gz")
a_size = os.path.getsize(a_file)
b_size = os.path.getsize(b_file)
max_region_size = max([a_size, b_size, max_region_size])
total_size += max_region_size
return float(total_size) * 1.0E-9
def add_task():
task_id = '0'
f = open_mem_record('r')
if not exists(mem_record):
json.dump({task_id:task_mem_usage}, f)
else:
mem_dict = json.load(f)
task_ids = [int(x) for x in mem_dict.keys()] + [0] # append zero in case task_ids empty
task_id = str(max(task_ids) + 1) # generate incremental task_id
mem_dict[task_id] = task_mem_usage
tmp_fp, tmp_path = tempfile.mkstemp(dir=sdir_tmp)
with open(tmp_path, 'w', newline='') as tmp: # file pointer not consistent, so we open using the pathname
json.dump(mem_dict, tmp)
try:
os.replace(tmp_path, mem_record) # atomic on POSIX systems. flock is advisory, so we can still overwrite.
except OSError as e:
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
time.sleep(random.randrange(5, 30))
return add_task()
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
return task_id
def remove_task(task_id):
f = open_mem_record('r')
mem_dict = json.load(f)
mem_dict.pop(task_id, None)
tmp_fp, tmp_path = tempfile.mkstemp(dir=sdir_tmp)
with open(tmp_path, 'w', newline='') as tmp:
json.dump(mem_dict, tmp)
try:
os.replace(tmp_path, mem_record)
except OSError as e:
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
time.sleep(random.randrange(5, 30))
return remove_task(task_id)
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
sleep_timeout = 7200
task_mem_usage = estimate_task_mem_usage()
assert task_mem_usage < pbtx_max_memory, f'Task consumes more memory ({task_mem_usage:.2f} GB) than available ({pbtx_max_memory:.2f} GB)'
total_sleep = 0
# Memory record is atomic, but might not be updated on time
# So we randomize sleep to discourage multiple tasks hitting at once
init_sleep = random.randrange(5, 30)
write(stdout, "Sleeping for {:d} seconds".format(init_sleep))
total_sleep += init_sleep
time.sleep(init_sleep)
if not exists(mem_record):
f = open_mem_record('w')
json.dump({}, f)
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
total_mem_usage = estimate_total_memory_usage()
# Then we sleep until memory usage is low enough
while total_mem_usage + task_mem_usage > pbtx_max_memory:
sleep_interval = random.randrange(5, 60)
write(stdout, "Sleeping for {:d} seconds. Memory usage: {:.2f}/{:.2f} GB".format(sleep_interval, total_mem_usage, pbtx_max_memory))
total_sleep += sleep_interval
if total_sleep > sleep_timeout:
raise Exception('Retrying task that has slept longer than 2 hours')
time.sleep(sleep_interval)
total_mem_usage = estimate_total_memory_usage()
write(stdout, "Running Probtrackx after sleeping for {} seconds".format(total_sleep))
# Insert task and memory usage into record
task_id = add_task()
##################################
# Tractography
##################################
try:
for edge in edges:
a, b = edge
a_file = join(EDI_allvols, a + "_s2fa.nii.gz")
b_file = join(EDI_allvols, b + "_s2fa.nii.gz")
tmp = join(sdir, "tmp", "{}_to_{}".format(a, b))
a_to_b_formatted = "{}_s2fato{}_s2fa.nii.gz".format(a,b)
a_to_b_file = join(pbtk_dir,a_to_b_formatted)
waypoints = join(tmp,"waypoint.txt")
waytotal = join(tmp, "waytotal")
assert exists(a_file) and exists(b_file), "Error: Both Freesurfer regions must exist: {} and {}".format(a_file, b_file)
smart_remove(a_to_b_file)
smart_remove(tmp)
smart_mkdir(tmp)
write(stdout, "Running subproc: {} to {}".format(a, b))
write(waypoints, b_file.replace(sdir, "/mappertrac"))
exclusion = join(tmp,"exclusion.nii.gz")
termination = join(tmp,"termination.nii.gz")
run("fslmaths {} -sub {} {}".format(allvoxelscortsubcort, a_file, exclusion), params)
run("fslmaths {} -sub {} {}".format(exclusion, b_file, exclusion), params)
run("fslmaths {} -add {} {}".format(exclusion, bs, exclusion), params)
run("fslmaths {} -add {} {}".format(terminationmask, b_file, termination), params)
pbtx_args = (" -x {} ".format(a_file) +
# " --pd -l -c 0.2 -S 2000 --steplength=0.5 -P 1000" +
" --pd -l -c 0.2 -S 2000 --steplength=0.5 -P {}".format(pbtx_sample_count) +
" --waypoints={} --avoid={} --stop={}".format(waypoints, exclusion, termination) +
" --forcedir --opd --rseed={}".format(random.randint(1000,9999)) +
" -s {}".format(merged) +
" -m {}".format(nodif_brain_mask) +
" --dir={}".format(tmp) +
" --out={}".format(a_to_b_formatted)
)
run("probtrackx2" + pbtx_args, params)
waytotal_count = 0
if exists(waytotal):
with open(waytotal, 'r') as f:
waytotal_count = f.read().strip()
fdt_tmp = join(connectome_dir, "{}_to_{}.fdt.tmp".format(a, b))
smart_remove(fdt_tmp)
run(f"fslmeants -i {join(tmp, a_to_b_formatted)} -m {b_file} | head -n 1 > {fdt_tmp}", params) # based on getconnectome script
time.sleep(5)
with open(fdt_tmp, 'r') as f2:
fdt_count = f2.read().strip()
if not is_float(waytotal_count):
write(stdout, "Error: Failed to read waytotal_count value {} in {}".format(waytotal_count, edge))
continue
if not is_float(fdt_count):
write(stdout, "Error: Failed to read fdt_count value {} in {}".format(fdt_count, edge))
continue
edge_file = join(connectome_dir, "{}_to_{}.dot".format(a, b))
smart_remove(edge_file)
write(edge_file, "{} {} {} {}".format(a, b, waytotal_count, fdt_count))
# Error check edge file
with open(edge_file) as f:
line = f.read().strip()
if len(line) > 0: # ignore empty lines
chunks = [x.strip() for x in line.split(' ') if x]
if not (len(chunks) == 4 and is_float(chunks[2]) and is_float(chunks[3])):
write(stdout, "Error: Connectome {} has invalid edge {} to {}".format(edge_file, a, b))
continue
else:
write(stdout, 'Error: failed to find waytotal for {} to {}'.format(a, b))
copyfile(join(tmp, a_to_b_formatted), a_to_b_file) # keep edi output
if not a == "lh.paracentral": # discard all temp files except these for debugging
smart_remove(tmp)
finally:
remove_task(task_id)
@python_app(executors=['worker'])
def combine(params, inputs=[]):
sdir = params['work_dir']
stdout = params['stdout']
pbtx_sample_count = params['pbtx_sample_count']
pbtx_edges = get_edges_from_file(join(params['script_dir'], EDGE_LIST))
connectome_idx_list = join(params['script_dir'], 'data/lists/connectome_idxs.txt')
start_time = time.time()
connectome_dir = join(sdir,"EDI","CNTMresults")
oneway_list = join(sdir, "connectome_{}samples_oneway.txt".format(pbtx_sample_count))
twoway_list = join(sdir, "connectome_{}samples_twoway.txt".format(pbtx_sample_count))
oneway_nof = join(sdir, "connectome_{}samples_oneway_nof.mat".format(pbtx_sample_count)) # nof = number of fibers
twoway_nof = join(sdir, "connectome_{}samples_twoway_nof.mat".format(pbtx_sample_count))
oneway_nof_normalized = join(sdir, "connectome_{}samples_oneway_nofn.mat".format(pbtx_sample_count)) # nofn = number of fibers, normalized
twoway_nof_normalized = join(sdir, "connectome_{}samples_twoway_nofn.mat".format(pbtx_sample_count))
pbtk_dir = join(sdir,"EDI","PBTKresults")
consensus_dir = join(pbtk_dir,"twoway_consensus_edges")
edi_maps = join(sdir,"EDI","EDImaps")
edge_total = join(edi_maps,"FAtractsumsTwoway.nii.gz")
tract_total = join(edi_maps,"FAtractsumsRaw.nii.gz")
smart_remove(oneway_list)
smart_remove(twoway_list)
smart_remove(oneway_nof_normalized)
smart_remove(twoway_nof_normalized)
smart_remove(oneway_nof)
smart_remove(twoway_nof)
smart_remove(edi_maps)
smart_mkdir(pbtk_dir)
smart_mkdir(consensus_dir)
smart_mkdir(edi_maps)
oneway_edges = {}
twoway_edges = {}
consensus_edges = []
for edge in pbtx_edges:
a, b = edge
if [a, b] in consensus_edges or [b, a] in consensus_edges:
continue
consensus_edges.append(edge)
copyfile(connectome_idx_list, join(sdir, 'connectome_idxs.txt')) # give each subject a copy for reference
##################################
# Compile connectome matrices
##################################
vol_idxs = {}
with open(connectome_idx_list) as f:
lines = [x.strip() for x in f.readlines() if x]
max_idx = -1
for line in lines:
vol, idx = line.split(',', 1)
idx = int(idx)
vol_idxs[vol] = idx
if idx > max_idx:
max_idx = idx
oneway_nof_normalized_matrix = np.zeros((max_idx+1, max_idx+1))
oneway_nof_matrix = np.zeros((max_idx+1, max_idx+1))
twoway_nof_normalized_matrix = np.zeros((max_idx+1, max_idx+1))
twoway_nof_matrix = np.zeros((max_idx+1, max_idx+1))
for edge in pbtx_edges:
a, b = edge
edge_file = join(connectome_dir, "{}_to_{}.dot".format(a, b))
with open(edge_file) as f:
chunks = [x.strip() for x in f.read().strip().split(' ') if x]
a_to_b = (chunks[0], chunks[1])
b_to_a = (chunks[1], chunks[0])
waytotal_count = float(chunks[2])
fdt_count = float(chunks[3])
if b_to_a in twoway_edges:
twoway_edges[b_to_a][0] += waytotal_count
twoway_edges[b_to_a][1] += fdt_count
else:
twoway_edges[a_to_b] = [waytotal_count, fdt_count]
oneway_edges[a_to_b] = [waytotal_count, fdt_count]
for a_to_b in oneway_edges:
a = a_to_b[0]
b = a_to_b[1]
for vol in a_to_b:
if vol not in vol_idxs:
write(stdout, 'Error: could not find {} in connectome idxs'.format(vol))
break
else:
write(oneway_list, "{} {} {} {}".format(a, b, oneway_edges[a_to_b][0], oneway_edges[a_to_b][1]))
oneway_nof_matrix[vol_idxs[a]][vol_idxs[b]] = oneway_edges[a_to_b][0]
oneway_nof_normalized_matrix[vol_idxs[a]][vol_idxs[b]] = oneway_edges[a_to_b][1]
for a_to_b in twoway_edges:
a = a_to_b[0]
b = a_to_b[1]
for vol in a_to_b:
if vol not in vol_idxs:
write(stdout, 'Error: could not find {} in connectome idxs'.format(vol))
break
else:
write(twoway_list, "{} {} {} {}".format(a, b, twoway_edges[a_to_b][0], twoway_edges[a_to_b][1]))
twoway_nof_matrix[vol_idxs[a]][vol_idxs[b]] = twoway_edges[a_to_b][0]
twoway_nof_normalized_matrix[vol_idxs[a]][vol_idxs[b]] = twoway_edges[a_to_b][1]
scipy.io.savemat(oneway_nof, {'data': oneway_nof_matrix})
scipy.io.savemat(oneway_nof_normalized, {'data': oneway_nof_normalized_matrix})
scipy.io.savemat(twoway_nof, {'data': twoway_nof_matrix})
scipy.io.savemat(twoway_nof_normalized, {'data': twoway_nof_normalized_matrix})
smart_copy(twoway_nof_normalized, join(dirname(sdir), basename(twoway_nof_normalized)))
smart_copy(twoway_list, join(dirname(sdir), basename(twoway_list)))
##################################
# EDI consensus
##################################
for edge in pbtx_edges:
a, b = edge
a_to_b = "{}_to_{}".format(a, b)
a_to_b_file = join(pbtk_dir,"{}_s2fato{}_s2fa.nii.gz".format(a,b))
b_to_a_file = join(pbtk_dir,"{}_s2fato{}_s2fa.nii.gz".format(b,a))
if not exists(a_to_b_file):
write(stdout, "Error: cannot find {}".format(a_to_b_file))
return
if not exists(b_to_a_file):
write(stdout, "Error: cannot find {}".format(b_to_a_file))
return
consensus = join(consensus_dir, a_to_b + '.nii.gz')
amax_tmp = join(connectome_dir, f"{a_to_b}.amax.tmp")
bmax_tmp = join(connectome_dir, f"{a_to_b}.bmax.tmp")
smart_remove(amax_tmp)
smart_remove(bmax_tmp)
run(f'fslstats {a_to_b_file} -R | cut -f 2 -d \\" \\" > {amax_tmp}', params).strip()
run(f'fslstats {b_to_a_file} -R | cut -f 2 -d \\" \\" > {bmax_tmp}', params).strip()
time.sleep(5)
with open(amax_tmp, 'r') as f:
amax = f.read().strip()
with open(bmax_tmp, 'r') as f:
bmax = f.read().strip()
if not is_float(amax):
write(stdout, "Error: fslstats on {} returns invalid value {}".format(a_to_b_file, amax))
return
amax = int(float(amax))
if not is_float(bmax):
write(stdout, "Error: fslstats on {} returns invalid value {}".format(b_to_a_file, bmax))
return
bmax = int(float(bmax))
write(stdout, "amax = {}, bmax = {}".format(amax, bmax))
if amax > 0 and bmax > 0:
tmp1 = join(pbtk_dir, "{}_to_{}_tmp1.nii.gz".format(a, b))
tmp2 = join(pbtk_dir, "{}_to_{}_tmp2.nii.gz".format(b, a))
run("fslmaths {} -thrP 5 -bin {}".format(a_to_b_file, tmp1), params)
run("fslmaths {} -thrP 5 -bin {}".format(b_to_a_file, tmp2), params)
run("fslmaths {} -add {} -thr 1 -bin {}".format(tmp1, tmp2, consensus), params)
smart_remove(tmp1)
smart_remove(tmp2)
else:
with open(join(pbtk_dir, "zerosl.txt"), 'a') as log:
log.write("For edge {}:\n".format(a_to_b))
log.write("{} is thresholded to {}\n".format(a, amax))
log.write("{} is thresholded to {}\n".format(b, bmax))
# Collect number of probtrackx tracts per voxel
for edge in pbtx_edges:
a, b = edge
a_to_b_formatted = "{}_s2fato{}_s2fa.nii.gz".format(a,b)
a_to_b_file = join(pbtk_dir,a_to_b_formatted)
if not exists(tract_total):
copyfile(a_to_b_file, tract_total)
else:
run("fslmaths {0} -add {1} {1}".format(a_to_b_file, tract_total), params)
# Collect number of parcel-to-parcel edges per voxel
for edge in consensus_edges:
a, b = edge
consensus = join(consensus_dir, "{}_to_{}.nii.gz".format(a,b))
if not exists(consensus):
write(stdout,"{} has been thresholded. See {} for details".format(edge, join(pbtk_dir, "zerosl.txt")))
continue
if not exists(edge_total):
copyfile(consensus, edge_total)
else:
run("fslmaths {0} -add {1} {1}".format(consensus, edge_total), params)
if not exists(edge_total):
write(stdout, "Error: Failed to generate {}".format(edge_total))
else:
smart_copy(edge_total, join(dirname(sdir), 'EDI_' + basename(edge_total)))
update_permissions(sdir, params)
write(join(sdir, 'S3_COMPLETE'))
time_log = join(sdir, 'start_time_s3.txt')
with open(time_log) as f:
start_time = float(f.read())
finish_str = f'''
=====================================
{get_time_date()}
Finished step 3: probtrackx
Arguments:
{pprint.pformat(params, width=1)}
Total time: {get_time_string(time.time() - start_time)} (HH:MM:SS)
=====================================
'''
write(stdout, finish_str)
print(finish_str)
| 42.933602 | 146 | 0.590918 | 2,845 | 21,338 | 4.180668 | 0.138489 | 0.010846 | 0.014461 | 0.008071 | 0.432823 | 0.335043 | 0.256432 | 0.226333 | 0.194468 | 0.152598 | 0 | 0.010272 | 0.265442 | 21,338 | 496 | 147 | 43.020161 | 0.748565 | 0.04977 | 0 | 0.345154 | 0 | 0.004728 | 0.1663 | 0.039347 | 0 | 0 | 0 | 0 | 0.014184 | 1 | 0.021277 | false | 0 | 0.014184 | 0 | 0.061466 | 0.01182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7c896e3975a016c9183bb6ac43afb0daa5f5360 | 1,274 | py | Python | python/largest-component-size-by-common-factor.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | 3 | 2020-10-10T00:14:23.000Z | 2022-03-02T21:16:29.000Z | python/largest-component-size-by-common-factor.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | null | null | null | python/largest-component-size-by-common-factor.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | 1 | 2021-09-14T05:16:54.000Z | 2021-09-14T05:16:54.000Z | from math import sqrt
from collections import defaultdict, Counter
from typing import List
class UnionFind:
def __init__(self, n):
self.p = list(range(n))
def find(self, x):
if self.p[x] != x:
self.p[x] = self.find(self.p[x])
return self.p[x]
def union(self, x, y):
xp, yp = self.find(x), self.find(y)
self.p[xp] = yp
class Solution:
    def find_factors(self, n, cache):
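        # Return the set of prime factors of n; composite results are memoized in cache.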
if n in cache: return cache[n]
for i in range(2, int(sqrt(n)+1)):
if n % i == 0:
cache[n] = self.find_factors(n//i, cache) | set([i])
return cache[n]
return set([n])
def largestComponentSize(self, A: List[int]) -> int:
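        # Union-find over indices: numbers sharing any prime factor end up in the
        # same component, and the answer is the size of the largest component.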
n = len(A)
uf = UnionFind(n)
connection_dict = defaultdict(list)
factor_cache = dict()
for i, el in enumerate(A):
s_factors = self.find_factors(el, factor_cache)
for f in s_factors:
connection_dict[f].append(i)
for indices in connection_dict.values():
for i in range(len(indices)-1):
uf.union(indices[i], indices[i+1])
return max(Counter(uf.find(i) for i in range(n)).values())
| 28.954545 | 74 | 0.515699 | 178 | 1,274 | 3.61236 | 0.280899 | 0.046656 | 0.037325 | 0.051322 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006105 | 0.357143 | 1,274 | 43 | 75 | 29.627907 | 0.778999 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.151515 | false | 0 | 0.060606 | 0 | 0.393939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7cc08eb7e8cbbb30dfdd6d21d34070c531558bc | 640 | py | Python | pycat/test/label_test_2.py | cmorace/pycat | 7abc53f90a03b4961c10003eaca2c01efec9e4d2 | [
"MIT"
] | null | null | null | pycat/test/label_test_2.py | cmorace/pycat | 7abc53f90a03b4961c10003eaca2c01efec9e4d2 | [
"MIT"
] | null | null | null | pycat/test/label_test_2.py | cmorace/pycat | 7abc53f90a03b4961c10003eaca2c01efec9e4d2 | [
"MIT"
] | null | null | null |
from pycat.core import Window, Label, Scheduler
w = Window(enforce_window_limits=False)
class TestLabel(Label):
def on_create(self):
self.font_size = 40
self.text = "hello world"
self.y = w.height
self.x = (w.width - self.content_width) / 2
def on_update(self, dt: float):
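        # The label falls 10 px per update and deletes itself once it drops below y = 100.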
if self.y < 100:
self.delete()
else:
self.y -= 10
num_labels = 0
def spawn_label(dt):
w.create_label(TestLabel)
global num_labels
num_labels += 1
if num_labels == 10:
Scheduler.cancel_update(spawn_label)
Scheduler.update(spawn_label, delay=1)
w.run()
| 18.285714 | 51 | 0.615625 | 90 | 640 | 4.211111 | 0.511111 | 0.094987 | 0.084433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028139 | 0.278125 | 640 | 34 | 52 | 18.823529 | 0.792208 | 0 | 0 | 0 | 0 | 0 | 0.017214 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.045455 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d09e2d5013c7195c0e5153b002f04e69e0ec1d | 8,488 | py | Python | quantlib/test/test_bucketanalysis.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | null | null | null | quantlib/test/test_bucketanalysis.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | null | null | null | quantlib/test/test_bucketanalysis.py | yuyingfeng/pyql | ceb838581ad4db73a0208bc51bde2771bb534e5f | [
"BSD-3-Clause"
] | 2 | 2016-08-24T20:56:14.000Z | 2022-01-03T05:58:42.000Z |
from .unittest_tools import unittest
from quantlib.instruments.bonds import (FixedRateBond)
from quantlib.pricingengines.bond import DiscountingBondEngine
from quantlib.time.calendar import ( Unadjusted, ModifiedFollowing, Following)
from quantlib.time.calendars.target import TARGET
from quantlib.time.calendars.united_states import ( UnitedStates, GOVERNMENTBOND)
from quantlib.currency.api import USDCurrency
from quantlib.instruments.option import VanillaOption
from quantlib.time.calendars.null_calendar import NullCalendar
from quantlib.compounding import Compounded, Continuous
from quantlib.time.date import ( Date, Days, Semiannual, January, August, Period, March, February, May,Jul, Annual, Years)
from quantlib.time.api import (TARGET, Period, Months, Years, Days,September, ISDA, today, Mar,
ModifiedFollowing, Unadjusted, Actual360, Thirty360, ActualActual, Actual365Fixed,
Annual, UnitedStates, Months, Actual365Fixed)
from quantlib.time.daycounters.actual_actual import Bond, ISMA
from quantlib.time.schedule import Schedule, Backward
from quantlib.settings import Settings
from quantlib.indexes.libor import Libor
from quantlib.instruments.option import (EuropeanExercise, AmericanExercise, DividendVanillaOption)
from quantlib.termstructures.yields.rate_helpers import (DepositRateHelper, SwapRateHelper)
from quantlib.termstructures.yields.piecewise_yield_curve import (VALID_TRAITS, VALID_INTERPOLATORS,PiecewiseYieldCurve)
from quantlib.termstructures.yields.api import (FlatForward, YieldTermStructure)
from quantlib.quotes import SimpleQuote
from quantlib.termstructures.volatility.equityfx.black_vol_term_structure import BlackConstantVol
from quantlib.processes.black_scholes_process import BlackScholesMertonProcess
from quantlib.pricingengines.vanilla.vanilla import (
AnalyticEuropeanEngine, BaroneAdesiWhaleyApproximationEngine,
FDDividendAmericanEngine
)
from quantlib.instruments.payoffs import PlainVanillaPayoff, Put
import quantlib.pricingengines.bondfunctions as bf
from quantlib.experimental.risk.sensitivityanalysis import bucket_analysis
class SensitivityTestCase(unittest.TestCase):
#@unittest.skip('This test is not numerically accurate and fails')
def test_bucketanalysis_bond(self):
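        # Price a 10y fixed-rate bond off a curve bootstrapped from deposit and swap
        # quotes, then bump each quote by 1bp (0.0001) and check the bucketed sensitivities.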
settings = Settings()
calendar = TARGET()
settlement_date = calendar.adjust(Date(28, January, 2011))
simple_quotes = []
fixing_days = 1
settlement_days = 1
todays_date = calendar.advance(
settlement_date, -fixing_days, Days
)
settings.evaluation_date = todays_date
face_amount = 100.0
redemption = 100.0
issue_date = Date(27, January, 2011)
maturity_date = Date(1, January, 2021)
coupon_rate = 0.055
bond_yield = 0.034921
flat_discounting_term_structure = YieldTermStructure(relinkable=True)
flat_term_structure = FlatForward(
reference_date = settlement_date,
forward = bond_yield,
daycounter = Actual365Fixed(),
compounding = Compounded,
frequency = Semiannual)
flat_discounting_term_structure.link_to(flat_term_structure)
fixed_bond_schedule = Schedule(
issue_date,
maturity_date,
Period(Semiannual),
UnitedStates(market=GOVERNMENTBOND),
Unadjusted,
Unadjusted,
Backward,
False);
bond = FixedRateBond(
settlement_days,
face_amount,
fixed_bond_schedule,
[coupon_rate],
ActualActual(Bond),
Unadjusted,
redemption,
issue_date
)
zspd=bf.zSpread(bond, 100.0, flat_term_structure, Actual365Fixed(),
Compounded, Semiannual, settlement_date, 1e-6, 100, 0.5)
depositData = [[ 1, Months, 4.581 ],
[ 2, Months, 4.573 ],
[ 3, Months, 4.557 ],
[ 6, Months, 4.496 ],
[ 9, Months, 4.490 ]]
swapData = [[ 1, Years, 4.54 ],
[ 5, Years, 4.99 ],
[ 10, Years, 5.47 ],
[ 20, Years, 5.89 ],
[ 30, Years, 5.96 ]]
rate_helpers = []
end_of_month = True
for m, period, rate in depositData:
tenor = Period(m, Months)
sq_rate = SimpleQuote(rate/100)
helper = DepositRateHelper(sq_rate,
tenor,
settlement_days,
calendar,
ModifiedFollowing,
end_of_month,
Actual360())
simple_quotes.append(sq_rate)
rate_helpers.append(helper)
liborIndex = Libor('USD Libor', Period(6, Months), settlement_days,
USDCurrency(), calendar, Actual360(),
YieldTermStructure(relinkable=False))
spread = SimpleQuote(0)
fwdStart = Period(0, Days)
for m, period, rate in swapData:
sq_rate = SimpleQuote(rate/100)
helper = SwapRateHelper.from_tenor(
sq_rate, Period(m, Years), calendar, Annual, Unadjusted, Thirty360(), liborIndex,
spread, fwdStart
)
simple_quotes.append(sq_rate)
rate_helpers.append(helper)
ts_day_counter = ActualActual(ISDA)
tolerance = 1.0e-15
ts = PiecewiseYieldCurve(
'discount', 'loglinear', settlement_date, rate_helpers,
ts_day_counter, tolerance)
discounting_term_structure = YieldTermStructure(relinkable=True)
discounting_term_structure.link_to(ts)
pricing_engine = DiscountingBondEngine(discounting_term_structure)
bond.set_pricing_engine(pricing_engine)
self.assertAlmostEqual(bond.npv, 100.83702940160767)
ba = bucket_analysis([simple_quotes], [bond], [1], 0.0001, 1)
        self.assertEqual(2, len(ba))
        self.assertIsInstance(ba, tuple)
self.assertEqual(len(simple_quotes), len(ba[0][0]))
self.assertEqual(0, ba[0][0][8])
def test_bucket_analysis_option(self):
settings = Settings()
calendar = TARGET()
todays_date = Date(15, May, 1998)
settlement_date = Date(17, May, 1998)
settings.evaluation_date = todays_date
option_type = Put
underlying = 40
strike = 40
dividend_yield = 0.00
risk_free_rate = 0.001
volatility = 0.20
maturity = Date(17, May, 1999)
daycounter = Actual365Fixed()
underlyingH = SimpleQuote(underlying)
payoff = PlainVanillaPayoff(option_type, strike)
flat_term_structure = FlatForward(
reference_date = settlement_date,
forward = risk_free_rate,
daycounter = daycounter
)
flat_dividend_ts = FlatForward(
reference_date = settlement_date,
forward = dividend_yield,
daycounter = daycounter
)
flat_vol_ts = BlackConstantVol(
settlement_date,
calendar,
volatility,
daycounter
)
black_scholes_merton_process = BlackScholesMertonProcess(
underlyingH,
flat_dividend_ts,
flat_term_structure,
flat_vol_ts
)
european_exercise = EuropeanExercise(maturity)
european_option = VanillaOption(payoff, european_exercise)
analytic_european_engine = AnalyticEuropeanEngine(
black_scholes_merton_process
)
european_option.set_pricing_engine(analytic_european_engine)
ba_eo= bucket_analysis(
[[underlyingH]], [european_option], [1], 0.50, 1)
        self.assertEqual(2, len(ba_eo))
        self.assertIsInstance(ba_eo, tuple)
self.assertEqual(1, len(ba_eo[0][0]))
self.assertEqual(-0.4582666150152517, ba_eo[0][0][0])
if __name__ == '__main__':
unittest.main()
| 35.514644 | 122 | 0.615457 | 804 | 8,488 | 6.299751 | 0.297264 | 0.05923 | 0.025271 | 0.014808 | 0.168016 | 0.085884 | 0.04304 | 0.04304 | 0.04304 | 0 | 0 | 0.039611 | 0.309967 | 8,488 | 238 | 123 | 35.663866 | 0.825166 | 0.007658 | 0 | 0.142077 | 0 | 0 | 0.004038 | 0 | 0 | 0 | 0 | 0 | 0.04918 | 1 | 0.010929 | false | 0 | 0.147541 | 0 | 0.163934 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d364d894789927e0f9a525f39ac2e4267bd239 | 8,012 | py | Python | python/enhancement.py | TrojanXu/4k-bing-ng | a2c015e67191e7f9aa9d662e59ee39bb44f7ef28 | [
"Apache-2.0"
] | 3 | 2020-03-07T11:44:22.000Z | 2020-06-17T00:18:59.000Z | python/enhancement.py | TrojanXu/4k-bing-ng | a2c015e67191e7f9aa9d662e59ee39bb44f7ef28 | [
"Apache-2.0"
] | null | null | null | python/enhancement.py | TrojanXu/4k-bing-ng | a2c015e67191e7f9aa9d662e59ee39bb44f7ef28 | [
"Apache-2.0"
] | null | null | null | import sys
#import tensorflow as tf
sys.path.append("../3rdparty/mmsr/codes/")
import models.archs.RRDBNet_arch as arch
import utils.util as util
import numpy as np
import torch
import onnxruntime as rt
import argparse
import glob
import cv2
from image_content import ImageContent
def adjust_dynamic_range(data, drange_in, drange_out):
if drange_in != drange_out:
scale = (np.float32(drange_out[1]) - np.float32(drange_out[0])) / (np.float32(drange_in[1]) - np.float32(drange_in[0]))
bias = (np.float32(drange_out[0]) - np.float32(drange_in[0]) * scale)
data = data * scale + bias
return data
class Step:
def get_description(self):
return "Step"
pass
class Denoise(Step):
def __init__(self, model_path):
super(Denoise, self).__init__()
self._model_path = model_path
'''
# tensorflow
self._graph = tf.Graph()
self._sess = tf.InteractiveSession(graph = self._graph)
with tf.gfile.GFile(self._model_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
# Define input tensor
self._input = tf.placeholder(tf.float32, shape = [None, 3, None, None], name='Inputs/Placeholder')
tf.import_graph_def(graph_def, {'Inputs/Placeholder': self._input})
self._output_tensor = self._graph.get_tensor_by_name("import/noise2clean_1/nin_c/add:0")
'''
self._tile_size = 128
self._sess = rt.InferenceSession(self._model_path, providers=['CPUExecutionProvider'])
sess_opt = self._sess.get_session_options()
self._input = self._sess.get_inputs()[0].name
def get_description(self):
return "denoise"
# assume in_img is of [0,255] and hwc
def execute(self, in_img):
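        # Tile the image into 128x128 patches (batched along N), run the ONNX
        # denoiser in [0, 1] space, then write the cleaned tiles back into the image.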
tile_size = self._tile_size
data_type = in_img.data.dtype
data_nchw = in_img.get_nchw_data(tile=[tile_size, tile_size])
data_nchw = adjust_dynamic_range(data_nchw, [0, 255], [0., 1.])
out = self._sess.run(None, {self._input:data_nchw})[0]
out = adjust_dynamic_range(out, [0.,1.], [0, 255])
out = np.rint(out).clip(0, 255).astype(data_type)
in_img.set_nchw_data(out)
'''
in_img_sh = in_img.data.shape
h, w = in_img_sh[0], in_img_sh[1]
num_tile_h, num_tile_w = (h+tile_size-1) // tile_size, (w+tile_size-1)//tile_size
for i in range(num_tile_h):
start_h, end_h = i*tile_size, min(i*tile_size+tile_size, h)
for j in range(num_tile_w):
start_w, end_w = j*tile_size, min(j*tile_size+tile_size, w)
img = in_img.data[start_h:end_h, start_w:end_w, :]
img = np.expand_dims(img.transpose([2,0,1]), axis=0)
img = adjust_dynamic_range(img, [0, 255], [0.0, 1.0])
sh = img.shape[2:]
validation_image_size = [max([x.shape[axis] for x in [img]]) for axis in [2, 3]]
validation_image_size = [(x + 31) // 32 * 32 for x in validation_image_size] # Round up to a multiple of 32.
validation_image_size = [max(validation_image_size) for x in validation_image_size] # Square it up for the rotators.
img = np.pad(img, [[0, 0], [0, 0], [0, validation_image_size[0] - sh[0]], [0, validation_image_size[1] - sh[1]]], 'reflect')
out = self._sess.run(None, {self._input: img})[0]
#out = self._sess.run(self._output_tensor, feed_dict = {self._input: img})
out = out[0, :, :sh[0], :sh[1]].transpose([1,2,0])
out = adjust_dynamic_range(out, [0,1], [0, 255])
out = np.rint(out).clip(0, 255).astype(data_type)
in_img.data[start_h:end_h, start_w:end_w, :] = out
'''
class AddCaption(Step):
def __init__(self):
super(AddCaption, self).__init__()
def execute(self, in_img):
pass
class SuperResolution(Step):
def __init__(self, model_path, scale):
super(SuperResolution, self).__init__()
assert(scale==2 or scale == 4)
self._model_path = model_path
self._description = 'x{}'.format(scale)
def get_description(self):
return self._description
def execute(self, in_img):
data_type = in_img.data.dtype
img = np.transpose(in_img.data.astype(np.float32), [2, 0, 1])
img = np.expand_dims(img, axis=0) # nchw
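        # OpenCV decodes to BGR in [0, 255]; reorder to RGB and rescale to [0, 1] before inference.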
img = img[:, [2, 1, 0], :, :] / 255.0
if '.onnx' in self._model_path:
pred = self._onnx_infer(img)
else:
pred = self._torch_infer(img)
pred = util.tensor2img(pred)
in_img.data = pred
def _onnx_infer(self, img):
sess = rt.InferenceSession(self._model_path)
sess_opt = sess.get_session_options()
input_name = sess.get_inputs()[0].name
pred_onnx = sess.run(None, {input_name: img})[0]
return torch.from_numpy(pred_onnx)
def _torch_infer(self, img):
if torch.cuda.is_available():
device = torch.device('cuda')
else:
device = torch.device('cpu')
torch_input = torch.from_numpy(img).to(device)
        model = arch.RRDBNet(in_nc=3, out_nc=3, nf=64, nb=23, upscale=2)
model_bytes = torch.load(self._model_path)
model.load_state_dict(model_bytes, strict=False)
model.eval()
model = model.to(device)
with torch.no_grad():
pred = model(torch_input)
return pred
'''
def enhance_a_single_image(img, steps):
img = ImageContent(cv2.imread(img_path))
description = ""
for step in steps:
step.execute(img)
description += "_" + step.get_description()
cv2.imwrite(img_path.replace('.png', description+'.png').replace('.jpg', description+'.jpg'), img.data)
'''
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Enhancement of a single image or images")
parser.add_argument('--image-dir', help='Path to image set')
parser.add_argument('--image', help='path to image')
parser.add_argument('--denoise', help='denoise model')
parser.add_argument('--x2', help='x2 model')
parser.add_argument('--x4', help='x4 model')
args = parser.parse_args()
if (args.image_dir is None) == (args.image is None):
print("both --image-dir and --image are set or unset. Please set either one.")
exit(1)
tasks = []
if args.x2 is not None:
steps = [SuperResolution(args.x2, 2)]
if args.denoise is not None:
steps.append(Denoise(args.denoise))
tasks.append(steps)
if args.x4 is not None:
steps = [SuperResolution(args.x4, 4)]
if args.denoise is not None:
steps.append(Denoise(args.denoise))
tasks.append(steps)
if args.x2 is None and args.x4 is None and args.denoise is not None:
tasks.append([Denoise(args.denoise)])
if len(tasks) == 0:
print("No model specified. Please specify at least one model.")
exit(1)
img_list = []
img_path_list = []
if args.image_dir is not None:
img_path_list = glob.glob(args.image_dir+"/*")
else:
        img_path_list = [args.image]
for task in tasks:
img_list = []
for img_path in img_path_list:
if img_path.endswith(".png") or img_path.endswith(".jpg"):
img_list.append(ImageContent(cv2.imread(img_path), img_path=img_path))
for step in task:
for img in img_list:
step.execute(img)
img.suffix += "_" + step.get_description()
del step
for img in img_list:
img.save()
| 36.921659 | 141 | 0.58637 | 1,095 | 8,012 | 4.050228 | 0.189041 | 0.022548 | 0.026381 | 0.011725 | 0.277339 | 0.167531 | 0.104848 | 0.092672 | 0.092672 | 0.076437 | 0 | 0.025892 | 0.28657 | 8,012 | 216 | 142 | 37.092593 | 0.75 | 0.007988 | 0 | 0.209302 | 0 | 0 | 0.065989 | 0.004412 | 0 | 0 | 0 | 0 | 0.007752 | 1 | 0.093023 | false | 0.015504 | 0.077519 | 0.023256 | 0.248062 | 0.015504 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d45b8619b3234ae6234ce7fa7a556db11c8c3c | 1,621 | py | Python | test_rmmc.py | rafacarrascosa/reed_muller_multiclass | f57f5fa35bd20969bde498b689595188b4739b65 | [
"MIT"
] | null | null | null | test_rmmc.py | rafacarrascosa/reed_muller_multiclass | f57f5fa35bd20969bde498b689595188b4739b65 | [
"MIT"
] | null | null | null | test_rmmc.py | rafacarrascosa/reed_muller_multiclass | f57f5fa35bd20969bde498b689595188b4739b65 | [
"MIT"
] | null | null | null | import pytest
import numpy
from reed_muller_multiclass import reed_muller, ReedMullerCodec
def _cm(s):
s = s.split()
s.sort(reverse=True)
return numpy.array([[int(x) for x in xs] for xs in s])
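# For example, _cm("11 01") sorts the rows in descending order and yields
# numpy.array([[1, 1],
#              [0, 1]]).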
def test_gm_smallest():
gm = reed_muller(1, 1)
assert (gm == _cm("11 01")).all()
def test_gm_1_3():
# according to https://en.wikipedia.org/wiki/Reed%E2%80%93Muller_code
# but shuffling the columns to read left to right.
correct = """
11111111
01010101
00110011
00001111
"""
gm = reed_muller(1, 3)
assert (gm == _cm(correct)).all()
def test_gm_2_3():
# according to https://en.wikipedia.org/wiki/Reed%E2%80%93Muller_code
# but shuffling the columns to read left to right.
correct = """
11111111
01010101
00110011
00001111
00010001
00000101
00000011
"""
gm = reed_muller(2, 3)
assert (gm == _cm(correct)).all()
def test_gm_shape():
gm = reed_muller(1, 9)
assert gm.shape == (9 + 1, 2 ** 9)
def test_gm_invalid_values():
    # Each invalid call needs its own raises() context: anything after the
    # first raise inside a single block would never execute.
    with pytest.raises(ValueError):
        reed_muller(0, 1)
    with pytest.raises(ValueError):
        reed_muller(0, 0)
    with pytest.raises(ValueError):
        reed_muller(-2, -1)
    with pytest.raises(ValueError):
        reed_muller(10, 9)
def test_reed_muller_2_4_back_and_forth():
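    # RM(2, 4) carries k = C(4,0) + C(4,1) + C(4,2) = 1 + 4 + 6 = 11 message
    # bits, hence every value in [0, 2**11) should survive a round trip.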
rm = ReedMullerCodec(2, 4)
for i in range(2 ** 11):
assert i == rm.decode(rm.encode(i))
def test_reed_muller_limit_is_shorter():
a = ReedMullerCodec(2, 5, limit=5).encode(4)
b = ReedMullerCodec(2, 5).encode(4)
assert len(a) < len(b)
def test_reed_muller_limit_raises():
with pytest.raises(ValueError):
ReedMullerCodec(1, 3, limit=5).encode(100)
| 21.905405 | 73 | 0.636644 | 245 | 1,621 | 4.02449 | 0.342857 | 0.131846 | 0.045639 | 0.039554 | 0.375254 | 0.330629 | 0.330629 | 0.330629 | 0.330629 | 0.269777 | 0 | 0.118644 | 0.235657 | 1,621 | 73 | 74 | 22.205479 | 0.677159 | 0.143738 | 0 | 0.313725 | 0 | 0 | 0.114244 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.176471 | false | 0 | 0.058824 | 0 | 0.254902 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d524497404bc22d398f6bafda7fcf2a05518e2 | 1,571 | py | Python | examples/central_system/standalone/central_system.py | villekr/ocpp-asgi | 032e3843b09c1b6a1c2a1d1accc1bea2b125e397 | [
"MIT"
] | 2 | 2021-10-19T04:54:59.000Z | 2021-12-11T21:57:17.000Z | examples/central_system/standalone/central_system.py | villekr/ocpp-asgi | 032e3843b09c1b6a1c2a1d1accc1bea2b125e397 | [
"MIT"
] | null | null | null | examples/central_system/standalone/central_system.py | villekr/ocpp-asgi | 032e3843b09c1b6a1c2a1d1accc1bea2b125e397 | [
"MIT"
] | 1 | 2021-09-06T10:42:08.000Z | 2021-09-06T10:42:08.000Z | from examples.central_system.routers.v16.provisioning_router import (
router as v16_provisioning_router,
)
from examples.central_system.routers.v201.provisioning_router import (
router as v201_provisioning_router,
)
from ocpp_asgi.app import ASGIApplication, RouterContext, Subprotocol
class CentralSystem(ASGIApplication):
"""Central System is collection of routers."""
async def on_startup(self):
print("(CentralSystem) Startup.")
async def on_shutdown(self):
print("(CentralSystem) Shutdown.")
async def on_connect(self, context: RouterContext) -> bool:
print(
f"(CentralSystem) Charging Station id: {context.charging_station_id} subprotocol: {context.subprotocol} connected." # noqa: E501
)
        # You can inspect context.scope["headers"] and perform e.g. basic authentication
return True
async def on_disconnect(
self, *, charging_station_id: str, subprotocol: Subprotocol, code: int
):
print(
f"(CentralSystem) Charging Station id: {charging_station_id} subprotocol: {subprotocol} disconnected. Reason code: {code}" # noqa: E501
)
if __name__ == "__main__":
import uvicorn
central_system = CentralSystem()
central_system.include_router(v16_provisioning_router)
central_system.include_router(v201_provisioning_router)
subprotocols = f"{Subprotocol.ocpp201}, {Subprotocol.ocpp16}"
headers = [("Sec-WebSocket-Protocol", subprotocols)]
uvicorn.run(central_system, host="0.0.0.0", port=9000, headers=headers)
| 36.534884 | 148 | 0.716741 | 174 | 1,571 | 6.258621 | 0.41954 | 0.083563 | 0.078053 | 0.045914 | 0.183655 | 0.066116 | 0 | 0 | 0 | 0 | 0 | 0.026542 | 0.184596 | 1,571 | 42 | 149 | 37.404762 | 0.823575 | 0.089752 | 0 | 0.064516 | 0 | 0.064516 | 0.253165 | 0.080872 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.129032 | 0 | 0.193548 | 0.129032 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d6dfc31aefe9661b943152eec0c5066e929e97 | 2,452 | py | Python | hazard_detection/hazard_detection.py | Vlad-Mocanu/hazard_detection | 6c3426847e90846347b7eb0f538b2c0854093b14 | [
"Apache-2.0"
] | 1 | 2018-03-10T11:02:25.000Z | 2018-03-10T11:02:25.000Z | hazard_detection/hazard_detection.py | Vlad-Mocanu/hazard_detection | 6c3426847e90846347b7eb0f538b2c0854093b14 | [
"Apache-2.0"
] | null | null | null | hazard_detection/hazard_detection.py | Vlad-Mocanu/hazard_detection | 6c3426847e90846347b7eb0f538b2c0854093b14 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import argparse
import json
import pyaudio
import time
import logging
import RPi.GPIO as GPIO
import datetime
import sched
import threading
import mail_functions
import sound_detection_functions
import water_detection_functions
# read configuration
parser = argparse.ArgumentParser()
parser.add_argument("--config_file", "-f", default="hazard_config.json", help="path to configuration json file (default: hazard_config.json)")
args = parser.parse_args()
with open(args.config_file) as data_file:
config_options = json.load(data_file)
data_file.close()
# configure loggers
handlers = [logging.FileHandler(config_options["logging"]["log_file"]), logging.StreamHandler()]
logging.basicConfig(level=config_options["logging"]["level"], handlers=handlers, format="[%(asctime)-15s] %(message)s")
# schedule the next status report
s = sched.scheduler(time.time, time.sleep)
status_thread = threading.Thread(target=mail_functions.schedule_next_status, args=(1, s, logging, config_options))
status_thread.start()
# callback for water detection - each pin change triggers the callback, which sends a mail
def callback(channel):
water_detection_functions.get_flood_status(GPIO.input(channel), "", logging, config_options)
# water #######################
if config_options["water_detection"]["enable_function"]:
logging.info("Water detection function enabled")
GPIO.setmode(GPIO.BCM)
GPIO.setup(config_options["water_detection"]["channel"], GPIO.IN)
GPIO.add_event_detect(config_options["water_detection"]["channel"], GPIO.BOTH, callback, bouncetime=config_options["water_detection"]["bouncetime"])
else:
logging.info("Water detection function not enabled (see config)")
# sound #######################
if config_options["sound_detection"]["enable_function"]:
# initialize sound detection and determine the correct hardware used
logging.info("Sound detection function enabled")
logging.info("Initialize PyAudio...")
p = pyaudio.PyAudio()
record_device_index = sound_detection_functions.get_recording_device(p, logging, config_options)
# sound detection
while True:
sound_detection_functions.listen_until_sound_on(p, record_device_index, logging, config_options)
sound_detection_functions.listen_until_sound_off(p, record_device_index, logging, config_options)
else:
logging.info("Sound detection function not enabled (see config)")
| 37.723077 | 152 | 0.761827 | 311 | 2,452 | 5.794212 | 0.363344 | 0.093785 | 0.055494 | 0.059933 | 0.240844 | 0.167592 | 0.042175 | 0 | 0 | 0 | 0 | 0.001843 | 0.115008 | 2,452 | 64 | 153 | 38.3125 | 0.828571 | 0.117047 | 0 | 0.047619 | 0 | 0 | 0.218587 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0.285714 | 0 | 0.309524 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d8a570d3e7dd430f0bc29d40e637477b207598 | 6,056 | py | Python | tests/unit/boot/configurators/test_jobqueue.py | sonali-pitre/ignition | 03c4bc7eac53159a1c7dbe2519eb2e366bd82304 | [
"Apache-2.0"
] | 1 | 2019-09-02T15:23:08.000Z | 2019-09-02T15:23:08.000Z | tests/unit/boot/configurators/test_jobqueue.py | sonali-pitre/ignition | 03c4bc7eac53159a1c7dbe2519eb2e366bd82304 | [
"Apache-2.0"
] | 62 | 2019-09-16T14:51:32.000Z | 2020-07-08T13:28:50.000Z | tests/unit/boot/configurators/test_jobqueue.py | sonali-pitre/ignition | 03c4bc7eac53159a1c7dbe2519eb2e366bd82304 | [
"Apache-2.0"
] | 4 | 2021-08-17T14:38:54.000Z | 2022-02-09T14:33:57.000Z | from .utils import ConfiguratorTestCase
from unittest.mock import MagicMock, patch
from ignition.boot.config import BootstrapApplicationConfiguration, BootProperties
from ignition.boot.configurators.jobqueue import JobQueueConfigurator
from ignition.service.queue import JobQueueCapability, MessagingJobQueueService, JobQueueProperties
from ignition.service.messaging import MessagingProperties, TopicsProperties, PostalCapability, InboxCapability
from ignition.service.framework import ServiceRegistration
class TestJobQueueConfigurator(ConfiguratorTestCase):
def __bootstrap_config(self):
configuration = BootstrapApplicationConfiguration()
configuration.app_name = 'TestJobQueueConfigurator'
boot_config = BootProperties()
configuration.property_groups.add_property_group(boot_config)
messaging_conf = MessagingProperties()
messaging_conf.connection_address = 'testaddr'
configuration.property_groups.add_property_group(messaging_conf)
job_queue_conf = JobQueueProperties()
configuration.property_groups.add_property_group(job_queue_conf)
return configuration
def test_configure_nothing_when_disabled(self):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = False
JobQueueConfigurator().configure(configuration, self.mock_service_register)
self.mock_service_register.add_service.assert_not_called()
@patch('ignition.boot.configurators.jobqueue.TopicCreator')
def test_configure(self, mock_topic_creator_init):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
self.mock_service_register.get_service_offering_capability.return_value = None
JobQueueConfigurator().configure(configuration, self.mock_service_register)
registered_service = self.assert_single_service_registered()
self.assert_service_registration_equal(registered_service, ServiceRegistration(
MessagingJobQueueService, job_queue_config=JobQueueProperties, postal_service=PostalCapability, inbox_service=InboxCapability, topics_config=TopicsProperties, messaging_config=MessagingProperties))
def test_configure_service_fails_when_already_registered(self):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
self.mock_service_register.get_service_offering_capability.return_value = MagicMock()
with self.assertRaises(ValueError) as context:
JobQueueConfigurator().configure(configuration, self.mock_service_register)
self.assertEqual(str(context.exception), 'An existing service has been registered to serve the Job Queue capability but bootstrap.job_queue.service_enabled has not been disabled')
def test_configure_fails_when_messaging_connection_address_not_set(self):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
configuration.property_groups.get_property_group(MessagingProperties).connection_address = None
self.mock_service_register.get_service_offering_capability.return_value = None
with self.assertRaises(ValueError) as context:
JobQueueConfigurator().configure(configuration, self.mock_service_register)
self.assertEqual(str(context.exception), 'messaging.connection_address must be set when bootstrap.job_queue.service_enabled is True')
@patch('ignition.boot.configurators.jobqueue.TopicCreator')
def test_configure_creates_job_queue_topic_name_when_not_set(self, mock_topic_creator_init):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
self.mock_service_register.get_service_offering_capability.return_value = None
JobQueueConfigurator().configure(configuration, self.mock_service_register)
self.assertEqual(configuration.property_groups.get_property_group(MessagingProperties).topics.job_queue.name, 'TestJobQueueConfigurator_job_queue')
@patch('ignition.boot.configurators.jobqueue.TopicCreator')
def test_configure_creates_job_queue_topic_with_special_chars_removed(self, mock_topic_creator_init):
configuration = self.__bootstrap_config()
configuration.app_name = 'Testing Spaces And Special !"£$%^&*()+={}[]:;@~#<>?,./¬ Chars'
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
self.mock_service_register.get_service_offering_capability.return_value = None
JobQueueConfigurator().configure(configuration, self.mock_service_register)
self.assertEqual(configuration.property_groups.get_property_group(MessagingProperties).topics.job_queue.name, 'Testing_Spaces_And_Special_Chars_job_queue')
@patch('ignition.boot.configurators.jobqueue.TopicCreator')
def test_configure_creates_job_queue_topic_if_needed(self, mock_topic_creator_init):
configuration = self.__bootstrap_config()
configuration.property_groups.get_property_group(BootProperties).job_queue.service_enabled = True
configuration.property_groups.get_property_group(MessagingProperties).topics.job_queue.auto_create = True
self.mock_service_register.get_service_offering_capability.return_value = None
JobQueueConfigurator().configure(configuration, self.mock_service_register)
mock_topic_creator_init.assert_called_once()
messaging_properties = MessagingProperties()
messaging_properties.connection_address = 'testaddr'
mock_topic_creator_init.return_value.create_topic_if_needed.assert_called_once_with(messaging_properties, configuration.property_groups.get_property_group(MessagingProperties).topics.job_queue)
| 70.418605 | 209 | 0.808785 | 643 | 6,056 | 7.219285 | 0.172628 | 0.037915 | 0.087247 | 0.069367 | 0.626454 | 0.602327 | 0.574537 | 0.574537 | 0.559673 | 0.545455 | 0 | 0 | 0.121697 | 6,056 | 85 | 210 | 71.247059 | 0.872344 | 0 | 0 | 0.418919 | 0 | 0.013514 | 0.098761 | 0.069694 | 0 | 0 | 0 | 0 | 0.148649 | 1 | 0.108108 | false | 0 | 0.094595 | 0 | 0.22973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7d98dd1bec8cc8e309b9de3fe27edbbe269c2b1 | 1,048 | py | Python | setuptools_git.py | onepercentclub/django-salesforce | 24fb86185276f7af34d8b5fbd32c819f1e15b419 | [
"MIT"
] | null | null | null | setuptools_git.py | onepercentclub/django-salesforce | 24fb86185276f7af34d8b5fbd32c819f1e15b419 | [
"MIT"
] | null | null | null | setuptools_git.py | onepercentclub/django-salesforce | 24fb86185276f7af34d8b5fbd32c819f1e15b419 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
A hook into setuptools for Git.
"""
import locale
import os
from subprocess import Popen, PIPE
import sys
if sys.version_info[0] >= 3:
def u(s, encoding):
if not isinstance(s, str):
s = s.decode(encoding)
return s
else:
def u(s, encoding):
return s
def gitlsfiles(dirname=""):
try:
p = Popen(['git', 'ls-files', dirname], stdout=PIPE, stderr=PIPE)
p.stderr.close()
files = p.stdout.readlines()
    except Exception:
# Something went terribly wrong but the setuptools doc says we
# must be strong in the face of danger. We shall not run away
# in panic.
return []
if p.wait():
        # git choked
return []
encoding = locale.getpreferredencoding()
return [u(f.strip(), encoding) for f in files]
if __name__ == "__main__":
import sys
from pprint import pprint
if len(sys.argv) != 2:
print("USAGE: %s DIRNAME" % sys.argv[0])
sys.exit(1)
pprint(gitlsfiles(sys.argv[1]))
| 22.782609 | 73 | 0.591603 | 144 | 1,048 | 4.243056 | 0.548611 | 0.03437 | 0.016367 | 0.042553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008043 | 0.288168 | 1,048 | 45 | 74 | 23.288889 | 0.810992 | 0.187023 | 0 | 0.266667 | 0 | 0 | 0.042857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.2 | 0.033333 | 0.466667 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7dd74b2264a3a3f6149373f76229db2588e6bd0 | 1,108 | py | Python | aoc/day04/board.py | hron/advent-of-code-2021 | 6be8cfb88595d35a7865f8faf734f8efd9c28543 | [
"MIT"
] | null | null | null | aoc/day04/board.py | hron/advent-of-code-2021 | 6be8cfb88595d35a7865f8faf734f8efd9c28543 | [
"MIT"
] | null | null | null | aoc/day04/board.py | hron/advent-of-code-2021 | 6be8cfb88595d35a7865f8faf734f8efd9c28543 | [
"MIT"
] | null | null | null | from aoc.day04.cell import Cell
class Board:
def __init__(self, raw_board: list[str]):
self.board_state = [
[Cell(int(d)) for d in r.split()]
for r in raw_board
]
def mark(self, number: int):
for row in self.board_state:
for cell in row:
if cell.value == number:
cell.mark()
def rows(self):
return self.board_state
    def columns(self):
        clmns = []
        for c in range(len(self.board_state[0])):  # one list per column
            clmns.append([])
            for r in range(len(self.board_state)):  # collect the column from every row
                clmns[-1].append(self.board_state[r][c])
        return clmns
def detect_winning_position(self):
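        # Python's for/else: the ``else`` branch runs only when the inner loop
        # finished without ``break``, i.e. every cell in that line is marked,
        # so the row (or column) is a winning one.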
for row in self.rows():
for cell in row:
if not cell.is_marked():
break
else:
return row
for column in self.columns():
for cell in column:
if not cell.is_marked():
break
else:
return column
return None
| 25.767442 | 56 | 0.486462 | 136 | 1,108 | 3.845588 | 0.330882 | 0.10325 | 0.160612 | 0.045889 | 0.286807 | 0.237094 | 0.237094 | 0.237094 | 0 | 0 | 0 | 0.007788 | 0.420578 | 1,108 | 42 | 57 | 26.380952 | 0.806854 | 0 | 0 | 0.228571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.028571 | 0.028571 | 0.342857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e04ae0e11bff76e1cb837bbae9d685acd842d1 | 2,896 | py | Python | tools/perf/page_sets/calendar_forward_backward.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-01-25T10:18:18.000Z | 2021-01-23T15:29:56.000Z | tools/perf/page_sets/calendar_forward_backward.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | tools/perf/page_sets/calendar_forward_backward.py | justremotephone/android_external_chromium_org | 246856e61da7acf5494076c74198f2aea894a721 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2020-11-04T07:24:13.000Z | 2020-11-04T07:24:13.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class CalendarForwardBackwardPage(page_module.Page):
""" Why: Click forward(4x) and backwards(4x) repeatedly """
def __init__(self, page_set):
super(CalendarForwardBackwardPage, self).__init__(
url='https://www.google.com/calendar/',
page_set=page_set,
name='calendar_forward_backward')
self.credentials_path = 'data/credentials.json'
self.credentials = 'google'
self.user_agent_type = 'desktop'
self.archive_data_file = 'data/calendar_forward_backward.json'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
action_runner.ExecuteJavaScript('''
(function() {
var elem = document.createElement('meta');
elem.name='viewport';
elem.content='initial-scale=1';
document.body.appendChild(elem);
})();''')
def RunEndure(self, action_runner):
action_runner.ClickElement('div[class~="navForward"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
action_runner.ClickElement('div[class~="navForward"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
action_runner.ClickElement('div[class~="navForward"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
action_runner.ClickElement('div[class~="navForward"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navBack"]')
action_runner.ClickElement('div[class~="navBack"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navBack"]')
action_runner.ClickElement('div[class~="navBack"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navBack"]')
action_runner.ClickElement('div[class~="navBack"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navBack"]')
action_runner.ClickElement('div[class~="navBack"]')
action_runner.Wait(2)
action_runner.WaitForElement('div[class~="navForward"]')
class CalendarForwardBackwardPageSet(page_set_module.PageSet):
""" Chrome Endure test for Google Calendar. """
def __init__(self):
super(CalendarForwardBackwardPageSet, self).__init__(
credentials_path='data/credentials.json',
user_agent_type='desktop',
archive_data_file='data/calendar_forward_backward.json',
bucket=page_set_module.PUBLIC_BUCKET)
self.AddPage(CalendarForwardBackwardPage(self))
| 38.613333 | 72 | 0.726865 | 336 | 2,896 | 6.035714 | 0.306548 | 0.177515 | 0.071006 | 0.075444 | 0.557692 | 0.475838 | 0.475838 | 0.475838 | 0.430473 | 0.430473 | 0 | 0.009627 | 0.139157 | 2,896 | 74 | 73 | 39.135135 | 0.803851 | 0.095994 | 0 | 0.45614 | 0 | 0 | 0.299385 | 0.244427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.070175 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e1af61c6d88f3c8ebda38462eb9bed7e0209ef | 3,662 | py | Python | src/wa_kat/analyzers/language_detector.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 3 | 2017-03-23T12:59:21.000Z | 2017-11-22T08:23:14.000Z | src/wa_kat/analyzers/language_detector.py | WebArchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 89 | 2015-06-28T22:10:28.000Z | 2017-01-30T16:06:05.000Z | src/wa_kat/analyzers/language_detector.py | WebarchivCZ/WA-KAT | 719f7607222f5a4d917c535b2da6371184222101 | [
"MIT"
] | 1 | 2015-12-17T02:56:59.000Z | 2015-12-17T02:56:59.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
"""
Parse and guess information about language of the resource. Normalize the
language tags to ISO 639-2 format.
"""
#
# Imports =====================================================================
import langdetect
import dhtmlparser
from .shared import parse_meta
from .source_string import SourceString
from ..convertors.iso_codes import normalize
# Functions & classes =========================================================
def get_html_lang_tags(index_page):
"""
Return `languages` stored in ``<meta>`` tags.
``<meta http-equiv="Content-language" content="cs">`` -> ``cs``
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
dom = dhtmlparser.parseString(index_page)
lang_tag = "content-language"
lang_tags = dom.find(
"meta",
fn=lambda x: x.params.get("http-equiv", "").lower() == lang_tag
)
return [
SourceString(tag.params["content"], "HTML")
for tag in lang_tags
if "content" in tag.params
]
def get_html_tag_lang_params(index_page):
"""
Parse lang and xml:lang parameters in the ``<html>`` tag.
See
https://www.w3.org/International/questions/qa-html-language-declarations
for details.
Args:
        index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
dom = dhtmlparser.parseString(index_page)
html_tag = dom.find("html")
if not html_tag:
return []
html_tag = html_tag[0]
# parse parameters
lang = html_tag.params.get("lang")
xml_lang = html_tag.params.get("xml:lang")
if lang and lang == xml_lang:
return [SourceString(lang, source="<html> tag")]
out = []
if lang:
out.append(SourceString(lang, source="<html lang=..>"))
if xml_lang:
out.append(SourceString(xml_lang, source="<html xml:lang=..>"))
return out
def get_dc_lang_tags(index_page):
"""
Return `languages` stored in dublin core ``<meta>`` tags.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
return parse_meta(index_page, "dc.language", "DC")
def detect_language(index_page):
"""
Detect `languages` using `langdetect` library.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
obj: One :class:`.SourceString` object.
"""
dom = dhtmlparser.parseString(index_page)
clean_content = dhtmlparser.removeTags(dom)
lang = None
try:
lang = langdetect.detect(clean_content)
except UnicodeDecodeError:
lang = langdetect.detect(clean_content.decode("utf-8"))
return SourceString(
lang,
source="langdetect"
)
def get_lang_tags(index_page):
"""
Collect informations about language of the page from HTML and Dublin core
tags and langdetect guesses.
Args:
index_page (str): HTML content of the page you wish to analyze.
Returns:
list: List of :class:`.SourceString` objects.
"""
dom = dhtmlparser.parseString(index_page)
lang_tags = [
get_html_lang_tags(dom),
get_dc_lang_tags(dom),
[detect_language(dom)],
get_html_tag_lang_params(dom),
]
return list(sorted(set(
SourceString(normalize(lang), source=lang.source)
for lang in sum(lang_tags, [])
)))
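# A minimal usage sketch (``html`` stands for hypothetical page content):
#
#   html = open("index.html").read()
#   for tag in get_lang_tags(html):
#       print(tag, tag.source)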
| 23.934641 | 79 | 0.615784 | 447 | 3,662 | 4.90604 | 0.252796 | 0.06156 | 0.024624 | 0.03648 | 0.362517 | 0.28135 | 0.28135 | 0.28135 | 0.24487 | 0.24487 | 0 | 0.003575 | 0.23621 | 3,662 | 152 | 80 | 24.092105 | 0.780479 | 0.408793 | 0 | 0.067797 | 0 | 0 | 0.067882 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.084746 | false | 0 | 0.084746 | 0 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e25c4592a79a5289131716eea200d53ce0a9d0 | 18,951 | py | Python | tungsten_tempest_plugin/tests/api/contrail/test_load_balancer.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | null | null | null | tungsten_tempest_plugin/tests/api/contrail/test_load_balancer.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | null | null | null | tungsten_tempest_plugin/tests/api/contrail/test_load_balancer.py | Goutham-Pratapa/tungsten-tempest | 966a2f2795435314c91e0d236040412d95fa2e96 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tempest test-case to test load balancer objects using RBAC roles
"""
from oslo_log import log as logging
from patrole_tempest_plugin import rbac_rule_validation
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest import test
from tungsten_tempest_plugin.tests.api.contrail import rbac_base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class BaseLoadBalancerTest(rbac_base.BaseContrailTest):
"""Base class to test load balancer objects using RBAC roles"""
@classmethod
def skip_checks(cls):
super(BaseLoadBalancerTest, cls).skip_checks()
if not test.is_extension_enabled('lbaas', 'network'):
raise cls.skipException(
'%s skipped - lbaas extension not enabled' % cls.__name__)
def _create_load_balancer(self):
fq_name = data_utils.rand_name('load-balancer')
post_body = {
'parent_type': 'project',
'fq_name': ['default-domain', self.tenant_name, fq_name]
}
resp_body = self.load_balancer_client.create_load_balancers(
**post_body)
lb_uuid = resp_body['loadbalancer']['uuid']
self.addCleanup(self._try_delete_resource,
self.load_balancer_client.delete_load_balancer,
lb_uuid)
return lb_uuid
def _update_load_balancer(self, lb_uuid):
put_body = {
'display_name': data_utils.rand_name('load-balancer')
}
self.load_balancer_client.update_load_balancer(lb_uuid, **put_body)
def _create_load_balancer_health_monitor(self):
fq_name = data_utils.rand_name('load_balancer-health-monitor')
post_body = {
'parent_type': 'project',
'fq_name': ['default-domain', self.tenant_name, fq_name],
'loadbalancer_healthmonitor_properties': {
'monitor_type': 'PING',
'delay': 10,
'timeout': 60,
'max_retries': 3
}
}
resp_body = self.load_balancer_client \
.create_lb_healthmonitors(**post_body)
lb_hm_uuid = resp_body['loadbalancer-healthmonitor']['uuid']
self.addCleanup(self._try_delete_resource,
self.load_balancer_client
.delete_lb_healthmonitor,
lb_hm_uuid)
return lb_hm_uuid
def _update_load_balancer_health_monitor(self, lb_hm_uuid):
display_name = data_utils.rand_name('load_balancer-health-monitor')
put_body = {
'display_name': display_name
}
self.load_balancer_client.update_lb_healthmonitor(
lb_hm_uuid,
**put_body)
def _create_load_balancer_listener(self):
fq_name = data_utils.rand_name('load_balancer-listener')
post_body = {
'parent_type': 'project',
'fq_name': ['default-domain', self.tenant_name, fq_name]
}
resp_body = self.load_balancer_client.create_load_balancer_listeners(
**post_body)
lb_listener_uuid = resp_body['loadbalancer-listener']['uuid']
self.addCleanup(self._try_delete_resource,
self.load_balancer_client
.delete_load_balancer_listener,
lb_listener_uuid)
return lb_listener_uuid
def _update_load_balancer_listener(self, lb_listener_uuid):
put_body = {
'display_name': data_utils.rand_name('load_balancer-listener')
}
self.load_balancer_client.update_load_balancer_listener(
lb_listener_uuid,
**put_body)
def _create_load_balancer_pool(self, return_object=False):
fq_name = data_utils.rand_name('load_balancer-pool')
post_body = {
'parent_type': 'project',
'fq_name': ['default-domain', self.tenant_name, fq_name]
}
resp_body = self.load_balancer_client.create_load_balancer_pools(
**post_body)
lb_pool_uuid = resp_body['loadbalancer-pool']['uuid']
self.addCleanup(self._try_delete_resource,
self.load_balancer_client.delete_load_balancer_pool,
lb_pool_uuid)
if return_object:
return resp_body['loadbalancer-pool']
return lb_pool_uuid
def _update_load_balancer_pool(self, lb_pool_uuid):
put_body = {
'display_name': data_utils.rand_name('load_balancer-pool')
}
self.load_balancer_client.update_load_balancer_pool(lb_pool_uuid,
**put_body)
def _create_load_balancer_member(self):
lb_pool = self._create_load_balancer_pool(return_object=True)
fq_name = data_utils.rand_name('load_balancer-member')
post_body = {
'parent_type': 'loadbalancer-pool',
'fq_name': ['default-domain', self.tenant_name, lb_pool['name'],
fq_name]
}
resp_body = self.load_balancer_client.create_load_balancer_members(
**post_body)
lb_member_uuid = resp_body['loadbalancer-member']['uuid']
self.addCleanup(self._try_delete_resource,
self._delete_pool_and_member,
lb_pool['uuid'],
lb_member_uuid)
return lb_member_uuid
def _update_load_balancer_member(self, lb_member_uuid):
put_body = {
'display_name': data_utils.rand_name('load_balancer-member')
}
self.load_balancer_client.update_load_balancer_member(lb_member_uuid,
**put_body)
def _delete_pool_and_member(self, lb_pool_uuid, lb_member_uuid):
# Used by _try_delete_resource in _create_load_balancer_member.
# Guarantees that child (lb member) is deleted before parent
# dependency (lb pool).
self.load_balancer_client.delete_load_balancer_member(lb_member_uuid)
self.load_balancer_client.delete_load_balancer_pool(lb_pool_uuid)
class LoadBalancerContrailTest(BaseLoadBalancerTest):
"""Test class to test load balancer objects using RBAC roles"""
@rbac_rule_validation.action(service="Contrail",
rules=["list_load_balancers"])
@decorators.idempotent_id('5d840b6b-3974-4945-916f-dd53ba27e42f')
def test_list_load_balancers(self):
"""test method for list load balancer objects"""
with self.rbac_utils.override_role(self):
self.load_balancer_client.list_load_balancers()
@rbac_rule_validation.action(service="Contrail",
rules=["create_load_balancers"])
@decorators.idempotent_id('6a18d506-0794-4eb9-a945-165bf146005d')
def test_create_load_balancers(self):
"""test method for create load balancer objects"""
with self.rbac_utils.override_role(self):
self._create_load_balancer()
@rbac_rule_validation.action(service="Contrail",
rules=["show_load_balancer"])
@decorators.idempotent_id('428012aa-cd0e-4702-89d2-459046d4bd5f')
def test_show_load_balancer(self):
"""test method for show load balancer objects"""
lb_uuid = self._create_load_balancer()
with self.rbac_utils.override_role(self):
self.load_balancer_client.show_load_balancer(lb_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_load_balancer"])
@decorators.idempotent_id('7cd3d7b2-b149-40c1-a801-a6a8a660bd24')
def test_update_load_balancer(self):
"""test method for update load balancer objects"""
lb_uuid = self._create_load_balancer()
with self.rbac_utils.override_role(self):
self._update_load_balancer(lb_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_load_balancer"])
@decorators.idempotent_id('b28c6b11-d1b0-45d0-8942-638b6b590702')
def test_delete_load_balancer(self):
"""test method for delete load balancer objects"""
lb_uuid = self._create_load_balancer()
with self.rbac_utils.override_role(self):
self.load_balancer_client.delete_load_balancer(lb_uuid)
class LoadBalancerHealthMonitorContrailTest(BaseLoadBalancerTest):
"""Test class to test load balancer Health Monitor objects using RBAC roles
"""
@rbac_rule_validation.action(service="Contrail",
rules=["list_load_balancer_health_monitors"])
@decorators.idempotent_id('3e3d8bdc-3621-4c5e-8130-1187f445a4e6')
def test_list_lb_health_monitors(self):
"""test method for list load balancer health monitor objects"""
with self.rbac_utils.override_role(self):
self.load_balancer_client.list_lb_healthmonitors()
@rbac_rule_validation.action(service="Contrail",
rules=["create_load_balancer_health_monitors"]
)
@decorators.idempotent_id('bddb93ad-d331-4bbc-bac6-2763cae4eb2c')
def test_create_lb_health_monitors(self):
"""test method for create load balancer health monitor objects"""
with self.rbac_utils.override_role(self):
self._create_load_balancer_health_monitor()
@rbac_rule_validation.action(service="Contrail",
rules=["show_load_balancer_health_monitor"])
@decorators.idempotent_id('30d23994-1e3a-4a76-8f18-e00d0854412a')
def test_show_lb_health_monitor(self):
"""test method for show load balancer health monitor objects"""
lb_hm_uuid = self._create_load_balancer_health_monitor()
with self.rbac_utils.override_role(self):
self.load_balancer_client.show_lb_healthmonitor(
lb_hm_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_load_balancer_health_monitor"])
@decorators.idempotent_id('c32ba92c-3a69-4255-867a-1423c93faa6f')
def test_update_lb_health_monitor(self):
"""test method for update load balancer health monitor objects"""
lb_hm_uuid = self._create_load_balancer_health_monitor()
with self.rbac_utils.override_role(self):
self._update_load_balancer_health_monitor(lb_hm_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_load_balancer_health_monitor"])
@decorators.idempotent_id('b4d7ea9d-fd8c-433b-96fc-c24866b3f6a7')
def test_delete_lb_health_monitor(self):
"""test method for delete load balancer health monitor objects"""
lb_hm_uuid = self._create_load_balancer_health_monitor()
with self.rbac_utils.override_role(self):
self.load_balancer_client.delete_lb_healthmonitor(
lb_hm_uuid)
class LoadBalancerListenerContrailTest(BaseLoadBalancerTest):
"""Base class to test load balancer Listener objects using RBAC roles"""
@rbac_rule_validation.action(service="Contrail",
rules=["list_load_balancer_listeners"])
@decorators.idempotent_id('7e02882f-0eab-41c2-b48a-bf71e083b912')
def test_list_lb_listeners(self):
"""test method for list load balancer listener objects"""
with self.rbac_utils.override_role(self):
self.load_balancer_client.list_load_balancer_listeners()
@rbac_rule_validation.action(service="Contrail",
rules=["create_load_balancer_listeners"])
@decorators.idempotent_id('0551de87-fa4c-463f-8968-ec6f2a6098d0')
def test_create_lb_listeners(self):
"""test method for create load balancer listener objects"""
with self.rbac_utils.override_role(self):
self._create_load_balancer_listener()
@rbac_rule_validation.action(service="Contrail",
rules=["show_load_balancer_listener"])
@decorators.idempotent_id('ade38959-9506-4262-8d3c-5ba5eb63d85f')
def test_show_lb_listener(self):
"""test method for show load balancer listener objects"""
lb_listener_uuid = self._create_load_balancer_listener()
with self.rbac_utils.override_role(self):
self.load_balancer_client.show_load_balancer_listener(
lb_listener_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_load_balancer_listener"])
@decorators.idempotent_id('e529e538-da31-4159-91c2-6c0a828282a4')
def test_update_lb_listener(self):
"""test method for update load balancer listener objects"""
lb_listener_uuid = self._create_load_balancer_listener()
with self.rbac_utils.override_role(self):
self._update_load_balancer_listener(lb_listener_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_load_balancer_listener"])
@decorators.idempotent_id('feaf3e9a-ffd1-4327-ad7a-35f9e9e4989b')
def test_delete_lb_listener(self):
"""test method for delete load balancer listener objects"""
lb_listener_uuid = self._create_load_balancer_listener()
with self.rbac_utils.override_role(self):
self.load_balancer_client.delete_load_balancer_listener(
lb_listener_uuid)
class LoadBalancerPoolContrailTest(BaseLoadBalancerTest):
"""Base class to test load balancer Pool objects using RBAC roles"""
@rbac_rule_validation.action(service="Contrail",
rules=["list_load_balancer_pools"])
@decorators.idempotent_id('3d177a9e-7067-4e9e-b4e8-0acc5887dff0')
def test_list_load_balancer_pools(self):
"""test method for list load balancer pool objects"""
with self.rbac_utils.override_role(self):
self.load_balancer_client.list_load_balancer_pools()
@rbac_rule_validation.action(service="Contrail",
rules=["create_load_balancer_pools"])
@decorators.idempotent_id('a52c6ec7-a996-4191-9a70-7879a211a711')
def test_create_load_balancer_pools(self):
"""test method for create load balancer pool objects"""
with self.rbac_utils.override_role(self):
self._create_load_balancer_pool()
@rbac_rule_validation.action(service="Contrail",
rules=["show_load_balancer_pool"])
@decorators.idempotent_id('7923da4e-53b1-4024-9a40-5bc91cee8e2d')
def test_show_load_balancer_pool(self):
"""test method for show load balancer pool objects"""
lb_pool_uuid = self._create_load_balancer_pool()
with self.rbac_utils.override_role(self):
self.load_balancer_client.show_load_balancer_pool(lb_pool_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_load_balancer_pool"])
@decorators.idempotent_id('391c0c5e-c218-4c98-9b58-6d2724ec4c20')
def test_update_load_balancer_pool(self):
"""test method for update load balancer pool objects"""
lb_pool_uuid = self._create_load_balancer_pool()
with self.rbac_utils.override_role(self):
self._update_load_balancer_pool(lb_pool_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_load_balancer_pool"])
@decorators.idempotent_id('8b3617c0-4064-48f8-96b8-e2f996fce5c3')
def test_delete_load_balancer_pool(self):
"""test method for delete load balancer pool objects"""
lb_pool_uuid = self._create_load_balancer_pool()
with self.rbac_utils.override_role(self):
self.load_balancer_client.delete_load_balancer_pool(lb_pool_uuid)
class LoadBalancerMemberContrailTest(BaseLoadBalancerTest):
"""Base class to test load balancer Member using RBAC roles"""
@rbac_rule_validation.action(service="Contrail",
rules=["list_load_balancer_members"])
@decorators.idempotent_id('b3c51463-8166-486a-a26e-0f7aeaa41e0f')
def test_list_load_balancer_members(self):
"""test method for list load balancer member objects"""
with self.rbac_utils.override_role(self):
self.load_balancer_client.list_load_balancer_members()
@rbac_rule_validation.action(service="Contrail",
rules=["create_load_balancer_members"])
@decorators.idempotent_id('ad60688f-7a20-4dd5-8229-4076d85b9d55')
def test_create_lb_members(self):
"""test method for create load balancer member objects"""
with self.rbac_utils.override_role(self):
self._create_load_balancer_member()
@rbac_rule_validation.action(service="Contrail",
rules=["show_load_balancer_member"])
@decorators.idempotent_id('917602ff-24d5-4a07-a6a6-5e5b9539bbf1')
def test_show_load_balancer_member(self):
"""test method for show load balancer member objects"""
lb_member_uuid = self._create_load_balancer_member()
with self.rbac_utils.override_role(self):
self.load_balancer_client.show_load_balancer_member(lb_member_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_load_balancer_member"])
@decorators.idempotent_id('b1611005-5c77-4ac0-8fcc-4a035dfbaa84')
def test_update_lb_member(self):
"""test method for update load balancer member objects"""
lb_member_uuid = self._create_load_balancer_member()
with self.rbac_utils.override_role(self):
self._update_load_balancer_member(lb_member_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_load_balancer_member"])
@decorators.idempotent_id('dc21883a-a822-4d39-b815-4dfd6b505b0b')
def test_delete_lb_member(self):
"""test method for delete load balancer member objects"""
lb_member_uuid = self._create_load_balancer_member()
with self.rbac_utils.override_role(self):
self.load_balancer_client.delete_load_balancer_member(
lb_member_uuid)
| 45.886199 | 79 | 0.672946 | 2,207 | 18,951 | 5.400544 | 0.141821 | 0.167128 | 0.060408 | 0.05722 | 0.751238 | 0.698129 | 0.646363 | 0.52001 | 0.50365 | 0.477725 | 0 | 0.037001 | 0.238457 | 18,951 | 412 | 80 | 45.997573 | 0.788872 | 0.130389 | 0 | 0.35082 | 0 | 0 | 0.153672 | 0.102877 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121311 | false | 0 | 0.022951 | 0 | 0.183607 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e4a2c4f7a9a5ea3f6b7767d95410394a83bd59 | 24,000 | py | Python | flaskbb/cli/main.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
] | null | null | null | flaskbb/cli/main.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
] | null | null | null | flaskbb/cli/main.py | MansoorHanif/FYP-web-app | 918008d3b5eedaa904f3e720296afde9d73ac3f4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
flaskbb.cli.commands
~~~~~~~~~~~~~~~~~~~~
This module contains the main commands.
:copyright: (c) 2016 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import os
import time
import requests
import binascii
from datetime import datetime
import click
from werkzeug.utils import import_string, ImportStringError
from jinja2 import Environment, FileSystemLoader
from flask import current_app
from flask.cli import FlaskGroup, ScriptInfo, with_appcontext
from sqlalchemy_utils.functions import (database_exists, create_database,
drop_database)
from flask_alembic import alembic_click
from flaskbb import create_app
from flaskbb._compat import iteritems
from flaskbb.extensions import db, whooshee, celery, alembic
from flaskbb.cli.utils import (prompt_save_user, prompt_config_path,
write_config, get_version, FlaskBBCLIError,
EmailType)
from flaskbb.utils.populate import (create_test_data, create_welcome_forum,
create_default_groups,
create_default_settings, insert_bulk_data,
update_settings_from_fixture)
from flaskbb.utils.translations import compile_translations
def make_app(script_info):
config_file = getattr(script_info, "config_file")
if config_file is not None:
# check if config file exists
if os.path.exists(os.path.abspath(config_file)):
click.secho("[+] Using config from: {}".format(
os.path.abspath(config_file)), fg="cyan")
# config file doesn't exist, maybe it's a module
else:
try:
import_string(config_file)
click.secho("[+] Using config from: {}".format(config_file),
fg="cyan")
except ImportStringError:
click.secho("[~] Config '{}' doesn't exist. "
"Using default config.".format(config_file),
fg="red")
config_file = None
else:
# lets look for a config file in flaskbb's root folder
# TODO: are there any other places we should look for the config?
# Like somewhere in /etc/?
# this walks back to flaskbb/ from flaskbb/flaskbb/cli/main.py
# can't use current_app.root_path because it's not (yet) available
config_dir = os.path.dirname(
os.path.dirname(os.path.dirname(__file__))
)
config_file = os.path.join(config_dir, "flaskbb.cfg")
if os.path.exists(config_file):
click.secho("[+] Found config file 'flaskbb.cfg' in {}"
.format(config_dir), fg="yellow")
click.secho("[+] Using config from: {}".format(config_file),
fg="cyan")
else:
config_file = None
click.secho("[~] Using default config.", fg="yellow")
return create_app(config_file)
def set_config(ctx, param, value):
"""This will pass the config file to the create_app function."""
ctx.ensure_object(ScriptInfo).config_file = value
@click.group(cls=FlaskGroup, create_app=make_app, add_version_option=False)
@click.option("--config", expose_value=False, callback=set_config,
required=False, is_flag=False, is_eager=True, metavar="CONFIG",
help="Specify the config to use in dotted module notation "
"e.g. flaskbb.configs.default.DefaultConfig")
@click.option("--version", expose_value=False, callback=get_version,
is_flag=True, is_eager=True, help="Show the FlaskBB version.")
def flaskbb():
"""This is the commandline interface for flaskbb."""
pass
flaskbb.add_command(alembic_click, "db")
@flaskbb.command()
@click.option("--welcome", "-w", default=True, is_flag=True,
help="Disable the welcome forum.")
@click.option("--force", "-f", default=False, is_flag=True,
help="Doesn't ask for confirmation.")
@click.option("--username", "-u", help="The username of the user.")
@click.option("--email", "-e", type=EmailType(),
help="The email address of the user.")
@click.option("--password", "-p", help="The password of the user.")
@click.option("--group", "-g", help="The group of the user.",
type=click.Choice(["admin", "super_mod", "mod", "member"]))
def install(welcome, force, username, email, password, group):
"""Installs flaskbb. If no arguments are used, an interactive setup
will be run.
"""
click.secho("[+] Installing FlaskBB...", fg="cyan")
if database_exists(db.engine.url):
if force or click.confirm(click.style(
"Existing database found. Do you want to delete the old one and "
"create a new one?", fg="magenta")
):
drop_database(db.engine.url)
else:
sys.exit(0)
create_database(db.engine.url)
alembic.upgrade()
click.secho("[+] Creating default settings...", fg="cyan")
create_default_groups()
create_default_settings()
click.secho("[+] Creating admin user...", fg="cyan")
prompt_save_user(username, email, password, group)
if welcome:
click.secho("[+] Creating welcome forum...", fg="cyan")
create_welcome_forum()
click.secho("[+] Compiling translations...", fg="cyan")
compile_translations()
click.secho("[+] FlaskBB has been successfully installed!",
fg="green", bold=True)
@flaskbb.command()
@click.option("--test-data", "-t", default=False, is_flag=True,
help="Adds some test data.")
@click.option("--bulk-data", "-b", default=False, is_flag=True,
help="Adds a lot of data.")
@click.option("--posts", default=100,
help="Number of posts to create in each topic (default: 100).")
@click.option("--topics", default=100,
help="Number of topics to create (default: 100).")
@click.option("--force", "-f", is_flag=True,
help="Will delete the database before populating it.")
@click.option("--initdb", "-i", is_flag=True,
help="Initializes the database before populating it.")
def populate(bulk_data, test_data, posts, topics, force, initdb):
"""Creates the necessary tables and groups for FlaskBB."""
if force:
click.secho("[+] Recreating database...", fg="cyan")
drop_database(db.engine.url)
# do not initialize the db if -i is passed
if not initdb:
alembic.upgrade()
if initdb:
click.secho("[+] Initializing database...", fg="cyan")
alembic.upgrade()
if test_data:
click.secho("[+] Adding some test data...", fg="cyan")
create_test_data()
if bulk_data:
timer = time.time()
topic_count, post_count = insert_bulk_data(int(topics), int(posts))
elapsed = time.time() - timer
click.secho("[+] It took {} seconds to create {} topics and {} posts"
.format(elapsed, topic_count, post_count), fg="cyan")
# this just makes the most sense for the command name; use -i to
# init the db as well
if not test_data:
click.secho("[+] Populating the database with some defaults...",
fg="cyan")
create_default_groups()
create_default_settings()
@flaskbb.command()
def reindex():
"""Reindexes the search index."""
click.secho("[+] Reindexing search index...", fg="cyan")
whooshee.reindex()
@flaskbb.command()
@click.option("all_latest", "--all", "-a", default=False, is_flag=True,
help="Upgrades migrations AND fixtures to the latest version.")
@click.option("--fixture/", "-f", default=None,
help="The fixture which should be upgraded or installed.")
@click.option("--force", default=False, is_flag=True,
help="Forcefully upgrades the fixtures.")
def upgrade(all_latest, fixture, force):
"""Updates the migrations and fixtures."""
if all_latest:
click.secho("[+] Upgrading migrations to the latest version...",
fg="cyan")
alembic.upgrade()
if fixture or all_latest:
try:
settings = import_string(
"flaskbb.fixtures.{}".format(fixture)
)
settings = settings.fixture
except ImportError:
raise FlaskBBCLIError("{} fixture is not available"
.format(fixture), fg="red")
click.secho("[+] Updating fixtures...", fg="cyan")
count = update_settings_from_fixture(
fixture=settings, overwrite_group=force, overwrite_setting=force
)
click.secho("[+] {} groups and {} settings updated.".format(
len(count.keys()), len(count.values())), fg="green"
)
@flaskbb.command("download-emojis")
@with_appcontext
def download_emoji():
"""Downloads emojis from emoji-cheat-sheet.com.
    This command is probably going to be removed in a future version.
"""
click.secho("[+] Downloading emojis...", fg="cyan")
HOSTNAME = "https://api.github.com"
REPO = "/repos/arvida/emoji-cheat-sheet.com/contents/public/graphics/emojis" # noqa
FULL_URL = "{}{}".format(HOSTNAME, REPO)
DOWNLOAD_PATH = os.path.join(current_app.static_folder, "emoji")
response = requests.get(FULL_URL)
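    # The GitHub contents API returns a JSON list of file objects; only their
    # "name" and "download_url" fields are used below.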
cached_count = 0
count = 0
for image in response.json():
if not os.path.exists(os.path.abspath(DOWNLOAD_PATH)):
raise FlaskBBCLIError(
"{} does not exist.".format(os.path.abspath(DOWNLOAD_PATH)),
fg="red")
full_path = os.path.join(DOWNLOAD_PATH, image["name"])
if not os.path.exists(full_path):
count += 1
f = open(full_path, 'wb')
f.write(requests.get(image["download_url"]).content)
f.close()
if count == cached_count + 50:
cached_count = count
click.secho("[+] {} out of {} Emojis downloaded...".format(
cached_count, len(response.json())), fg="cyan")
click.secho("[+] Finished downloading {} Emojis.".format(count),
fg="green")
@flaskbb.command("celery", context_settings=dict(ignore_unknown_options=True,))
@click.argument('celery_args', nargs=-1, type=click.UNPROCESSED)
@click.option("show_help", "--help", "-h", is_flag=True,
help="Shows this message and exits")
@click.option("show_celery_help", "--help-celery", is_flag=True,
help="Shows the celery help message")
@click.pass_context
@with_appcontext
def start_celery(ctx, show_help, show_celery_help, celery_args):
"""Preconfigured wrapper around the 'celery' command.
Additional CELERY_ARGS arguments are passed to celery."""
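    # For example, ``flaskbb celery worker --loglevel=info`` ends up calling
    # celery.start(argv=["celery", "worker", "--loglevel=info"]).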
if show_help:
click.echo(ctx.get_help())
sys.exit(0)
if show_celery_help:
click.echo(celery.start(argv=["--help"]))
sys.exit(0)
default_args = ['celery']
default_args = default_args + list(celery_args)
celery.start(argv=default_args)
@flaskbb.command()
@click.option("--server", "-s", default="gunicorn",
type=click.Choice(["gunicorn", "gevent"]),
help="The WSGI Server to run FlaskBB on.")
@click.option("--host", "-h", default="127.0.0.1",
help="The interface to bind FlaskBB to.")
@click.option("--port", "-p", default="8000", type=int,
help="The port to bind FlaskBB to.")
@click.option("--workers", "-w", default=4,
help="The number of worker processes for handling requests.")
@click.option("--daemon", "-d", default=False, is_flag=True,
help="Starts gunicorn as daemon.")
@click.option("--config", "-c",
help="The configuration file to use for FlaskBB.")
def start(server, host, port, workers, config, daemon):
"""Starts a production ready wsgi server.
TODO: Figure out a way how to forward additional args to gunicorn
without causing any errors.
"""
if server == "gunicorn":
try:
from gunicorn.app.base import Application
class FlaskBBApplication(Application):
def __init__(self, app, options=None):
self.options = options or {}
self.application = app
super(FlaskBBApplication, self).__init__()
def load_config(self):
config = dict([
(key, value) for key, value in iteritems(self.options)
if key in self.cfg.settings and value is not None
])
for key, value in iteritems(config):
self.cfg.set(key.lower(), value)
def load(self):
return self.application
options = {
"bind": "{}:{}".format(host, port),
"workers": workers,
"daemon": daemon,
}
FlaskBBApplication(create_app(config=config), options).run()
except ImportError:
raise FlaskBBCLIError("Cannot import gunicorn. "
"Make sure it is installed.", fg="red")
elif server == "gevent":
try:
from gevent import __version__
from gevent.pywsgi import WSGIServer
click.secho("* Starting gevent {}".format(__version__))
click.secho("* Listening on http://{}:{}/".format(host, port))
http_server = WSGIServer((host, port), create_app(config=config))
http_server.serve_forever()
except ImportError:
raise FlaskBBCLIError("Cannot import gevent. "
"Make sure it is installed.", fg="red")
@flaskbb.command("shell", short_help="Runs a shell in the app context.")
@with_appcontext
def shell_command():
"""Runs an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to it"s configuration.
This is useful for executing small snippets of management code
without having to manually configuring the application.
This code snippet is taken from Flask"s cli module and modified to
run IPython and falls back to the normal shell if IPython is not
available.
"""
import code
banner = "Python %s on %s\nInstance Path: %s" % (
sys.version,
sys.platform,
current_app.instance_path,
)
ctx = {"db": db}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup, "r") as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(current_app.make_shell_context())
try:
import IPython
IPython.embed(banner1=banner, user_ns=ctx)
except ImportError:
code.interact(banner=banner, local=ctx)
@flaskbb.command("urls", short_help="Show routes for the app.")
@click.option("--route", "-r", "order_by", flag_value="rule", default=True,
help="Order by route")
@click.option("--endpoint", "-e", "order_by", flag_value="endpoint",
help="Order by endpoint")
@click.option("--methods", "-m", "order_by", flag_value="methods",
help="Order by methods")
@with_appcontext
def list_urls(order_by):
"""Lists all available routes."""
from flask import current_app
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order_by)
)
max_rule_len = max(len(rule.rule) for rule in rules)
max_rule_len = max(max_rule_len, len("Route"))
max_endpoint_len = max(len(rule.endpoint) for rule in rules)
max_endpoint_len = max(max_endpoint_len, len("Endpoint"))
max_method_len = max(len(", ".join(rule.methods)) for rule in rules)
max_method_len = max(max_method_len, len("Methods"))
column_header_len = max_rule_len + max_endpoint_len + max_method_len + 4
column_template = "{:<%s} {:<%s} {:<%s}" % (
max_rule_len, max_endpoint_len, max_method_len
)
click.secho(column_template.format("Route", "Endpoint", "Methods"),
fg="blue", bold=True)
click.secho("=" * column_header_len, bold=True)
for rule in rules:
methods = ", ".join(rule.methods)
click.echo(column_template.format(rule.rule, rule.endpoint, methods))
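def _example_column_template():
    # Hedged illustration (not in the original module) of the dynamic-width
    # formatting used above: "%s" substitutes the computed column widths,
    # then "{:<N}" left-aligns each cell to that width.
    template = "{:<%s} {:<%s}" % (10, 7)
    return template.format("Route", "Methods")  # -> 'Route      Methods'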
@flaskbb.command("makeconfig")
@click.option("--development", "-d", default=False, is_flag=True,
help="Creates a development config with DEBUG set to True.")
@click.option("--output", "-o", required=False,
help="The path where the config file will be saved at. "
"Defaults to the flaskbb's root folder.")
@click.option("--force", "-f", default=False, is_flag=True,
help="Overwrite any existing config file if one exists.")
def generate_config(development, output, force):
"""Generates a FlaskBB configuration file."""
config_env = Environment(
loader=FileSystemLoader(os.path.join(current_app.root_path, "configs"))
)
config_template = config_env.get_template('config.cfg.template')
if output:
config_path = os.path.abspath(output)
else:
config_path = os.path.dirname(current_app.root_path)
if os.path.exists(config_path) and not os.path.isfile(config_path):
config_path = os.path.join(config_path, "flaskbb.cfg")
default_conf = {
"is_debug": True,
"server_name": "localhost:5000",
"url_scheme": "http",
"database_uri": "sqlite:///" + os.path.join(
os.path.dirname(current_app.root_path), "flaskbb.sqlite"),
"redis_enabled": False,
"redis_uri": "redis://localhost:6379",
"mail_server": "localhost",
"mail_port": 25,
"mail_use_tls": False,
"mail_use_ssl": False,
"mail_username": "",
"mail_password": "",
"mail_sender_name": "FlaskBB Mailer",
"mail_sender_address": "noreply@yourdomain",
"mail_admin_address": "admin@yourdomain",
"secret_key": binascii.hexlify(os.urandom(24)).decode(),
"csrf_secret_key": binascii.hexlify(os.urandom(24)).decode(),
"timestamp": datetime.utcnow().strftime("%A, %d. %B %Y at %H:%M")
}
if not force:
config_path = prompt_config_path(config_path)
if force and os.path.exists(config_path):
click.secho("Overwriting existing config file: {}".format(config_path),
fg="yellow")
if development:
write_config(default_conf, config_template, config_path)
sys.exit(0)
# SERVER_NAME
click.secho("The name and port number of the server.\n"
"This is needed to correctly generate URLs when no request "
"context is available.", fg="cyan")
default_conf["server_name"] = click.prompt(
click.style("Server Name", fg="magenta"), type=str,
default=default_conf.get("server_name"))
# PREFERRED_URL_SCHEME
click.secho("The URL Scheme is also needed in order to generate correct "
"URLs when no request context is available.\n"
"Choose either 'https' or 'http'.", fg="cyan")
default_conf["url_scheme"] = click.prompt(
click.style("URL Scheme", fg="magenta"),
type=click.Choice(["https", "http"]),
default=default_conf.get("url_scheme"))
# SQLALCHEMY_DATABASE_URI
click.secho("For Postgres use:\n"
" postgresql://flaskbb@localhost:5432/flaskbb\n"
"For more options see the SQLAlchemy docs:\n"
" http://docs.sqlalchemy.org/en/latest/core/engines.html",
fg="cyan")
default_conf["database_url"] = click.prompt(
click.style("Database URI", fg="magenta"),
default=default_conf.get("database_uri"))
# REDIS_ENABLED
click.secho("Redis will be used for things such as the task queue, "
"caching and rate limiting.", fg="cyan")
default_conf["redis_enabled"] = click.confirm(
click.style("Would you like to use redis?", fg="magenta"),
default=True) # default_conf.get("redis_enabled") is False
# REDIS_URI
if default_conf.get("redis_enabled", False):
default_conf["redis_uri"] = click.prompt(
click.style("Redis URI", fg="magenta"),
default=default_conf.get("redis_uri"))
else:
default_conf["redis_uri"] = ""
# MAIL_SERVER
click.secho("To use 'localhost' make sure that you have sendmail or\n"
"something similar installed. Gmail is also supprted.",
fg="cyan")
default_conf["mail_server"] = click.prompt(
click.style("Mail Server", fg="magenta"),
default=default_conf.get("mail_server"))
# MAIL_PORT
click.secho("The port on which the SMTP server is listening on.",
fg="cyan")
default_conf["mail_port"] = click.prompt(
click.style("Mail Server SMTP Port", fg="magenta"),
default=default_conf.get("mail_port"))
# MAIL_USE_TLS
click.secho("If you are using a local SMTP server like sendmail this is "
"not needed. For external servers it is required.",
fg="cyan")
default_conf["mail_use_tls"] = click.confirm(
click.style("Use TLS for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_tls"))
# MAIL_USE_SSL
click.secho("Same as above. TLS is the successor to SSL.", fg="cyan")
default_conf["mail_use_ssl"] = click.confirm(
click.style("Use SSL for sending mails?", fg="magenta"),
default=default_conf.get("mail_use_ssl"))
# MAIL_USERNAME
click.secho("Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your email address here.", fg="cyan")
default_conf["mail_username"] = click.prompt(
click.style("Mail Username", fg="magenta"),
default=default_conf.get("mail_username"))
# MAIL_PASSWORD
click.secho("Not needed if you are using a local smtp server.\nFor gmail "
"you have to put in your gmail password here.", fg="cyan")
default_conf["mail_password"] = click.prompt(
click.style("Mail Password", fg="magenta"),
default=default_conf.get("mail_password"))
# MAIL_DEFAULT_SENDER
click.secho("The name of the sender. You probably want to change it to "
"something like '<your_community> Mailer'.", fg="cyan")
default_conf["mail_sender_name"] = click.prompt(
click.style("Mail Sender Name", fg="magenta"),
default=default_conf.get("mail_sender_name"))
click.secho("On localhost you want to use a noreply address here. "
"Use your email address for gmail here.", fg="cyan")
default_conf["mail_sender_address"] = click.prompt(
click.style("Mail Sender Address", fg="magenta"),
default=default_conf.get("mail_sender_address"))
# ADMINS
click.secho("Logs and important system messages are sent to this address."
"Use your email address for gmail here.", fg="cyan")
default_conf["mail_admin_address"] = click.prompt(
click.style("Mail Admin Email", fg="magenta"),
default=default_conf.get("mail_admin_address"))
write_config(default_conf, config_template, config_path)
# Finished
click.secho("The configuration file has been saved to:\n{cfg}\n"
"Feel free to adjust it as needed."
.format(cfg=config_path), fg="blue", bold=True)
click.secho("Usage: \nflaskbb --config {cfg} run"
.format(cfg=config_path), fg="green")
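# Hedged usage examples for the makeconfig command above (the output path is
# a placeholder):
#   flaskbb makeconfig --development
#   flaskbb makeconfig --output /etc/flaskbb/flaskbb.cfg --force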
| 40.40404 | 88 | 0.617083 | 2,960 | 24,000 | 4.867905 | 0.190203 | 0.030536 | 0.014574 | 0.012631 | 0.217156 | 0.155667 | 0.10896 | 0.0719 | 0.042335 | 0.037338 | 0 | 0.003295 | 0.253917 | 24,000 | 593 | 89 | 40.472175 | 0.801407 | 0.089333 | 0 | 0.142222 | 0 | 0 | 0.270626 | 0.00789 | 0 | 0 | 0 | 0.003373 | 0 | 1 | 0.035556 | false | 0.022222 | 0.077778 | 0.002222 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e63bdf2c11cd263f543defd3625dc398858335 | 3,866 | py | Python | openfda/device_clearance/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 388 | 2015-01-09T18:50:35.000Z | 2022-03-24T10:15:23.000Z | openfda/device_clearance/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 150 | 2015-01-21T20:30:54.000Z | 2022-03-28T20:46:29.000Z | openfda/device_clearance/pipeline.py | FDA/openfda | 93c3abed4042a4a2729975468c4e377a67e8a5ca | [
"CC0-1.0"
] | 113 | 2015-01-31T21:24:16.000Z | 2022-01-30T15:17:28.000Z | #!/usr/bin/python
''' 510k pipeline for downloading, transforming to JSON and loading into
Elasticsearch.
'''
import glob
import os
import re
from urllib.request import urlopen
from os.path import dirname, join
import luigi
from bs4 import BeautifulSoup
from openfda import common, config, index_util, parallel
from openfda import download_util
from openfda.common import first_file_timestamp
from openfda.device_clearance import transform
from openfda.device_harmonization.pipeline import (Harmonized2OpenFDA,
DeviceAnnotateMapper)
RUN_DIR = dirname(dirname(os.path.abspath(__file__)))
# A directory for holding files that track Task state
META_DIR = config.data_dir('510k/meta')
RAW_DIR = config.data_dir('510k/raw')
common.shell_cmd('mkdir -p %s', META_DIR)
CLEARED_DEVICE_URL = 'https://www.fda.gov/medical-devices/510k-clearances/downloadable-510k-files'
class Download_510K(luigi.Task):
def requires(self):
return []
def output(self):
return luigi.LocalTarget(RAW_DIR)
def run(self):
soup = BeautifulSoup(urlopen(CLEARED_DEVICE_URL).read(), 'lxml')
    # Escape the dot so only genuine ".zip" links match.
    for a in soup.find_all(href=re.compile(r'.*\.zip')):
if a.text.startswith('PMN') and a.text != 'PMNLSTMN.ZIP':
fileURL = a['href']
common.download(fileURL, join(self.output().path, a['href'].split('/')[-1]))
class ExtractAndCleanDownloads510k(luigi.Task):
''' Unzip each of the download files and remove all the non-UTF8 characters.
Unzip -p streams the data directly to iconv which then writes to disk.
'''
def requires(self):
return Download_510K()
def output(self):
return luigi.LocalTarget(config.data_dir('510k/extracted'))
def run(self):
output_dir = self.output().path
common.shell_cmd('mkdir -p %s', output_dir)
input_dir = self.input().path
download_util.extract_and_clean(input_dir, 'ISO-8859-1', 'UTF-8', 'txt')
class Clearance2JSON(parallel.MRTask):
def map(self, key, value, output):
# TODO(hansnelsen): bring the `transform.py` logic into the mapper and
# remove the file.
new_value = transform.transform_device_clearance(value)
output.add(self.filename + ':' + key, new_value)
def requires(self):
return ExtractAndCleanDownloads510k()
def output(self):
return luigi.LocalTarget(config.data_dir('510k', 'json.db'))
def mapreduce_inputs(self):
input_files = glob.glob(self.input().path + '/*.txt')
return parallel.Collection.from_glob(
input_files, parallel.CSVDictLineInput(delimiter='|', strip_str='\0'))
class ClearanceAnnotateMapper(DeviceAnnotateMapper):
def filter(self, data):
product_code = data['product_code']
harmonized = self.harmonized_db.get(product_code, None)
if harmonized:
# 510k should never have a PMA openfda key
if 'device_pma' in harmonized:
del harmonized['device_pma']
if self.table in harmonized:
del harmonized[self.table]
return harmonized
return None
class AnnotateDevice(luigi.Task):
def requires(self):
return [Harmonized2OpenFDA(), Clearance2JSON()]
def output(self):
return luigi.LocalTarget(config.data_dir('510k','annotate.db'))
def run(self):
harmonized_db = parallel.ShardedDB.open(self.input()[0].path).as_dict()
parallel.mapreduce(
parallel.Collection.from_sharded(self.input()[1].path),
mapper=ClearanceAnnotateMapper(harmonized_db=harmonized_db),
reducer=parallel.IdentityReducer(),
output_prefix=self.output().path,
num_shards=10)
class LoadJSON(index_util.LoadJSONBase):
index_name = 'deviceclearance'
type_name = 'device510k'
mapping_file = './schemas/clearance_mapping.json'
data_source = AnnotateDevice()
use_checksum = False
optimize_index = True
last_update_date = lambda _: first_file_timestamp(RAW_DIR)
if __name__ == '__main__':
luigi.run()
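# Hedged usage note (standard luigi invocation, not from the original file):
# running the final task pulls in the whole dependency chain
# Download_510K -> ExtractAndCleanDownloads510k -> Clearance2JSON ->
# AnnotateDevice -> LoadJSON, e.g.:
#   python pipeline.py LoadJSON --local-scheduler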
| 30.928 | 98 | 0.722193 | 502 | 3,866 | 5.40239 | 0.378486 | 0.029499 | 0.023968 | 0.031342 | 0.122788 | 0.108038 | 0.057522 | 0.057522 | 0.057522 | 0.057522 | 0 | 0.018484 | 0.160372 | 3,866 | 124 | 99 | 31.177419 | 0.817006 | 0.114071 | 0 | 0.129412 | 0 | 0.011765 | 0.092222 | 0.009428 | 0 | 0 | 0 | 0.008065 | 0 | 1 | 0.164706 | false | 0 | 0.141176 | 0.094118 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7e7fe013e5ce307df134f4f6388446b325ecad5 | 4,071 | py | Python | tests/test_game.py | jordimarinvalle/tictactoexxl | f20771fcc3d15d4a4baef392bb19b7a59703ee32 | [
"MIT"
] | null | null | null | tests/test_game.py | jordimarinvalle/tictactoexxl | f20771fcc3d15d4a4baef392bb19b7a59703ee32 | [
"MIT"
] | null | null | null | tests/test_game.py | jordimarinvalle/tictactoexxl | f20771fcc3d15d4a4baef392bb19b7a59703ee32 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from tictactoexxl.game import Game
from tictactoexxl.board import Board
from tictactoexxl.board import BoardPosition
from tictactoexxl.player import Player
class TestTicTacToeXXLGame(object):
board = None
player1 = None
player2 = None
PLAYER1_NAME = "ttt"
PLAYER1_MOVE_REPRESENTATION = "M"
PLAYER2_NAME = "tttxxl"
PLAYER2_MOVE_REPRESENTATION = "W"
def setup_method(self, _):
self.board = Board()
self.player1 = Player(self.PLAYER1_NAME,
self.PLAYER1_MOVE_REPRESENTATION)
self.player2 = Player(self.PLAYER2_NAME,
self.PLAYER2_MOVE_REPRESENTATION)
self.game = Game(board=self.board,
players=[self.player1, self.player2])
def test_game_winning_n_in_a_row_ok_1(self):
assert Game.is_winning_n_in_a_row_ok(num_players=2,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ok_2(self):
assert Game.is_winning_n_in_a_row_ok(num_players=4,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ok_3(self):
assert Game.is_winning_n_in_a_row_ok(num_players=3,
board_dim_x=2,
board_dim_y=4,
n_in_a_row=3) is True
def test_game_winning_n_in_a_row_ko_1(self):
assert Game.is_winning_n_in_a_row_ok(num_players=2,
board_dim_x=5,
board_dim_y=5,
n_in_a_row=6) is False
def test_game_winning_n_in_a_row_ko_2(self):
assert Game.is_winning_n_in_a_row_ok(num_players=5,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=3) is False
def test_game_winning_n_in_a_row_ko_3(self):
assert Game.is_winning_n_in_a_row_ok(num_players=5,
board_dim_x=3,
board_dim_y=3,
n_in_a_row=4) is False
def test_game_winning_n_in_a_row_ko_4(self):
assert Game.is_winning_n_in_a_row_ok(num_players=3,
board_dim_x=2,
board_dim_y=5,
n_in_a_row=5) is False
def test_game_players(self):
        assert len(self.game.players) == 2
def test_game_get_players_move_representations(self):
set_1 = set(self.game.get_players_move_representations())
set_2 = set([self.PLAYER1_MOVE_REPRESENTATION,
self.PLAYER2_MOVE_REPRESENTATION])
assert set_2.difference(set_1) == set()
def test_game_player_make_a_move(self):
board_position = BoardPosition("a", "1")
self.game.player_make_a_move(self.player1, board_position)
slot_value = self.game.board.get_slot_value(board_position)
        assert slot_value == self.player1.move_repr
def test_game_has_player_won(self):
board_position_1 = BoardPosition("a", "1")
self.game.player_make_a_move(self.player1, board_position_1)
board_position_2 = BoardPosition("a", "2")
self.game.player_make_a_move(self.player1, board_position_2)
board_position_3 = BoardPosition("a", "3")
self.game.player_make_a_move(self.player1, board_position_3)
assert self.game.has_player_won(self.player1, board_position_3) is True
if __name__ == '__main__':
pytest.main()
| 38.40566 | 79 | 0.550725 | 516 | 4,071 | 3.881783 | 0.125969 | 0.031453 | 0.041937 | 0.07339 | 0.595107 | 0.522217 | 0.470794 | 0.470794 | 0.458313 | 0.443834 | 0 | 0.029423 | 0.382216 | 4,071 | 105 | 80 | 38.771429 | 0.766998 | 0.005158 | 0 | 0.269231 | 0 | 0 | 0.00667 | 0 | 0 | 0 | 0 | 0 | 0.141026 | 1 | 0.153846 | false | 0 | 0.064103 | 0 | 0.320513 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ea1ca9b17ce839419febdbeb4b3749c49816ca | 11,643 | py | Python | test/petab/test_petab.py | Gabriel-p/pyABC | a1c963203c9f9e3fa40793ccf214753fb689d27f | [
"BSD-3-Clause"
] | null | null | null | test/petab/test_petab.py | Gabriel-p/pyABC | a1c963203c9f9e3fa40793ccf214753fb689d27f | [
"BSD-3-Clause"
] | null | null | null | test/petab/test_petab.py | Gabriel-p/pyABC | a1c963203c9f9e3fa40793ccf214753fb689d27f | [
"BSD-3-Clause"
] | null | null | null | import itertools
import os
import sys
import amici.petab_import
import git
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import petab
import petab.C as C
import pytest
import scipy.stats
import pyabc.petab
import pyabc.petab.base
@pytest.fixture(
params=itertools.product(
[petab.C.LIN, petab.C.LOG, petab.C.LOG10], [*petab.C.PRIOR_TYPES, None]
)
)
def prior_specs(request):
"""A one-line parameter df for a given prior type."""
scale, prior_type = request.param
var1, var2 = 0.2, 0.9
if prior_type:
# dataframe with objective prior
df = pd.DataFrame(
{
C.PARAMETER_ID: ['p1'],
C.ESTIMATE: [1],
C.PARAMETER_SCALE: [scale],
C.LOWER_BOUND: [np.nan],
C.UPPER_BOUND: [np.nan],
C.OBJECTIVE_PRIOR_TYPE: [prior_type],
C.OBJECTIVE_PRIOR_PARAMETERS: [f"{var1};{var2}"],
}
)
else:
# also consider the case that no prior is specified, resulting in a
# parameter scale uniform prior within the rescaled bounds
# unscale variables
unscaled_var1, unscaled_var2 = var1, var2
if scale == C.LOG:
unscaled_var1, unscaled_var2 = np.exp([var1, var2])
elif scale == C.LOG10:
unscaled_var1, unscaled_var2 = 10.0 ** var1, 10.0 ** var2
# dataframe without objective prior
df = pd.DataFrame(
{
C.PARAMETER_ID: ['p1'],
C.ESTIMATE: [1],
C.PARAMETER_SCALE: [scale],
C.LOWER_BOUND: [unscaled_var1],
C.UPPER_BOUND: [unscaled_var2],
}
)
# expected default if objective type not set
prior_type = C.PARAMETER_SCALE_UNIFORM
yield scale, prior_type, var1, var2, df
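# Note (illustrative, not from the original file): each fixture instance above
# yields a single-row parameter table; e.g. for (LOG10, NORMAL) the
# objectivePriorParameters string "0.2;0.9" encodes the prior's mean and
# standard deviation, as the assertions in test_petab_prior below confirm.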
def test_petab_prior(prior_specs):
"""Test whether the prior is correctly defined by sampling from it."""
    # need to fix the random seed due to the stochasticity of multiple testing
np.random.seed(0)
# extract settings
scale, prior_type, var1, var2, parameter_df = prior_specs
# create prior from petab data frame
pyabc_prior = pyabc.petab.base.create_prior(parameter_df)
# generate random samples
n_samples = 10000
samples = pyabc_prior.rvs(size=n_samples)['p1']
# -- UNIFORM COVERAGE -- #
# check that uniform parameters fill their domain
if prior_type in [C.UNIFORM, C.PARAMETER_SCALE_UNIFORM]:
assert (samples >= var1).all() and (samples <= var2).all()
assert (samples >= var2 - (var2 - var1) * 0.01).any() and (
samples <= var1 + (var2 - var1) * 0.01
).any()
# -- MEAN AND VARIANCE -- #
# sample mean and variance
mean = np.mean(samples)
var = np.var(samples)
# ground truth mean and variance
if prior_type in [C.UNIFORM, C.PARAMETER_SCALE_UNIFORM]:
mean_th = var1 + (var2 - var1) / 2
var_th = (var2 - var1) ** 2 / 12
elif prior_type in [C.NORMAL, C.PARAMETER_SCALE_NORMAL]:
mean_th = var1
var_th = var2 ** 2
elif prior_type in [C.LAPLACE, C.PARAMETER_SCALE_LAPLACE]:
mean_th = var1
var_th = 2 * var2 ** 2
elif prior_type == C.LOG_NORMAL:
# just log-transform all
mean = np.mean(np.log(samples))
var = np.var(np.log(samples))
mean_th = var1
var_th = var2 ** 2
elif prior_type == C.LOG_LAPLACE:
# just log-transform all
mean = np.mean(np.log(samples))
var = np.var(np.log(samples))
mean_th = var1
var_th = 2 * var2 ** 2
else:
raise ValueError(f"Unexpected prior type: {prior_type}")
# multiplicative tolerance of sample vs ground truth variables
tol = 0.8
# compare means and variances
assert mean_th * tol < mean < mean_th * 1 / tol
assert var_th * tol < var < var_th * 1 / tol
# -- KOLMOGOROV-SMIRNOV CDF COMPARISON -- #
# create distribution object
if prior_type in [C.UNIFORM, C.PARAMETER_SCALE_UNIFORM]:
distr = scipy.stats.uniform(loc=var1, scale=var2 - var1)
elif prior_type in [C.NORMAL, C.PARAMETER_SCALE_NORMAL]:
distr = scipy.stats.norm(loc=var1, scale=var2)
elif prior_type in [C.LAPLACE, C.PARAMETER_SCALE_LAPLACE]:
distr = scipy.stats.laplace(loc=var1, scale=var2)
elif prior_type == C.LOG_NORMAL:
distr = scipy.stats.lognorm(s=var2, loc=0, scale=np.exp(var1))
elif prior_type == C.LOG_LAPLACE:
distr = scipy.stats.loglaplace(c=1 / var2, scale=np.exp(var1))
else:
raise ValueError(f"Unexpected prior type: {prior_type}")
# perform KS test
_, p_value = scipy.stats.kstest(samples, distr.cdf)
# at least check that there are no highly significant differences
assert p_value > 1e-2
# dummy check that the test recognizes use of the wrong distribution
if prior_type in [C.NORMAL, C.PARAMETER_SCALE_NORMAL]:
distr = scipy.stats.laplace(loc=var1, scale=var2)
_, p_value = scipy.stats.kstest(samples, distr.cdf)
assert p_value < 1e-5
def test_parameter_fixing():
"""Test that only free parameters are exposed to pyABC."""
# define problem with fixed parameters
parameter_df = pd.DataFrame(
{
C.PARAMETER_ID: ['p1', 'p2', 'p3'],
C.ESTIMATE: [1, 0, 1],
C.PARAMETER_SCALE: [C.LIN] * 3,
C.LOWER_BOUND: [0] * 3,
C.UPPER_BOUND: [1] * 3,
C.OBJECTIVE_PRIOR_TYPE: [C.PARAMETER_SCALE_UNIFORM] * 3,
}
).set_index(C.PARAMETER_ID)
# create prior from petab data frame
pyabc_prior = pyabc.petab.base.create_prior(parameter_df)
# create a sample
sample = pyabc_prior.rvs()
# check the entries
assert set(sample.keys()) == {'p1', 'p3'}
def test_get_nominal_parameters():
"""Test extraction of nominal parameters."""
parameter_df = pd.DataFrame(
{
C.PARAMETER_ID: ['p1', 'p2', 'p3'],
C.NOMINAL_VALUE: [2] * 3,
C.LOWER_BOUND: [1] * 3,
C.UPPER_BOUND: [3] * 3,
C.ESTIMATE: [1] * 3,
C.PARAMETER_SCALE: [C.LIN, C.LOG, C.LOG10],
C.OBJECTIVE_PRIOR_TYPE: [
C.PARAMETER_SCALE_UNIFORM,
C.PARAMETER_SCALE_UNIFORM,
C.UNIFORM,
],
C.OBJECTIVE_PRIOR_PARAMETERS: ["1;4", "1;3", "0;0.7"],
}
).set_index(C.PARAMETER_ID)
# expected nominal parameters
expected = {
C.LIN: pyabc.Parameter({'p1': 2, 'p2': 2, 'p3': 2}),
'prior': pyabc.Parameter({'p1': 2, 'p2': np.log(2), 'p3': 2}),
'scaled': pyabc.Parameter(
{'p1': 2, 'p2': np.log(2), 'p3': np.log10(2)}
),
}
# get scales
prior_scales, scaled_scales = pyabc.petab.base.get_scales(parameter_df)
# run for all target_scales
for scale in expected:
x_nominal = pyabc.petab.base.get_nominal_parameters(
parameter_df, scale, prior_scales, scaled_scales
)
assert x_nominal == expected[scale]
# raise
with pytest.raises(ValueError):
pyabc.petab.base.get_nominal_parameters(
parameter_df, C.LOG, prior_scales, scaled_scales
)
def test_get_bounds():
"""Test that bounds are extracted correctly."""
parameter_df = pd.DataFrame(
{
C.PARAMETER_ID: ['p1', 'p2', 'p3', 'p4'],
C.ESTIMATE: [1] * 4,
C.PARAMETER_SCALE: [C.LIN, C.LOG, C.LOG10, C.LOG10],
C.LOWER_BOUND: [1] * 4,
C.UPPER_BOUND: [3] * 4,
C.OBJECTIVE_PRIOR_TYPE: [
C.PARAMETER_SCALE_UNIFORM,
C.UNIFORM,
C.PARAMETER_SCALE_UNIFORM,
C.LAPLACE,
],
C.OBJECTIVE_PRIOR_PARAMETERS: ["1;4", "1;3", "0;0.7", "1;4"],
}
).set_index(C.PARAMETER_ID)
# most common use case
prior_scales, scaled_scales = pyabc.petab.base.get_scales(parameter_df)
bounds = pyabc.petab.base.get_bounds(
parameter_df, 'prior', prior_scales, scaled_scales, use_prior=True
)
assert bounds == {'p1': (1, 4), 'p2': (1, 3), 'p3': (0, 0.7), 'p4': (1, 3)}
# no prior parameter overrides
prior_scales, scaled_scales = pyabc.petab.base.get_scales(parameter_df)
bounds = pyabc.petab.base.get_bounds(
parameter_df, 'prior', prior_scales, scaled_scales, use_prior=False
)
assert bounds == {
'p1': (1, 3),
'p2': (1, 3),
'p3': (np.log10(1), np.log10(3)),
'p4': (1, 3),
}
# all on scale
prior_scales, scaled_scales = pyabc.petab.base.get_scales(parameter_df)
bounds = pyabc.petab.base.get_bounds(
parameter_df, 'scaled', prior_scales, scaled_scales, use_prior=False
)
assert bounds == {
'p1': (1, 3),
'p2': (np.log(1), np.log(3)),
'p3': (np.log10(1), np.log10(3)),
'p4': (np.log10(1), np.log10(3)),
}
# all off scale
prior_scales, scaled_scales = pyabc.petab.base.get_scales(parameter_df)
bounds = pyabc.petab.base.get_bounds(
parameter_df, C.LIN, prior_scales, scaled_scales, use_prior=False
)
assert bounds == {'p1': (1, 3), 'p2': (1, 3), 'p3': (1, 3), 'p4': (1, 3)}
# raise
with pytest.raises(ValueError):
pyabc.petab.base.get_bounds(
parameter_df, C.LOG, prior_scales, scaled_scales, use_prior=True
)
def test_pipeline():
"""Test the petab pipeline on an application model."""
# download archive
benchmark_dir = "doc/examples/tmp/benchmark-models-petab"
if not os.path.exists(benchmark_dir):
git.Repo.clone_from(
"https://github.com/benchmarking-initiative"
"/benchmark-models-petab.git",
benchmark_dir,
depth=1,
)
g = git.Git(benchmark_dir)
# update repo if online
try:
g.pull()
except git.exc.GitCommandError:
pass
# create problem
model_name = 'Boehm_JProteomeRes2014'
petab_problem = petab.Problem.from_yaml(
os.path.join(
benchmark_dir, 'Benchmark-Models', model_name, model_name + '.yaml'
)
)
# compile amici
output_folder = f'amici_models/{model_name}'
if output_folder not in sys.path:
sys.path.insert(0, output_folder)
model = amici.petab_import.import_petab_problem(
petab_problem, model_output_dir=output_folder
)
solver = model.getSolver()
# import to pyabc
importer = pyabc.petab.AmiciPetabImporter(petab_problem, model, solver)
# extract required objects
prior = importer.create_prior()
model = importer.create_model()
kernel = importer.create_kernel()
# call model
assert np.isclose(
model(importer.get_nominal_parameters())['llh'], -138.221996
)
# mini analysis, just to run it
temperature = pyabc.Temperature(
enforce_exact_final_temperature=False,
schemes=[pyabc.AcceptanceRateScheme()],
)
acceptor = pyabc.StochasticAcceptor()
abc = pyabc.ABCSMC(
model,
prior,
kernel,
eps=temperature,
acceptor=acceptor,
population_size=10,
)
abc.new(pyabc.storage.create_sqlite_db_id(), None)
h = abc.run(max_nr_populations=1)
# visualize
pyabc.visualization.plot_kde_matrix_highlevel(
h,
limits=importer.get_bounds(),
refval=importer.get_nominal_parameters(),
refval_color='grey',
names=importer.get_parameter_names(),
)
plt.close()
| 31.89863 | 79 | 0.601821 | 1,510 | 11,643 | 4.470861 | 0.198676 | 0.035995 | 0.042216 | 0.040883 | 0.436232 | 0.406606 | 0.389424 | 0.374167 | 0.322619 | 0.268108 | 0 | 0.031317 | 0.278708 | 11,643 | 364 | 80 | 31.986264 | 0.772565 | 0.138109 | 0 | 0.293893 | 0 | 0 | 0.039976 | 0.01135 | 0 | 0 | 0 | 0 | 0.049618 | 1 | 0.022901 | false | 0.003817 | 0.087786 | 0 | 0.110687 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ea23679ac8c6baab271898f0b380cd592f5b79 | 6,736 | py | Python | gabrieltool/statemachine/callable_zoo/processor_zoo/base.py | junjuew/gabriel-tool | 030d623511a19e06f6340523733207d6bca63a65 | [
"Apache-2.0"
] | 1 | 2020-04-20T02:12:39.000Z | 2020-04-20T02:12:39.000Z | gabrieltool/statemachine/callable_zoo/processor_zoo/base.py | junjuew/gabriel-tool | 030d623511a19e06f6340523733207d6bca63a65 | [
"Apache-2.0"
] | 14 | 2018-12-17T23:21:17.000Z | 2019-04-23T18:47:27.000Z | gabrieltool/statemachine/callable_zoo/processor_zoo/base.py | cmusatyalab/OpenWorkflow | 7a79c7383e3fcb7ff6a24c762260fab21d4792ef | [
"Apache-2.0"
] | 1 | 2021-09-23T20:28:55.000Z | 2021-09-23T20:28:55.000Z | # -*- coding: utf-8 -*-
"""Basic callable classes for Processor.
"""
import copy
import cv2
import numpy as np
from logzero import logger
from gabrieltool.statemachine.callable_zoo import record_kwargs
from gabrieltool.statemachine.callable_zoo import CallableBase
def visualize_detections(img, results):
"""Visualize object detection outputs.
This is a helper function for debugging processor callables.
    The results should follow Gabrieltool's convention, described below.
    Arguments:
        img {OpenCV Image} -- image to draw the detections on
        results {Dictionary} -- a dictionary of class_idx -> [[x1, y1, x2, y2, confidence, cls_idx], ...]
Returns:
OpenCV Image -- Image with detected objects annotated
"""
img_detections = img.copy()
for _, dets in results.items():
for i in range(len(dets)):
cls_name = str(dets[i][-1])
bbox = dets[i][:4]
score = dets[i][-2]
text = "%s : %f" % (cls_name, score)
cv2.rectangle(img_detections, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 0, 255), 8)
cv2.putText(img_detections, text, (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
return img_detections
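def _example_visualize_detections():
    # Hedged usage sketch (all values are made up): one detection in the
    # [x1, y1, x2, y2, confidence, class_idx] layout described above, drawn
    # on a blank frame. Relies on the module-level numpy/cv2 imports.
    img = np.zeros((480, 640, 3), dtype=np.uint8)
    results = {0: [[50, 60, 200, 220, 0.91, 0]]}
    return visualize_detections(img, results)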
class DummyCallable(CallableBase):
"""A Dummy Callable class for testing and examples.
"""
@record_kwargs
def __init__(self, dummy_input='dummy_input_value'):
super(DummyCallable, self).__init__()
def __call__(self, image, debug=False):
return {'dummy_key': 'dummy_value'}
class FasterRCNNOpenCVCallable(CallableBase):
"""A callable class that executes a FasterRCNN object detection model using OpenCV.
"""
@record_kwargs
def __init__(self, proto_path, model_path, labels=None, conf_threshold=0.8):
"""Constructor.
Args:
proto_path (string): Path to the caffe proto files that defines the DNN.
model_path (string): Path to the model weights file.
labels (list of string, optional): List of labels. Defaults to None.
conf_threshold (float, optional): Confidence threshold for a detection. Defaults to 0.8.
"""
# For default parameter settings,
# see:
# https://github.com/rbgirshick/fast-rcnn/blob/b612190f279da3c11dd8b1396dd5e72779f8e463/lib/fast_rcnn/config.py
super(FasterRCNNOpenCVCallable, self).__init__()
self._scale = 600
self._max_size = 1000
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
self._pixel_means = [102.9801, 115.9465, 122.7717]
self._nms_threshold = 0.3
self._labels = labels
self._net = cv2.dnn.readNetFromCaffe(proto_path, model_path)
self._conf_threshold = conf_threshold
logger.debug(
'Created a FasterRCNNOpenCVProcessor:\nDNN proto definition is at {}\n'
'model weight is at {}\nlabels are {}\nconf_threshold is {}'.format(
proto_path, model_path, self._labels, self._conf_threshold))
@classmethod
def from_json(cls, json_obj):
"""Create an object from a JSON object.
Args:
json_obj (json): JSON object that has all the serialized constructor
arguments.
Raises:
ValueError: when constructor arguments' type don't match.
Returns:
FasterRCNNOpenCVCallable: The deserialized FasterRCNNOpenCVCallable object.
"""
try:
kwargs = copy.copy(json_obj)
kwargs['labels'] = json_obj['labels']
            # conf_threshold may arrive as a string in JSON; coerce it to
            # float under the keyword the constructor actually accepts.
            kwargs['conf_threshold'] = float(json_obj['conf_threshold'])
except ValueError as e:
raise ValueError(
'Failed to convert json object to {} instance. '
'The input json object is {}. ({})'.format(cls.__name__,
json_obj, e))
        return cls(**kwargs)
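    # Hedged sketch of the JSON shape from_json expects (keys inferred from
    # the constructor signature above; paths and labels are placeholders):
    # {
    #     "proto_path": "models/faster_rcnn.prototxt",
    #     "model_path": "models/faster_rcnn.caffemodel",
    #     "labels": ["person", "chair"],
    #     "conf_threshold": "0.8"
    # }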
def _getOutputsNames(self, net):
layersNames = net.getLayerNames()
return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
def __call__(self, image):
height, width = image.shape[:2]
# resize image to correct size
im_size_min = np.min(image.shape[0:2])
im_size_max = np.max(image.shape[0:2])
im_scale = float(self._scale) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > self._max_size:
im_scale = float(self._max_size) / float(im_size_max)
im = cv2.resize(image, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
# create input data
blob = cv2.dnn.blobFromImage(im, 1, (width, height), self._pixel_means,
swapRB=False, crop=False)
imInfo = np.array([height, width, im_scale], dtype=np.float32)
self._net.setInput(blob, 'data')
self._net.setInput(imInfo, 'im_info')
# infer
outs = self._net.forward(self._getOutputsNames(self._net))
t, _ = self._net.getPerfProfile()
logger.debug('Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency()))
# postprocess
classIds = []
confidences = []
boxes = []
for out in outs:
for detection in out[0, 0]:
confidence = detection[2]
if confidence > self._conf_threshold:
left = int(detection[3])
top = int(detection[4])
right = int(detection[5])
bottom = int(detection[6])
width = right - left + 1
height = bottom - top + 1
classIds.append(int(detection[1]) - 1) # Skip background label
confidences.append(float(confidence))
boxes.append([left, top, width, height])
indices = cv2.dnn.NMSBoxes(boxes, confidences, self._conf_threshold, self._nms_threshold)
results = {}
for i in indices:
i = i[0]
box = boxes[i]
left = box[0]
top = box[1]
width = box[2]
height = box[3]
classId = int(classIds[i])
confidence = confidences[i]
if self._labels[classId] not in results:
results[self._labels[classId]] = []
results[self._labels[classId]].append([left, top, left+width, top+height, confidence, classId])
logger.debug('results: {}'.format(results))
return results
| 39.162791 | 119 | 0.595903 | 796 | 6,736 | 4.878141 | 0.340452 | 0.030131 | 0.017512 | 0.013907 | 0.071079 | 0.030904 | 0 | 0 | 0 | 0 | 0 | 0.027684 | 0.297506 | 6,736 | 171 | 120 | 39.391813 | 0.792899 | 0.240647 | 0 | 0.019608 | 0 | 0 | 0.068237 | 0.006296 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068627 | false | 0 | 0.058824 | 0.009804 | 0.196078 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ecf23a96ea53951f75cfd38e0a7ee0d80fe388 | 1,074 | py | Python | modules/ylilauta.py | jasuka/pyBot | 44094d1bf5a1c82f37cf515a6d84849dfb6a1e6f | [
"MIT"
] | 1 | 2020-10-28T07:33:55.000Z | 2020-10-28T07:33:55.000Z | modules/ylilauta.py | jasuka/pyBot | 44094d1bf5a1c82f37cf515a6d84849dfb6a1e6f | [
"MIT"
] | 2 | 2015-04-09T20:49:22.000Z | 2015-04-25T03:25:47.000Z | modules/ylilauta.py | jasuka/pyBot | 44094d1bf5a1c82f37cf515a6d84849dfb6a1e6f | [
"MIT"
] | 1 | 2020-10-28T07:37:42.000Z | 2020-10-28T07:37:42.000Z | import urllib.parse
import syscmd
import random
from bs4 import BeautifulSoup
import sysErrorLog
def ylilauta(self):
if len(self.msg) >= 4:
url = "http://ylilauta.org/satunnainen/"
try:
html = syscmd.getHtml(self, url, True )
except Exception as e:
self.errormsg = "[ERROR]-[ylilauta] ylilauta()(1) stating: {0}".format(e)
sysErrorLog.log( self ) ## LOG the error
if self.config["debug"]:
print("{0}{1}{2}".format(self.color("red"), self.errormsg, self.color("end")))
try:
try:
soup = BeautifulSoup(html, "lxml")
			except Exception:
soup = BeautifulSoup(html, "html5lib")
data = soup.findAll("div", {"class" : "postsubject"})
x = random.randrange(0,len(data))
string = "{0}: http:{1}".format(data[x].a.string, data[x].a.get('href'))
self.send_chan(' '.join(string.split()))
except Exception as e:
self.errormsg = "[ERROR]-[ylilauta] ylilauta()(2) stating: {0}".format(e)
sysErrorLog.log( self ) ## LOG the error
if self.config["debug"]:
print("{0}{1}{2}".format(self.color("red"), self.errormsg, self.color("end")))
| 33.5625 | 83 | 0.645251 | 150 | 1,074 | 4.613333 | 0.406667 | 0.069364 | 0.049133 | 0.052023 | 0.468208 | 0.468208 | 0.468208 | 0.468208 | 0.468208 | 0.320809 | 0 | 0.01768 | 0.157356 | 1,074 | 31 | 84 | 34.645161 | 0.746961 | 0.02514 | 0 | 0.37931 | 0 | 0 | 0.202495 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034483 | false | 0 | 0.172414 | 0 | 0.206897 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ee728491b0255e70888f9368b6a96c956bd1e3 | 2,359 | py | Python | resources/generate_spirv.py | MarkY-LunarG/LunarGlobe | d32a6145eebc68ad4d7e28bdd4fab88cbdd33545 | [
"Apache-2.0"
] | 2 | 2018-06-20T15:19:38.000Z | 2018-07-13T15:13:30.000Z | resources/generate_spirv.py | MarkY-LunarG/LunarGlobe | d32a6145eebc68ad4d7e28bdd4fab88cbdd33545 | [
"Apache-2.0"
] | 25 | 2018-07-27T23:02:01.000Z | 2019-03-15T17:00:05.000Z | resources/generate_spirv.py | MarkY-LunarG/LunarGravity | d32a6145eebc68ad4d7e28bdd4fab88cbdd33545 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3 -i
#
# Copyright (c) 2018, LunarG, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import getopt
import subprocess
def main(argv):
glslang_validator = "glslangValidator"
try:
opts, args = getopt.getopt(argv,"hg:",["glslang_dir="])
except getopt.GetoptError:
        print('generate_spirv.py -g <glslang_directory>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print('generate_spirv.py -g <glslang_directory>')
sys.exit()
elif opt in ("-g", "--glslang_dir"):
glslang_folder = os.path.join(arg, 'bin')
validator = os.path.join(glslang_folder, glslang_validator)
glslang_validator = validator
current_path = os.getcwd()
shader_src_subfolder = 'shaders/source'
shader_dst_subfolder = 'shaders'
shader_src_full_path = os.path.join(current_path, shader_src_subfolder)
shader_dst_full_path = os.path.join(current_path, shader_dst_subfolder)
input_dir = os.fsencode(shader_src_full_path)
for file in os.listdir(input_dir):
filename = os.fsdecode(file)
if filename.endswith("_glsl.vert"):
output_name = filename.replace('_glsl.vert', '-vs.spv')
output_file = os.path.join(shader_dst_full_path, output_name)
elif filename.endswith("_glsl.frag"):
output_name = filename.replace('_glsl.frag', '-fs.spv')
output_file = os.path.join(shader_dst_full_path, output_name)
else:
continue
input_file = os.path.join(shader_src_full_path, filename)
glslang_command = '%s -g -V -o %s %s' % (glslang_validator, output_file, input_file)
print('GLSLANG COMMAND => %s' % glslang_command)
os.system(glslang_command)
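# Hedged note on the naming convention implemented above (illustrative names):
#   shaders/source/basic_glsl.vert -> shaders/basic-vs.spv
#   shaders/source/basic_glsl.frag -> shaders/basic-fs.spv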
if __name__ == "__main__":
main(sys.argv[1:])
| 36.859375 | 92 | 0.671895 | 319 | 2,359 | 4.758621 | 0.413793 | 0.027668 | 0.046113 | 0.033597 | 0.221344 | 0.16996 | 0.16996 | 0.16996 | 0.123847 | 0.065876 | 0 | 0.005975 | 0.219585 | 2,359 | 63 | 93 | 37.444444 | 0.818577 | 0.243323 | 0 | 0.097561 | 0 | 0 | 0.142534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02439 | false | 0 | 0.097561 | 0 | 0.121951 | 0.073171 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7ef11b32d20d22ca5675ad2736b825d332c3f0c | 2,998 | py | Python | viz/scI.py | ksorat/IonTrap | 8b8cbb61904f8e587b04e36b5fbb9d0bae268049 | [
"MIT"
] | null | null | null | viz/scI.py | ksorat/IonTrap | 8b8cbb61904f8e587b04e36b5fbb9d0bae268049 | [
"MIT"
] | null | null | null | viz/scI.py | ksorat/IonTrap | 8b8cbb61904f8e587b04e36b5fbb9d0bae268049 | [
"MIT"
] | null | null | null | #Show spacecraft intensity for ion injection
import kCyl as kc
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import lfmViz as lfmv
lfmv.ppInit()
BaseP = "~/Work/IonTrap/Data/KCyl/"
IDs = ["p","Hep","Hepp","O6"]
Labs = ["H+","He+","He++","O6"]
doDelI = False #Subtract background (t=0)
doI = True
x0 = -1.0
y0 = 6.0
Nk = 100
iScl = 1.0/(4.0*np.pi)
#iScl = 1.0
figQ = 300
Sig = -1
TINY = 1.0e-8
imeth = 'linear'
#NumS = len(IDs)
NumS = 1
for ns in range(NumS):
fIn = os.path.expanduser('~') + "/Work/IonTrap/Data/KCyl/KCyl_" + IDs[ns] + ".h5"
#Interpolate from simulation
R,P,K,Tkc,I = kc.getCyl(fIn)
if (Sig>0):
I = kc.SmoothI(I,sig=Sig)
Ii = kc.GetInterp(R,P,K,Tkc,I,imeth=imeth)
kMin = K.min()
kMax = K.max()
#Have interpolant, now construct SC data
Ksc = np.logspace(np.log10(kMin),np.log10(kMax),Nk)
#Ksc = np.linspace(kMin,kMax,Nk)
r0 = np.sqrt(x0**2.0+y0**2.0)
p0 = np.arctan2(y0,x0)
Nt = Tkc.shape[0]
Isc = np.zeros((Nt,Nk))
Isc0 = np.zeros((Nt,Nk))
dK = np.zeros((Nt,Nk))
dkScl = np.ones(Nk)
iPts = np.zeros((Nk,4))
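	# Columns of iPts are the interpolant coordinates (radius, azimuth,
	# energy, time); the spacecraft sits fixed at (r0, p0) while energy and
	# time vary across rows.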
for i in range(Nt):
iPts[:,0] = r0
iPts[:,1] = p0
iPts[:,2] = Ksc
iPts[:,3] = Tkc[i]
Isc[i,:] = Ii(iPts)
Isc = iScl*Isc
Ik0 = Isc[0,:]
#Ind = Ik0<TINY
dK0 = Ik0
#dK0[Ind] = 1.0
#dkScl[Ind] = 0.0
for i in range(Nt):
Isc0[i,:] = Isc[i,:] - Ik0
dK[i,:] = Isc[i,:]/dK0
#Now make figures
vMin = 1.0e+0
vMax = 1.0e+5
cMap = "jet"
vNorm = LogNorm(vmin=vMin,vmax=vMax)
Tkc = Tkc-Tkc.min()
#Ax = plt.gca()
#Ax.set_axis_bgcolor('black')
#Ax.patch.set_facecolor('black')
if (doDelI):
plt.pcolormesh(Tkc,Ksc,Isc0.T,norm=vNorm,cmap=cMap)
plt.yscale('log')
plt.ylim([50,1.0e+3])
plt.colorbar()
fOut = "dI_"+IDs[ns]+".png"
print("Writing figure %s"%(fOut))
plt.savefig(fOut,dpi=figQ)
plt.close('all')
if (doI):
plt.close('all')
plt.rc_context({'axes.edgecolor':'cyan', 'xtick.color':'cyan', 'ytick.color':'cyan', 'figure.facecolor':'cyan'})
fig = plt.figure(0)
fig.patch.set_facecolor('black')
plt.pcolormesh(Tkc,Ksc,Isc.T,norm=vNorm,cmap=cMap)
Ax = plt.gca()
		Ax.set_facecolor('black')  # set_axis_bgcolor was removed in newer matplotlib
plt.xlabel("Time [s]",fontsize="large")
plt.ylabel("Energy [keV]",fontsize="large")
plt.title("Intensity, %s"%(Labs[ns]),fontsize="large")
plt.yscale('log')
plt.ylim([50,1.0e+3])
plt.xlim([Ksc.min(),Ksc.max()])
cb = plt.colorbar()
cb.set_label("Intensity\ns-1 cm-2 keV-1 ster-1",fontsize='large',color='cyan')
Ax.xaxis.label.set_color('cyan')
Ax.yaxis.label.set_color('cyan')
#V = [1.2,2,5,10,20,50,100]
#V = [1.5,2,4,5,6,7,8,9,10]
#V = [10,50,100,500]
#V = [25,50]
#print(V)
#CS = Ax.contour(Tkc,Ksc,dK.T,V,colors='k')
#plt.clabel(CS,inline=1,fontsize=10)
fOut = "I_"+IDs[ns]+".png"
print("Writing figure %s"%(fOut))
plt.savefig(fOut,dpi=figQ,facecolor='black')
plt.close('all')
| 22.373134 | 114 | 0.626751 | 534 | 2,998 | 3.494382 | 0.342697 | 0.008039 | 0.014469 | 0.017685 | 0.157556 | 0.116827 | 0.116827 | 0.116827 | 0.085745 | 0.085745 | 0 | 0.050533 | 0.155103 | 2,998 | 133 | 115 | 22.541353 | 0.686143 | 0.163442 | 0 | 0.122222 | 0 | 0 | 0.13269 | 0.021713 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7efeac023f5c4457b3bd7654a38b9e883dc59bf | 10,149 | py | Python | persistor/storage_utils/utils_storage.py | syntio/aquarium-persistor-azure | 0dd5f390885b78ae670ea6b0362d93b9bbaa91c2 | [
"Apache-2.0"
] | 1 | 2020-12-14T15:41:35.000Z | 2020-12-14T15:41:35.000Z | persistor/storage_utils/utils_storage.py | syntio/aquarium-persistor-azure | 0dd5f390885b78ae670ea6b0362d93b9bbaa91c2 | [
"Apache-2.0"
] | null | null | null | persistor/storage_utils/utils_storage.py | syntio/aquarium-persistor-azure | 0dd5f390885b78ae670ea6b0362d93b9bbaa91c2 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Syntio Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains all of the utility functions required for saving Blobs (Block or Append) to Azure Storage.
"""
import asyncio
import datetime
import json
import logging
import uuid
from typing import Callable, Generator, Dict, List, Union, Optional
from azure.eventhub import EventData
from azure.functions import (
EventGridEvent,
EventHubEvent,
ServiceBusMessage,
)
from azure.servicebus import Message
from azure.storage.blob.aio import ContainerClient
from ..custom_exceptions.persistor_exceptions import StorageTypeConfigurationException
STORE_RETRY_POLICY_MAX = 3
STORE_RETRY_BACKOFF_TIME = 0.5
def form_data_af_event_grid(
event: EventGridEvent,
*args,
):
"""
Used to extract the payload from an Event Grid Event.
:param event: An Event Grid Event.
:return: A JSON-formatted dictionary; its "DATA" field contains the event payload.
"""
return {"DATA": event.get_json()}
def form_data_af_event_hub_push(
event: EventHubEvent,
get_metadata=False,
):
"""
Used to extract the payload from an Event Hub Event. (PUSH variant.)
:param event: An Event Hub Event.
:param get_metadata: Flag determining whether or not to extract the metadata from the Event Hub.
:return: A JSON-formatted dictionary; its "DATA" field contains the event payload.
"""
payload = event.get_body().decode("utf-8")
metadata = None
if get_metadata:
metadata = event.metadata
return form_store(payload, metadata)
def form_data_af_service_bus_push(
msg: ServiceBusMessage,
get_metadata=False,
):
"""
Used to extract the message payload from a Service Bus message. (PUSH variant.)
:param msg: Service Bus message.
:param get_metadata: Flag determining whether or not to extract the user_properties.
:return: A JSON-formatted dictionary containing the "DATA" and (if extracted) "METADATA" fields.
"""
payload = msg.get_body().decode("utf-8")
metadata = None
if get_metadata:
metadata = msg.user_properties
return form_store(payload, metadata)
def form_data_af_event_hub_pull(
event: EventData,
get_metadata=False,
):
"""
Used to extract the message from an EventData object. (Event Hub PULL variant.)
:param event: EventData object from which the payload will be extracted from.
:param get_metadata: Flag determining whether to retrieve metadata from the properties attribute.
:return: A JSON-formatted dictionary containing the "DATA" and (if extracted) "METADATA" fields.
"""
payload = event.body_as_str()
metadata = None
    # For some unknown reason, the official Microsoft documentation does not
    # disclose that, when manually retrieving messages from the Event Hub, the
    # SDK encodes both the keys and values of custom properties into bytes,
    # unlike its binding variation.
if get_metadata:
metadata = event.properties
if metadata:
metadata = {k.decode("utf-8"): metadata[k].decode("utf-8") for k in metadata}
return form_store(payload, metadata)
def form_data_af_service_bus_pull(
msg: Message,
get_metadata=False,
):
"""
Used to extract the message payload from a Service Bus message. (PULL variant.)
:param msg: Service Bus message.
:param get_metadata: Flag determining whether or not to extract the user_properties.
:return: A JSON-formatted dictionary containing the "DATA" and (if extracted) "METADATA" fields.
"""
payload = msg.body
metadata = None
if isinstance(payload, Generator):
message_body = bytearray()
for payload_bytes in payload:
message_body.extend(bytes(payload_bytes))
payload = message_body
payload = payload.decode("utf-8")
    # For some unknown reason, the official Microsoft documentation does not
    # disclose that, when manually retrieving messages from Service Bus, the
    # SDK encodes both the keys and values of custom properties into bytes,
    # unlike its binding variation.
if get_metadata:
metadata = msg.user_properties
if metadata:
metadata = {k.decode("utf-8"): metadata[k].decode("utf-8") for k in metadata}
return form_store(payload, metadata)
def form_store(
payload: Union[Dict, str],
metadata: Union[Dict, str, None],
):
"""
Processes the payload and the metadata (if any exists) into a JSON-like format.
:param payload: A string object or dictionary containing the message data.
:param metadata: A dictionary containing the message metadata.
:return: A dictionary with "DATA" and (optionally) "METADATA" fields.
"""
data = {"DATA": payload}
if metadata:
data["METADATA"] = metadata
return data
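def _example_form_store():
    # Hedged usage sketch (payload and metadata values are placeholders):
    # shows the envelope form_store builds around a message.
    envelope = form_store('{"id": 1}', {"source": "device-42"})
    assert envelope == {"DATA": '{"id": 1}',
                        "METADATA": {"source": "device-42"}}
    return envelope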
def generate_file_name(
store_param: str,
blob_name: Optional[str] = None,
):
"""
Generates the file name string for a blob.
:param store_param: The main folder in the container to store the file in.
:param blob_name: Name of the blob itself.
:return: Generated file name.
"""
if not blob_name:
blob_name = str(uuid.uuid4())
now = datetime.datetime.now()
file_name = "{store_param}/{year}/{month}/{day}/{blob_name}.txt".format(
store_param=store_param,
year=str(now.year),
month=str(now.month),
day=str(now.day),
blob_name=blob_name,
)
return file_name
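def _example_generate_file_name():
    # Hedged usage sketch ("events" and the blob name are placeholders): the
    # helper above yields a date-partitioned path such as
    # "events/2020/6/15/msg-001.txt" (the date comes from datetime.now()).
    return generate_file_name("events", blob_name="msg-001")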
def form_blob_json_string(
msg: Union[EventGridEvent, EventHubEvent, ServiceBusMessage, EventData, Message],
get_metadata: bool,
form_func: Callable,
):
"""
Create a blob JSON string from a message and the form function to process it with.
:param msg: Message/event to store.
:param get_metadata: Boolean determining whether or not to retrieve metadata from the message (if possible)
:param form_func: Function to extract the payload from the message/event object.
:return: JSON-string containing the data and metadata information.
"""
return json.dumps(form_func(msg, get_metadata))
async def save_to_storage(
data: List[str],
container_client: ContainerClient,
store_param: str,
append=False,
file_append_name: Optional[str] = None,
):
"""
    Saves messages to storage.
    :param data: List of data strings to be stored.
:param container_client: Blob service client (initialized outside this function).
:param store_param: The main folder in the container to store the file in.
:param append: Flag to determine whether the data should be appended to an append blob.
:param file_append_name: Name of the append blob to store to. Ignored if append is False.
    :return: Name of the blob stored to and a boolean success flag.
"""
# Success flag.
result = False
# Get the blob file name and the data (string) to store.
if not append:
file_name = generate_file_name(store_param=store_param)
else:
file_name = None
# If the file_name is None, we should be using the append blob name.
# If the append blob name is not given, an exception is raised.
if not file_name:
if not file_append_name:
raise StorageTypeConfigurationException("SET BLOB TO 'APPEND', YET NO FILE GIVEN FOR THE APPEND BLOB!")
file_name = file_append_name
# Store the data utilizing a simple retry policy.
    # In truth, the Blob Client we're using already applies an ExponentialRetry
    # mechanism. This is merely an additional fail-safe on top of it, in case the
    # library at some point changes its default retry parameters or the save load
    # per second is far bigger than expected.
# In addition, on the off-chance the user is using the TIMED_APPEND option, this retry loop helps with
# potential concurrency issues. If the function manages to get to this point without an append blob
# existing, this loop will give enough time for it to be created in the meantime and ensure a successful
# store.
# In practice, this loop will not execute more than once.
for i in range(STORE_RETRY_POLICY_MAX):
try:
# We include getting the blob client in the retry, due to the fact we likely
# need to renew the lease for the blob.
blob_client = container_client.get_blob_client(
file_name,
)
if append:
store_method = blob_client.append_block
else:
store_method = blob_client.upload_blob
async with blob_client:
await store_method("\n".join(data))
# Set the result to true.
result = True
# Escape the retry loop.
break
# Currently set to catch a general exception, seeing as how the documentation doesn't
# actually state the possible exceptions that could occur during these processes.
except Exception as exc:
if i == STORE_RETRY_POLICY_MAX - 1:
logging.error(
"FAILED TO SAVE TO STORAGE! | PATH: %s | FAILED MESSAGE CONTENTS: %s | EXCEPTION %s",
file_name,
data,
str(exc),
)
else:
logging.warning(
"FAILED TO SAVE TO STORAGE! | PATH: %s | RETRYING... (ATTEMPT NO. %s)",
file_name,
str(i + 1),
)
await asyncio.sleep(STORE_RETRY_BACKOFF_TIME)
return file_name, result
| 32.84466 | 115 | 0.673268 | 1,355 | 10,149 | 4.941697 | 0.239114 | 0.019116 | 0.016129 | 0.009707 | 0.326314 | 0.30003 | 0.290024 | 0.276284 | 0.260305 | 0.253883 | 0 | 0.002781 | 0.255986 | 10,149 | 308 | 116 | 32.951299 | 0.883989 | 0.450586 | 0 | 0.28169 | 0 | 0.014085 | 0.064763 | 0.010346 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056338 | false | 0 | 0.077465 | 0 | 0.197183 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7f0c91fcc31d3e630bc67b1f9a48dda812a5f9d | 1,599 | py | Python | StringFunctions/lambda/macro.py | igorlg/aws-cloudformation-macros | 1fa2898ae8b5b1b420dc05a028417edc10e5943a | [
"Apache-2.0"
] | null | null | null | StringFunctions/lambda/macro.py | igorlg/aws-cloudformation-macros | 1fa2898ae8b5b1b420dc05a028417edc10e5943a | [
"Apache-2.0"
] | null | null | null | StringFunctions/lambda/macro.py | igorlg/aws-cloudformation-macros | 1fa2898ae8b5b1b420dc05a028417edc10e5943a | [
"Apache-2.0"
] | null | null | null | import traceback
def handler(event, _):
response = {
"requestId": event["requestId"],
"status": "success"
}
try:
operation = event["params"]["Operation"]
input = event["params"]["InputString"]
no_param_string_funcs = ["Upper", "Lower", "Capitalize", "Title", "SwapCase"]
if operation in no_param_string_funcs:
response["fragment"] = getattr(input, operation.lower())()
elif operation == "Strip":
response["fragment"] = op_strip(event)
elif operation == "Replace":
response["fragment"] = op_replace(event)
elif operation == "MaxLength":
response["fragment"] = op_max_length(event)
else:
response["status"] = "failure"
except Exception as e:
traceback.print_exc()
response["status"] = "failure"
response["errorMessage"] = str(e)
return response
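# Hedged sketch of the CloudFormation macro event shape handler() expects
# (keys taken from the lookups above; the requestId value is a placeholder):
_EXAMPLE_EVENT = {
    "requestId": "11111111-2222-3333-4444-555555555555",
    "params": {"Operation": "Upper", "InputString": "hello"},
}
# handler(_EXAMPLE_EVENT, None) would return
# {"requestId": ..., "status": "success", "fragment": "HELLO"}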
def op_strip(event):
chars = None
input = event["params"]["InputString"]
if "Chars" in event["params"]:
chars = event["params"]["Chars"]
return input.strip(chars)
def op_replace(event):
return (
event["params"]["InputString"]
.replace(
event['params']['Old'],
event['params']['New'])
)
def op_max_length(event):
input = event["params"]["InputString"]
length = int(event["params"]["Length"])
strip_from = event["params"]["StripFrom"]
if len(input) <= length:
return input
if strip_from == 'Right':
return input[:length]
return input[len(input)-length:]
| 28.553571 | 85 | 0.577236 | 162 | 1,599 | 5.58642 | 0.333333 | 0.133702 | 0.097238 | 0.089503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.267667 | 1,599 | 55 | 86 | 29.072727 | 0.772844 | 0 | 0 | 0.106383 | 0 | 0 | 0.193871 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085106 | false | 0 | 0.021277 | 0.021277 | 0.234043 | 0.021277 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7f2aa7a4185b92aaf96c5933915d4b759b6711d | 863 | py | Python | sknn/tests/test_layers.py | hero9968/scikit-neuralnetwork | b7fd0c089bd7c721c4d9cf9ca71eed74c6bafc5e | [
"BSD-3-Clause"
] | 1,375 | 2015-03-31T22:20:00.000Z | 2022-03-25T07:50:46.000Z | sknn/tests/test_layers.py | hero9968/scikit-neuralnetwork | b7fd0c089bd7c721c4d9cf9ca71eed74c6bafc5e | [
"BSD-3-Clause"
] | 222 | 2015-04-03T16:25:59.000Z | 2021-05-13T15:38:51.000Z | sknn/tests/test_layers.py | hero9968/scikit-neuralnetwork | b7fd0c089bd7c721c4d9cf9ca71eed74c6bafc5e | [
"BSD-3-Clause"
] | 284 | 2015-04-03T18:24:21.000Z | 2021-09-14T16:08:28.000Z | import unittest
from nose.tools import (assert_equal, assert_raises, assert_in, assert_not_in)
from sknn.mlp import Regressor as MLPR
from sknn.mlp import Layer as L
class TestNestedParameters(unittest.TestCase):
def test_GetParamsIncludesLayers(self):
nn = MLPR(layers=[L("Linear", units=123)])
p = nn.get_params()
assert_in('output', p)
def test_GetParamsMissingLayer(self):
nn = MLPR(layers=[L("Linear", units=123)])
p = nn.get_params()
assert_not_in('hidden0', p)
def test_SetParamsDoubleUnderscore(self):
nn = MLPR(layers=[L("Linear", units=123)])
nn.set_params(output__units=456)
assert_equal(nn.layers[0].units, 456)
def test_SetParamsValueError(self):
nn = MLPR(layers=[L("Linear")])
assert_raises(ValueError, nn.set_params, output__range=1.0)
| 30.821429 | 78 | 0.676709 | 115 | 863 | 4.886957 | 0.373913 | 0.049822 | 0.071174 | 0.113879 | 0.270463 | 0.270463 | 0.229537 | 0.229537 | 0.174377 | 0.174377 | 0 | 0.027536 | 0.200464 | 863 | 27 | 79 | 31.962963 | 0.786957 | 0 | 0 | 0.25 | 0 | 0 | 0.042874 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.45 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7f40253017dfd0d80f06d5013d0605f3e3c992e | 2,932 | py | Python | pip_services3_expressions-3.3.4/pip_services3_expressions/variants/TypeSafeVariantOperations.py | pip-services3-python/pip-services3-expressions-python | 4ea237fbbba32e62f920e6be3bd48e6cc02184e5 | [
"MIT"
] | null | null | null | pip_services3_expressions-3.3.4/pip_services3_expressions/variants/TypeSafeVariantOperations.py | pip-services3-python/pip-services3-expressions-python | 4ea237fbbba32e62f920e6be3bd48e6cc02184e5 | [
"MIT"
] | null | null | null | pip_services3_expressions-3.3.4/pip_services3_expressions/variants/TypeSafeVariantOperations.py | pip-services3-python/pip-services3-expressions-python | 4ea237fbbba32e62f920e6be3bd48e6cc02184e5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from .AbstractVariantOperations import AbstractVariantOperations
from .Variant import Variant
from .VariantType import VariantType
class TypeSafeVariantOperations(AbstractVariantOperations):
"""
Implements a strongly typed (type safe) variant operations manager object.
"""
def convert(self, value: Variant, new_type: VariantType) -> Variant:
"""
Converts variant to specified type
:param value: A variant value to be converted.
:param new_type: A type of object to be returned.
:return: A converted Variant value.
"""
if new_type == VariantType.Null:
result = Variant()
return result
if new_type == value.type or new_type == VariantType.Object:
return value
if value.type == VariantType.Integer:
return self.__convert_from_integer(value, new_type)
if value.type == VariantType.Long:
return self.__convert_from_long(value, new_type)
if value.type == VariantType.Float:
return self.__convert_from_float(value, new_type)
if value.type == VariantType.Object:
return value
raise Exception(f"Variant convertion from {self._type_to_string(value.type)} " +
f"to {self._type_to_string(new_type)} is not supported.")
def __convert_from_integer(self, value: Variant, new_type: VariantType) -> Variant:
result = Variant()
if new_type == VariantType.Long:
result.as_long = value.as_integer
return result
elif new_type == VariantType.Float:
result.as_float = value.as_integer
return result
elif new_type == VariantType.Double:
result.as_double = value.as_integer
return result
raise Exception(f"Variant convertion from {self._type_to_string(value.type)} " +
f" to {self._type_to_string(new_type)} is not supported.")
def __convert_from_long(self, value: Variant, new_type: VariantType) -> Variant:
result = Variant()
if new_type == VariantType.Float:
result.as_float = value.as_long
return result
elif new_type == VariantType.Double:
result.as_double = value.as_long
return result
raise Exception(f"Variant convertion from {self._type_to_string(value.type)} " +
f" to {self._type_to_string(new_type)} is not supported.")
def __convert_from_float(self, value: Variant, new_type: VariantType) -> Variant:
result = Variant()
if new_type == VariantType.Double:
result.as_double = value.as_float
return result
raise Exception(f"Variant convertion from {self._type_to_string(value.type)} " +
f" to {self._type_to_string(new_type)} is not supported.")
| 38.077922 | 88 | 0.633015 | 341 | 2,932 | 5.205279 | 0.14956 | 0.082817 | 0.12169 | 0.072113 | 0.656338 | 0.620282 | 0.620282 | 0.539718 | 0.539718 | 0.465352 | 0 | 0.000475 | 0.282401 | 2,932 | 76 | 89 | 38.578947 | 0.843156 | 0.090723 | 0 | 0.46 | 0 | 0 | 0.173796 | 0.101734 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.06 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7f76daf62caf1aa9e14aa853c094680c91b01fe | 3,866 | py | Python | moto/cloudwatch/models.py | IlyaSukhanov/moto | a37838b6386a98433a2d4beb14b2abae616185c4 | [
"Apache-2.0"
] | null | null | null | moto/cloudwatch/models.py | IlyaSukhanov/moto | a37838b6386a98433a2d4beb14b2abae616185c4 | [
"Apache-2.0"
] | null | null | null | moto/cloudwatch/models.py | IlyaSukhanov/moto | a37838b6386a98433a2d4beb14b2abae616185c4 | [
"Apache-2.0"
] | null | null | null | from moto.core import BaseBackend
import boto.ec2.cloudwatch
import datetime
class Dimension(object):
def __init__(self, name, value):
self.name = name
self.value = value
class FakeAlarm(object):
def __init__(self, name, namespace, metric_name, comparison_operator, evaluation_periods,
period, threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit):
self.name = name
self.namespace = namespace
self.metric_name = metric_name
self.comparison_operator = comparison_operator
self.evaluation_periods = evaluation_periods
self.period = period
self.threshold = threshold
self.statistic = statistic
self.description = description
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
self.alarm_actions = alarm_actions
self.ok_actions = ok_actions
self.insufficient_data_actions = insufficient_data_actions
self.unit = unit
self.state_updated_timestamp = datetime.datetime.now()
self.configuration_updated_timestamp = datetime.datetime.now()
class MetricDatum(object):
def __init__(self, namespace, name, value, dimensions):
self.namespace = namespace
self.name = name
self.value = value
self.dimensions = [Dimension(dimension['name'], dimension['value']) for dimension in dimensions]
class CloudWatchBackend(BaseBackend):
def __init__(self):
self.alarms = {}
self.metric_data = []
def put_metric_alarm(self, name, namespace, metric_name, comparison_operator, evaluation_periods,
period, threshold, statistic, description, dimensions,
alarm_actions, ok_actions, insufficient_data_actions, unit):
alarm = FakeAlarm(name, namespace, metric_name, comparison_operator, evaluation_periods, period,
threshold, statistic, description, dimensions, alarm_actions,
ok_actions, insufficient_data_actions, unit)
self.alarms[name] = alarm
return alarm
def get_all_alarms(self):
return self.alarms.values()
@staticmethod
def _list_element_starts_with(items, needle):
"""True of any of the list elements starts with needle"""
for item in items:
if item.startswith(needle):
return True
return False
def get_alarms_by_action_prefix(self, action_prefix):
return [
alarm
for alarm in self.alarms.values()
if CloudWatchBackend._list_element_starts_with(
alarm.alarm_actions, action_prefix
)
]
def get_alarms_by_alarm_name_prefix(self, name_prefix):
return [
alarm
for alarm in self.alarms.values()
if alarm.name.startswith(name_prefix)
]
def get_alarms_by_alarm_names(self, alarm_names):
return [
alarm
for alarm in self.alarms.values()
if alarm.name in alarm_names
]
def get_alarms_by_state_value(self, state):
raise NotImplementedError(
"DescribeAlarm by state is not implemented in moto."
)
def delete_alarms(self, alarm_names):
for alarm_name in alarm_names:
self.alarms.pop(alarm_name, None)
def put_metric_data(self, namespace, metric_data):
for name, value, dimensions in metric_data:
self.metric_data.append(MetricDatum(namespace, name, value, dimensions))
def get_all_metrics(self):
return self.metric_data
cloudwatch_backends = {}
for region in boto.ec2.cloudwatch.regions():
cloudwatch_backends[region.name] = CloudWatchBackend()
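
# --- usage sketch (added; assumes boto's region metadata is available at import time) ---
# Dimensions are plain dicts with 'name'/'value' keys, as FakeAlarm expects.
if __name__ == "__main__":
    backend = CloudWatchBackend()
    backend.put_metric_alarm(
        "cpu-high", "AWS/EC2", "CPUUtilization", "GreaterThanThreshold", 3,
        60, 90.0, "Average", "demo alarm",
        [{"name": "InstanceId", "value": "i-0123456789abcdef0"}],
        [], [], [], "Percent")
    print([a.name for a in backend.get_all_alarms()])  # -> ['cpu-high']
    print([a.name for a in backend.get_alarms_by_alarm_names(["cpu-high"])])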
| 34.828829 | 104 | 0.655717 | 423 | 3,866 | 5.739953 | 0.198582 | 0.023064 | 0.047364 | 0.049423 | 0.413097 | 0.356672 | 0.314662 | 0.314662 | 0.314662 | 0.314662 | 0 | 0.000708 | 0.269788 | 3,866 | 110 | 105 | 35.145455 | 0.859369 | 0.013192 | 0 | 0.204545 | 0 | 0 | 0.017852 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.159091 | false | 0 | 0.034091 | 0.056818 | 0.329545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7f827b1845202a8df966ed66c4dad3aaef84f11 | 3,286 | py | Python | swords/methods/rogets.py | p-lambda/swords | 04ca75370d0ce098a7f4db68240fc8e79a4f7b3b | [
"CC-BY-3.0"
] | 25 | 2021-05-24T06:54:45.000Z | 2022-03-18T15:30:39.000Z | swords/methods/rogets.py | p-lambda/swords | 04ca75370d0ce098a7f4db68240fc8e79a4f7b3b | [
"CC-BY-3.0"
] | 2 | 2021-06-11T02:39:47.000Z | 2021-09-20T15:06:46.000Z | swords/methods/rogets.py | p-lambda/swords | 04ca75370d0ce098a7f4db68240fc8e79a4f7b3b | [
"CC-BY-3.0"
] | 2 | 2021-11-19T09:06:30.000Z | 2022-03-24T18:31:40.000Z | from collections import defaultdict
from functools import lru_cache
import pickle
from ..assets import ASSETS
from ..datasets import get_dataset
from ..lemma import lemmatize
from .. import Pos, LexSubDataset
from . import LexSubGenerator, LexSubWithTargetPosGenerator
_ROGET_POS_TO_POS = {
'v': Pos.VERB,
'n': Pos.NOUN,
'adj': Pos.ADJ,
'adv': Pos.ADV
}
@lru_cache(maxsize=1)
def rogets_lemma_to_senses():
with open(ASSETS['rogets']['fp'], 'rb') as f:
d = pickle.load(f)
lemma_to_senses = defaultdict(list)
for (lemma, pos), v in d.items():
assert lemma.strip() == lemma
assert lemma.lower() == lemma
pos = pos.split('/')
pos = [_ROGET_POS_TO_POS[''.join(c for c in p if c.isalpha())] for p in pos]
lemma_to_senses[lemma].append({
'pos': pos,
'substitutes': v['results']
})
return lemma_to_senses
class RogetsThesaurusRawGenerator(LexSubGenerator):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._lemma_to_senses = rogets_lemma_to_senses()
def substitutes_will_be_lemmatized(self):
return False
def generate(self, context, target, target_offset, target_pos=None):
target = target.lower()
if target not in self._lemma_to_senses:
raise ValueError()
senses = self._lemma_to_senses[target]
subs = []
for sense in senses:
subs.extend(sense['substitutes'])
return [(sub, -i) for i, sub in enumerate(subs)]
class RogetsThesaurusWithTargetLemmatizationAndPosFilteringGenerator(LexSubWithTargetPosGenerator):
def __init__(self, *args, pos_tag_strategy='nltk', lemmatize_strategy='nltk', **kwargs):
super().__init__(*args, pos_tag_strategy=pos_tag_strategy, **kwargs)
self.lemmatize_strategy = lemmatize_strategy
self._lemma_to_senses = rogets_lemma_to_senses()
self._swords_dev = get_dataset('swords-latest_dev')
self._swords_test = get_dataset('swords-latest_test')
def substitutes_will_be_lemmatized(self):
return True
def generate_with_target_pos(self, context, target, target_offset, target_pos):
# Lemmatize (using "ground truth" from SWORDS)
cid = LexSubDataset.context_id(LexSubDataset.create_context(context))
tid = LexSubDataset.target_id(LexSubDataset.create_target(cid, target, target_offset, pos=target_pos))
split = None
for d in [self._swords_dev, self._swords_test]:
if d.has_target(tid):
split = d
break
        if split is not None:
            target_lemmatized = split.get_target(tid)['extra']['coinco_lemma']
        else:
            # This fallback appears unreachable: the assert fires whenever the
            # target is not found in the SWORDS dev/test splits loaded above.
            assert False
            target_lemmatized = lemmatize(target, target_pos=target_pos, strategy=self.lemmatize_strategy).lower()
if target_lemmatized not in self._lemma_to_senses:
raise ValueError()
senses = self._lemma_to_senses[target_lemmatized]
subs = []
for sense in senses:
for sub in sense['substitutes']:
subs.append((sense['pos'], sub))
substitutes = [(sub, -i) for i, (pos, sub) in enumerate(subs) if target_pos in pos]
return substitutes
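
# --- usage sketch (added; assumes the swords package and its Roget's asset are installed,
# and that LexSubGenerator's constructor needs no extra arguments here) ---
if __name__ == "__main__":
    gen = RogetsThesaurusRawGenerator()
    context = "the service was quick and friendly"
    print(gen.generate(context, "quick", context.index("quick"))[:5])  # [(substitute, score), ...]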
| 37.340909 | 114 | 0.658247 | 400 | 3,286 | 5.13 | 0.2575 | 0.040936 | 0.076023 | 0.049708 | 0.194932 | 0.175439 | 0.175439 | 0.099415 | 0.064327 | 0.064327 | 0 | 0.000399 | 0.237066 | 3,286 | 87 | 115 | 37.770115 | 0.818109 | 0.01339 | 0 | 0.133333 | 0 | 0 | 0.03858 | 0 | 0 | 0 | 0 | 0 | 0.04 | 1 | 0.093333 | false | 0 | 0.106667 | 0.026667 | 0.293333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7fc1beb7ba2562a8b4f7a17aa4fee17ce4220af | 964 | py | Python | dnn/scripts/gen_elemwise_each_mode.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 5,168 | 2020-03-19T06:10:04.000Z | 2022-03-31T11:11:54.000Z | dnn/scripts/gen_elemwise_each_mode.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 286 | 2020-03-25T01:36:23.000Z | 2022-03-31T10:26:33.000Z | dnn/scripts/gen_elemwise_each_mode.py | Olalaye/MegEngine | 695d24f24517536e6544b07936d189dbc031bbce | [
"Apache-2.0"
] | 515 | 2020-03-19T06:10:05.000Z | 2022-03-30T09:15:59.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
from gen_elemwise_utils import ARITIES, MODES
def main():
parser = argparse.ArgumentParser(
description='generate elemwise each mode',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('output', help='output file path (opened for writing)')
args = parser.parse_args()
with open(args.output, 'w') as fout:
w = lambda s: print(s, file=fout)
w('// generated by gen_elemwise_each_mode.py')
keys = list(MODES.keys())
keys.sort()
for (anum, ctype) in keys:
w('#define MEGDNN_FOREACH_ELEMWISE_MODE_{}_{}(cb) \\'.format(
ARITIES[anum], ctype))
for mode in MODES[(anum, ctype)]:
w(' MEGDNN_ELEMWISE_MODE_ENABLE({}, cb) \\'.format(mode))
w('')
print('generated each_mode.inl')
os.utime(args.output)
if __name__ == '__main__':
main()
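    # Example invocation (added; the path is illustrative):
    #   python3 gen_elemwise_each_mode.py src/each_mode.inl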
| 27.542857 | 76 | 0.610996 | 115 | 964 | 4.904348 | 0.547826 | 0.042553 | 0.056738 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00277 | 0.251037 | 964 | 34 | 77 | 28.352941 | 0.778393 | 0.044606 | 0 | 0 | 0 | 0 | 0.231774 | 0.102285 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.125 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d7fc5274536085a9dcfa9f37964e7791292221e4 | 3,711 | py | Python | utils/guts_analysis/create_mask.py | bioimage-analysis/AparnaGutAnalysis | e83d180377a796be3cb664b070704f35be830728 | [
"MIT"
] | null | null | null | utils/guts_analysis/create_mask.py | bioimage-analysis/AparnaGutAnalysis | e83d180377a796be3cb664b070704f35be830728 | [
"MIT"
] | null | null | null | utils/guts_analysis/create_mask.py | bioimage-analysis/AparnaGutAnalysis | e83d180377a796be3cb664b070704f35be830728 | [
"MIT"
] | null | null | null | from skimage import io, filters, feature, measure, morphology, util
import numpy as np
from skimage.draw import polygon
from scipy import ndimage
from skimage.external.tifffile import TiffFile
import re
import alphashape
from joblib import Parallel, delayed
def bounding_box(viewer, img, DAPI_Ch=0):
z,x,y = img.shape[0], img.shape[2], img.shape[3]
    mask = np.zeros((x, y), dtype=bool)  # np.bool was removed in NumPy 1.24; plain bool is equivalent
    rr, cc = polygon(viewer.layers[1].data[0][:, 0], viewer.layers[1].data[0][:, 1], mask.shape)
    mask[rr, cc] = 1
    mask_3D = np.zeros((z, x, y), dtype=bool)
mask_3D[:] = mask
DAPI = img[:,DAPI_Ch,:,:]
DAPI[~mask_3D] = 0
bbox = ndimage.find_objects(mask_3D)
DAPI_roi = DAPI[bbox[0]]
return DAPI_roi, bbox
def _metadata(file):
with TiffFile(file) as tif:
meta = tif.info()
metadata = {}
for line in meta.splitlines():
_, _, key_x = line.partition('x_resolution (2I)')
_, _, key_y = line.partition('y_resolution (2I)')
_, _, key_z = line.partition('spacing:')
_, _, key_unit = line.partition('unit:')
if key_x:
x_data = [int(x.group()) for x in re.finditer(r'\d+', key_x)]
x_resolution = 1/(x_data[0]/x_data[1])
metadata['x_resolution'] = x_resolution
if key_y:
y_data = [int(x.group()) for x in re.finditer(r'\d+', key_y)]
y_resolution = 1/(y_data[0]/y_data[1])
metadata['y_resolution'] = y_resolution
if key_z:
metadata['z_resolution'] = float(key_z)
if key_unit:
metadata['unit'] = key_unit
return metadata
def _gaussian_blur(file, DAPI_roi):
metadata = _metadata(file)
# The microscope reports the following spacing
original_spacing = np.array([metadata['z_resolution'], metadata['x_resolution'], metadata['y_resolution']])
base_sigma = 2.0
sigma = base_sigma / original_spacing
gaussian_to_seg = filters.gaussian(DAPI_roi, multichannel=False, sigma=sigma)
return gaussian_to_seg
def _roll_ball(file, DAPI_roi, size=20):
    blured = _gaussian_blur(file, DAPI_roi)
    # Background subtraction (note: the `size` parameter is currently unused;
    # the minimum-filter footprint is hard-coded to 8 below)
    background = ndimage.minimum_filter(blured, size=8)
    result = blured - background
    return result
def _binary(file, DAPI_roi, size =20):
back_sub = _roll_ball(file, DAPI_roi, size =20)
threshold_triangle = filters.threshold_triangle(back_sub)
binary_DAPI = back_sub > threshold_triangle
return binary_DAPI, back_sub
def _create_mask(binary_data, slices = 0):
result = np.zeros(binary_data[slices].shape)
coord_x, coord_y = np.where(binary_data[slices]>0)
lst_pts = np.concatenate((coord_x[:, np.newaxis], coord_y[:, np.newaxis]), axis=1)
alpha_shape = alphashape.alphashape(lst_pts[::16], 0.1)
if alpha_shape.type == 'MultiPolygon':
for alpha in alpha_shape:
if alpha.area > 20000:
x, y = alpha.exterior.coords.xy
                rr, cc = polygon(np.asarray(x, dtype=int), np.asarray(y, dtype=int))  # np.int was removed in NumPy 1.24
result[rr, cc] = 1
elif alpha_shape.type == 'GeometryCollection':
pass
else:
x, y = alpha_shape.exterior.coords.xy
        rr, cc = polygon(np.asarray(x, dtype=int), np.asarray(y, dtype=int))
result[rr, cc] = 1
return result
def mask_guts(file, DAPI_roi, size =20):
binary_DAPI, back_sub = _binary(file, DAPI_roi, size =20)
res_paral = Parallel(n_jobs=-1)(delayed(_create_mask)(binary_DAPI, slices) for slices in range(len(binary_DAPI)))
mask = np.asarray(res_paral)
border = mask - morphology.erosion(mask, morphology.ball(1))
return mask, border, back_sub
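
# --- minimal sketch of the per-slice masking (added; assumes alphashape, and a
# shapely version that still exposes the `.type` attribute used above) ---
if __name__ == "__main__":
    demo = np.zeros((1, 64, 64), dtype=bool)
    demo[0, 16:48, 16:48] = True  # a filled square of foreground pixels
    demo_mask = _create_mask(demo, slices=0)
    print(demo_mask.shape, int(demo_mask.sum()))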
| 36.029126 | 117 | 0.64996 | 538 | 3,711 | 4.273234 | 0.256506 | 0.030448 | 0.033493 | 0.032623 | 0.193562 | 0.139191 | 0.119182 | 0.097434 | 0.097434 | 0.097434 | 0 | 0.018634 | 0.219078 | 3,711 | 102 | 118 | 36.382353 | 0.774672 | 0.018324 | 0 | 0.047619 | 0 | 0 | 0.043681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.011905 | 0.095238 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0039b711d25b9f0ef76267989b26383943ff68 | 2,670 | py | Python | carbrain/perception/object_detection/metrics.py | ssudholt/carbrain | a5b60449179c76f49207ce9daa4046149856c040 | [
"MIT"
] | null | null | null | carbrain/perception/object_detection/metrics.py | ssudholt/carbrain | a5b60449179c76f49207ce9daa4046149856c040 | [
"MIT"
] | null | null | null | carbrain/perception/object_detection/metrics.py | ssudholt/carbrain | a5b60449179c76f49207ce9daa4046149856c040 | [
"MIT"
] | null | null | null | """
Code adapted from https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
"""
import numpy as np
import torch
def intersection_over_union(bb_a, bb_b):
"""Compute the intersection over union between two sets of bounding boxes
For computing the IoU, the edges of the bounding boxes are assumed to be
part of the object as well.
Args:
bb_a (BoundingBoxes): First set of bounding boxes.
bb_b (BoundingBoxes): Second set of bounding boxes.
Returns:
torch.tensor: A matrix of IoU values of shape (bb_a.shape[0],
bb_b.shape[0]). The matrix contains all pairwise IoU values
for all bounding boxes in bb_a and bb_b.
"""
with torch.no_grad():
orig_encoding_a = bb_a.encoding
orig_encoding_b = bb_b.encoding
orig_encoding_params_a = bb_a.encoding_params
orig_encoding_params_b = bb_b.encoding_params
bb_a.decode()
bb_b.decode()
ullr_a = bb_a.coords.numpy()
ullr_b = bb_b.coords.numpy()
ul_x_a = np.tile(ullr_a[:, 0].reshape(-1, 1), (1, ullr_b.shape[0]))
ul_y_a = np.tile(ullr_a[:, 1].reshape(-1, 1), (1, ullr_b.shape[0]))
lr_x_a = np.tile(ullr_a[:, 2].reshape(-1, 1), (1, ullr_b.shape[0]))
lr_y_a = np.tile(ullr_a[:, 3].reshape(-1, 1), (1, ullr_b.shape[0]))
ul_x_b = np.tile(ullr_b[:, 0], (ullr_a.shape[0], 1))
ul_y_b = np.tile(ullr_b[:, 1], (ullr_a.shape[0], 1))
lr_x_b = np.tile(ullr_b[:, 2], (ullr_a.shape[0], 1))
lr_y_b = np.tile(ullr_b[:, 3], (ullr_a.shape[0], 1))
# determine the (x, y)-coordinates of the intersection rectangle
xA = np.maximum(ul_x_a, ul_x_b)
yA = np.maximum(ul_y_a, ul_y_b)
xB = np.minimum(lr_x_a, lr_x_b)
yB = np.minimum(lr_y_a, lr_y_b)
# compute the area of intersection rectangle
inter_areas = np.maximum(0, xB - xA + 1) * np.maximum(0, yB - yA + 1)
# compute the areas of both sets of bounding boxes
areas_a = (lr_x_a - ul_x_a + 1) * (lr_y_a - ul_y_a + 1)
areas_b = (lr_x_b - ul_x_b + 1) * (lr_y_b - ul_y_b + 1)
# compute the intersection over union
ious = inter_areas / (areas_a + areas_b - inter_areas)
# encode the bounding boxes back to their original encoding
bb_a.encode(
encoding=orig_encoding_a,
encoding_params=orig_encoding_params_a,
)
bb_b.encode(
encoding=orig_encoding_b,
encoding_params=orig_encoding_params_b,
)
# return the intersection over union values
return torch.from_numpy(ious)
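
# --- sanity check of the IoU formula above with two concrete corner boxes (added) ---
# Boxes are (ul_x, ul_y, lr_x, lr_y); the +1 terms count edges as part of the box,
# matching the intersection/area computation in intersection_over_union.
if __name__ == "__main__":
    a = np.array([0, 0, 9, 9])    # 10x10 box
    b = np.array([5, 5, 14, 14])  # 10x10 box shifted by (5, 5)
    inter = (max(0, min(a[2], b[2]) - max(a[0], b[0]) + 1)
             * max(0, min(a[3], b[3]) - max(a[1], b[1]) + 1))
    union = ((a[2] - a[0] + 1) * (a[3] - a[1] + 1)
             + (b[2] - b[0] + 1) * (b[3] - b[1] + 1) - inter)
    print(inter / union)          # -> 25 / 175 ~ 0.1429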
| 38.142857 | 108 | 0.621348 | 446 | 2,670 | 3.452915 | 0.213004 | 0.038961 | 0.051948 | 0.028571 | 0.281818 | 0.21039 | 0.05974 | 0.05974 | 0.05974 | 0 | 0 | 0.025368 | 0.261798 | 2,670 | 69 | 109 | 38.695652 | 0.755961 | 0.331086 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.027027 | false | 0 | 0.054054 | 0 | 0.108108 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc00f5681e5d6de33d25c3f40569baa6384aa095 | 885 | py | Python | setup.py | b30wulffz/automata-toolkit | 5e7a3bdbd9507cb551d3e08b548af3c6d0d69a58 | [
"MIT"
] | 8 | 2021-05-21T15:23:16.000Z | 2022-03-28T21:12:13.000Z | setup.py | b30wulffz/automata-toolkit | 5e7a3bdbd9507cb551d3e08b548af3c6d0d69a58 | [
"MIT"
] | 2 | 2022-01-11T18:35:29.000Z | 2022-01-12T10:00:23.000Z | setup.py | b30wulffz/automata-toolkit | 5e7a3bdbd9507cb551d3e08b548af3c6d0d69a58 | [
"MIT"
] | null | null | null | import pathlib
from setuptools import setup
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(
name="automata_toolkit",
version="1.0.2",
description="A tiny library which contains tools to convert, minimize and visualize Regular Expressions, NFA and DFA.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/b30wulffz/automata-toolkit",
author="Shlok Pandey",
author_email="shlokpandey123@gmail.com",
license="MIT",
keywords='automata, visualizer, nfa, dfa, regular expression',
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
],
packages=["automata_toolkit"],
include_package_data=True,
install_requires=[],
entry_points={},
) | 32.777778 | 123 | 0.687006 | 101 | 885 | 5.861386 | 0.722772 | 0.076014 | 0.084459 | 0.087838 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015193 | 0.181921 | 885 | 27 | 124 | 32.777778 | 0.802486 | 0 | 0 | 0 | 0 | 0 | 0.459368 | 0.027088 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.08 | 0 | 0.08 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0416097346819389f03c7603433e43bdd71c73 | 1,322 | py | Python | nd/io/tests/test_open.py | elfmanryan/geo | d83ce1d994c0a8a7fc461c22f8fd86e30216eefc | [
"MIT"
] | null | null | null | nd/io/tests/test_open.py | elfmanryan/geo | d83ce1d994c0a8a7fc461c22f8fd86e30216eefc | [
"MIT"
] | null | null | null | nd/io/tests/test_open.py | elfmanryan/geo | d83ce1d994c0a8a7fc461c22f8fd86e30216eefc | [
"MIT"
] | null | null | null | import pytest
import os
import xarray as xr
from nd.io import (open_dataset, open_netcdf, open_beam_dimap, open_rasterio,
to_netcdf, assemble_complex)
from nd.testing import generate_test_dataset
from xarray.testing import assert_equal as xr_assert_equal
data_path = 'data/'
nc_path = os.path.join(data_path, 'slc.nc')
tif_path = os.path.join(data_path, 'slc.tif')
dim_path = os.path.join(data_path, 'slc.dim')
@pytest.mark.parametrize('f', [nc_path, tif_path, dim_path])
def test_open_dataset(f):
ds = open_dataset(f)
assert isinstance(ds, (xr.Dataset, xr.DataArray))
ds.close()
def test_open_netcdf():
ds = open_netcdf(nc_path)
assert isinstance(ds, xr.Dataset)
ds.close()
def test_open_beam_dimap():
ds = open_beam_dimap(dim_path)
assert isinstance(ds, xr.Dataset)
ds.close()
def test_open_rasterio():
ds = open_rasterio(tif_path)
assert isinstance(ds, xr.DataArray)
@pytest.mark.skip
def test_equivalent_formats():
files = [nc_path, tif_path, dim_path]
datasets = [open_dataset(f) for f in files]
def test_write_read_netcdf(tmpdir):
ds = generate_test_dataset()
ds = assemble_complex(ds)
path = str(tmpdir.join('test_dataset.nc'))
to_netcdf(ds, path)
ds_read = open_dataset(path)
xr_assert_equal(ds, ds_read)
| 24.943396 | 77 | 0.717852 | 206 | 1,322 | 4.325243 | 0.237864 | 0.047138 | 0.049383 | 0.089787 | 0.316498 | 0.239057 | 0.194164 | 0.109989 | 0.109989 | 0.109989 | 0 | 0 | 0.16944 | 1,322 | 52 | 78 | 25.423077 | 0.811475 | 0 | 0 | 0.131579 | 0 | 0 | 0.031014 | 0 | 0 | 0 | 0 | 0 | 0.157895 | 1 | 0.157895 | false | 0 | 0.157895 | 0 | 0.315789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0695e1331b13dbb4e6be26fc2d275ab6576be1 | 1,302 | py | Python | examples/animations/nonvarinterp.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 142 | 2020-06-12T17:01:58.000Z | 2022-03-16T23:21:37.000Z | examples/animations/nonvarinterp.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 35 | 2020-04-15T15:34:54.000Z | 2022-03-19T20:26:47.000Z | examples/animations/nonvarinterp.py | goodhertz/coldtype | 2460b66abb28e9532f9e2b55167ae565f95366e7 | [
"Apache-2.0"
] | 14 | 2020-06-23T18:56:46.000Z | 2022-03-31T15:54:56.000Z | from coldtype import *
from coldtype.blender import *
Style.RegisterShorthandPrefix("≈", "~/Type/fonts/fonts")
mdpb = Font.Cacheable("≈/MDNichrome0.7-Black.otf")
mdpl = Font.Cacheable("≈/MDNichrome0.7-Light.otf")
mdiob = Font.Cacheable("≈/MDIO0.2-Bold.otf")
mdior = Font.Cacheable("≈/MDIO0.2-Regular.otf")
r = Rect(1080, 1080)
def build(font, **kwargs):
return (StSt("Inter-\npolation",
font, 250, leading=50, **kwargs)
.xalign(r)
.align(r.take(0.85, "mxy"))
.pen())
a = build(mdpl)
b = build(mdpb)
@b3d_animation(r, timeline=Timeline(90))
def nonvarinterp(f):
i = "{:.7f}".format(f.e("eeio", 1))
return DPS([
(StSt(i, mdiob, 72)
.align(f.a.r.take(0.4, "mny"), th=0)
.pen()
.f(hsl(0.65, 1, 0.3))
.tag("Num")
.ch(b3d("Text", lambda bp: bp
.extrude(f.e(1, rng=(0.01, 0.5)))
.emission(hsl(0.65, 1, 0.3), 1)))),
(a.interpolate(f.e("eeio", 1), b)
.mod_contour(18, lambda p: p
.rotate(f.e("l", 3, cyclic=0, rng=(0, -360))))
.f(hsl(0.4, 1, 0.3))
.removeOverlap()
.tag("Interpolation")
.ch(b3d("Text", lambda bp: bp
.extrude(f.e("eeio", 1, rng=(0.01, 3))))))]) | 31 | 62 | 0.513825 | 190 | 1,302 | 3.536842 | 0.463158 | 0.014881 | 0.083333 | 0.03125 | 0.247024 | 0.110119 | 0.083333 | 0.083333 | 0.083333 | 0 | 0 | 0.077813 | 0.269585 | 1,302 | 42 | 63 | 31 | 0.623554 | 0 | 0 | 0.055556 | 0 | 0 | 0.132771 | 0.05449 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0.027778 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc085c443eb4a753fd242b159f5129c17ee28854 | 786 | py | Python | Exersice6/problem1.py | asmundkk/Robotics | fd801b1ff35640fee99948762de720866e88e13f | [
"MIT"
] | null | null | null | Exersice6/problem1.py | asmundkk/Robotics | fd801b1ff35640fee99948762de720866e88e13f | [
"MIT"
] | null | null | null | Exersice6/problem1.py | asmundkk/Robotics | fd801b1ff35640fee99948762de720866e88e13f | [
"MIT"
] | null | null | null | import numpy as np
from numpy import sin, cos, arccos, arcsin, sqrt, arctan2, pi
L1 = 3
L2 = 2
L3 = 1
thb = 0
xb = 4
yb = 2
th3 = thb
x3 = xb - L3 * cos(th3)
y3 = yb - L3 * sin(th3)
# planar 2R inverse kinematics to the wrist point (x3, y3):
# alpha uses the law of cosines at the base joint, measured against the line to the wrist
alpha = arccos((x3**2 + y3**2 + L1**2 - L2**2) / (2 * sqrt(x3**2 + y3**2) * L1))
beta = arccos((L1**2 + L2**2 - x3**2 - y3**2) / (2 * L1 * L2))
gamma = arctan2(y3, x3)
# elbow down solution
th1_down = gamma - alpha
th2_down = pi - beta
# elbow up solution
th1_up = gamma + alpha
th2_up = beta - pi
print("elbow down solution")
print("th1", th1_down * 180 / pi)
print("th2", th2_down * 180 / pi)
print("th3", (thb - th1_down -th2_down) * 180 / pi)
print()
print("elbow up solution")
print("th1", th1_up * 180 / pi)
print("th2", th2_up * 180 / pi)
print("th3", (thb - th1_up - th2_up) * 180 / pi)
| 20.153846 | 80 | 0.59542 | 145 | 786 | 3.144828 | 0.255172 | 0.092105 | 0.109649 | 0.092105 | 0.282895 | 0.166667 | 0.083333 | 0.083333 | 0.083333 | 0.083333 | 0 | 0.128665 | 0.21883 | 786 | 38 | 81 | 20.684211 | 0.614007 | 0.047074 | 0 | 0 | 0 | 0 | 0.072386 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.074074 | 0 | 0.074074 | 0.333333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc08aee9aa7efefdd5c0150b763fe63e81f662b4 | 7,249 | py | Python | letor_conversion.py | HarrieO/RankingComplexLayouts | 53e8fdca3b2d4efffc2506423997e257f01ba094 | [
"MIT"
] | 15 | 2018-05-11T07:44:34.000Z | 2020-10-29T12:03:41.000Z | letor_conversion.py | HarrieO/RankingComplexLayouts | 53e8fdca3b2d4efffc2506423997e257f01ba094 | [
"MIT"
] | null | null | null | letor_conversion.py | HarrieO/RankingComplexLayouts | 53e8fdca3b2d4efffc2506423997e257f01ba094 | [
"MIT"
] | 7 | 2018-09-13T16:08:49.000Z | 2022-01-11T07:46:07.000Z | """Converts LETOR-format ranking data to TFRecords file format with Example protos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import random
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets import mnist
FLAGS = None
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _int64_list(value_list):
return tf.train.Feature(int64_list=tf.train.Int64List(value=value_list))
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _float_list(value_list):
return tf.train.Feature(float_list=tf.train.FloatList(value=value_list))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def convert_to(data_set, name):
"""Converts a dataset to tfrecords."""
images = data_set.images
labels = data_set.labels
num_examples = data_set.num_examples
if images.shape[0] != num_examples:
raise ValueError('Images size %d does not match label size %d.' %
(images.shape[0], num_examples))
rows = images.shape[1]
cols = images.shape[2]
depth = images.shape[3]
filename = os.path.join(FLAGS.directory, name + '.tfrecords')
print('Writing', filename)
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
image_raw = images[index].tostring()
example = tf.train.Example(features=tf.train.Features(feature={
'height': _int64_feature(rows),
'width': _int64_feature(cols),
'depth': _int64_feature(depth),
'label': _int64_feature(int(labels[index])),
# 'image_raw': _bytes_feature(image_raw)
}))
print('Example:', example)
writer.write(example.SerializeToString())
writer.close()
def main(unused_argv):
# # Get the data.
# data_sets = mnist.read_data_sets(FLAGS.directory,
# dtype=tf.uint8,
# reshape=False,
# validation_size=FLAGS.validation_size)
# # Convert to Examples and write the result to TFRecords.
# convert_to(data_sets.train, 'train')
# convert_to(data_sets.validation, 'validation')
# convert_to(data_sets.test, 'test')
train_queries, train_doclists, train_labels, train_feat = _read_file(FLAGS.input_folder + '/train.txt')
vali_queries, vali_doclists, vali_labels, vali_feat = _read_file(FLAGS.input_folder + '/vali.txt')
test_queries, test_doclists, test_labels, test_feat = _read_file(FLAGS.input_folder + '/test.txt')
features_to_keep = train_feat & vali_feat
for name, queries, doclists, labels, shards in [
('train', train_queries, train_doclists, train_labels, FLAGS.train_shards),
('vali', vali_queries, vali_doclists, vali_labels, FLAGS.vali_shards),
('test', test_queries, test_doclists, test_labels, FLAGS.test_shards),
]:
writers = []
for i in range(shards):
writers.append(
tf.python_io.TFRecordWriter(FLAGS.output_folder + '/%s.%d-of-%d.tfrecord' % (name, i , shards))
)
max_n_doc = 0
for qid, index in queries.items():
query_feat = {}
for fid in features_to_keep:
query_feat[fid] = []
# cutoff = int(np.random.uniform(4, 11))
# labels[index] = labels[index][:cutoff]
# doclists[index] = doclists[index][:cutoff]
np_labels = np.array(labels[index])
n_docs = len(labels[index])
max_n_doc = max(max_n_doc, n_docs)
# print(qid, 'n doc:', len(doclists[index]), 'labels', np_labels[np_labels > 0])
for doc in doclists[index]:
for fid in features_to_keep:
query_feat[fid].append(doc.get(fid,0.))
features = {}
features['qid'] = _int64_list([int(qid)]*len(labels[index]))
features['label'] = _int64_list(labels[index])
features['n_docs'] = _int64_list([n_docs])
# print("%s n_docs: %d" % (name, n_docs))
for fid in features_to_keep:
assert len(query_feat[fid]) == n_docs
min_v = min(query_feat[fid])
normalized = [x - min_v for x in query_feat[fid]]
max_v = max(normalized)
if max_v == 0:
max_v = 1.
normalized = [x/max_v for x in normalized]
features[fid] = _float_list(normalized)
example = tf.train.Example(features=tf.train.Features(feature=features))
random.choice(writers).write(example.SerializeToString())
print('%s total queries:' % name, len(queries))
print('%s max n docs:' % name, max_n_doc)
    for writer in writers:
      writer.close()
with open(FLAGS.output_folder +'/features.txt', 'w') as f:
for fid in features_to_keep:
f.write(fid + '\n')
def _read_file(path, filter_non_uniq=True):
'''
Read letor file and returns dict for qid to indices, labels for queries
and list of doclists of features per doc per query.
'''
current_qid = None
queries = {}
queryIndex = 0
doclists = []
labels = []
all_features = set()
feat_bounds = {}
for line in open(path, 'r'):
info = line[:line.find('#')].split()
qid = info[1].split(':')[1]
label = int(info[0])
if qid not in queries:
queryIndex = len(queries)
queries[qid] = queryIndex
doclists.append([])
labels.append([])
current_qid = qid
elif qid != current_qid:
queryIndex = queries[qid]
current_qid = qid
featureDict = {}
for pair in info[2:]:
featid, feature = pair.split(':')
all_features.add(featid)
feat_value = float(feature)
featureDict[featid] = feat_value
if featid in feat_bounds:
feat_bounds[featid] = (min(feat_bounds[featid][0], feat_value),
max(feat_bounds[featid][1], feat_value))
else:
feat_bounds[featid] = (feat_value, feat_value)
doclists[queryIndex].append(featureDict)
labels[queryIndex].append(label)
if filter_non_uniq:
unique_features = set()
for featid in all_features:
if feat_bounds[featid][0] < feat_bounds[featid][1]:
unique_features.add(featid)
return queries, doclists, labels, unique_features
else:
return queries, doclists, labels, all_features
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--input_folder',
type=str,
default='/Users/hroosterhuis/ILPS/datasets/NP2003/Fold1/',
      help='Directory with the input letor data.'
)
parser.add_argument(
'--output_folder',
type=str,
default='/Users/hroosterhuis/ILPS/datasets/TFRecords/NP2003/Fold1/',
      help='Directory to write the output TFRecord data to.'
)
parser.add_argument(
'--train_shards',
type=int,
default=5,
help='Number of shards to store data in.'
)
parser.add_argument(
'--vali_shards',
type=int,
default=1,
help='Number of shards to store data in.'
)
parser.add_argument(
'--test_shards',
type=int,
default=1,
help='Number of shards to store data in.'
)
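  # Example invocation (added; paths are illustrative):
  #   python letor_conversion.py --input_folder /path/to/NP2003/Fold1 \
  #       --output_folder /path/to/TFRecords/NP2003/Fold1 --train_shards 5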
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed) | 32.95 | 105 | 0.656366 | 961 | 7,249 | 4.736733 | 0.201873 | 0.021529 | 0.02109 | 0.021968 | 0.281415 | 0.262083 | 0.176845 | 0.171134 | 0.149605 | 0.112258 | 0 | 0.011101 | 0.217133 | 7,249 | 220 | 106 | 32.95 | 0.791013 | 0.125673 | 0 | 0.14881 | 0 | 0 | 0.090491 | 0.019844 | 0 | 0 | 0 | 0 | 0.005952 | 1 | 0.047619 | false | 0 | 0.059524 | 0.029762 | 0.14881 | 0.029762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc08ddc4a629be5123ab41c0712a790befc0bb9a | 53,327 | py | Python | mvd/mvd/data_import/importer.py | mieterinnenverband/MVD | b0c18d28f4a65a3da730dbfd23e10d41822bb104 | [
"MIT"
] | null | null | null | mvd/mvd/data_import/importer.py | mieterinnenverband/MVD | b0c18d28f4a65a3da730dbfd23e10d41822bb104 | [
"MIT"
] | null | null | null | mvd/mvd/data_import/importer.py | mieterinnenverband/MVD | b0c18d28f4a65a3da730dbfd23e10d41822bb104 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
from frappe.utils.data import add_days, getdate, get_datetime, now_datetime
# Header mapping (ERPNext <> MVD)
hm = {
'mitglied_nr': 'mitglied_nr',
'mitglied_id': 'mitglied_id',
'status_c': 'status_c',
'sektion_id': 'sektion_id',
'zuzug_sektion': 'sektion_zq_id',
'mitgliedtyp_c': 'mitgliedtyp_c',
'mitglied_c': 'mitglied_c',
'wichtig': 'wichtig',
'eintritt': 'datum_eintritt',
'austritt': 'datum_austritt',
'wegzug': 'datum_wegzug',
'zuzug': 'datum_zuzug',
'kuendigung': 'datum_kuend_per',
'adresstyp_c': 'adresstyp_c',
'adress_id': 'adress_id',
'firma': 'firma',
'zusatz_firma': 'zusatz_firma',
'anrede_c': 'anrede_c',
'nachname_1': 'nachname_1',
'vorname_1': 'vorname_1',
'tel_p_1': 'tel_p_1',
'tel_m_1': 'tel_m_1',
'tel_g_1': 'tel_g_1',
'e_mail_1': 'e_mail_1',
'zusatz_adresse': 'zusatz_adresse',
'strasse': 'strasse',
'nummer': 'nummer',
'nummer_zu': 'nummer_zu',
'postfach': 'postfach',
'postfach_nummer': 'postfach_nummer',
'plz': 'plz',
'ort': 'ort',
'nachname_2': 'nachname_2',
'vorname_2': 'vorname_2',
'tel_p_2': 'tel_p_2',
'tel_m_2': 'tel_m_2',
'tel_g_2': 'tel_g_2',
'e_mail_2': 'e_mail_2',
'datum': 'datum',
'jahr': 'jahr',
'offen': 'offen',
'ref_nr_five_1': 'ref_nr_five_1',
'kz_1': 'kz_1',
'tkategorie_d': 'tkategorie_d',
'pers_name': 'pers_name',
'datum_von': 'datum_von',
'datum_bis': 'datum_bis',
'datum_erinnerung': 'datum_erinnerung',
'notiz_termin': 'notiz_termin',
'erledigt': 'erledigt',
'nkategorie_d': 'nkategorie_d',
'notiz': 'notiz',
'weitere_kontaktinfos': 'weitere_kontaktinfos',
'mkategorie_d': 'mkategorie_d',
'benutzer_name': 'benutzer_name',
'jahr_bez_mitgl': 'jahr_bez_mitgl',
'objekt_hausnummer': 'objekt_hausnummer',
'nummer_zu': 'nummer_zu',
'objekt_nummer_zu': 'objekt_nummer_zu',
'rg_nummer_zu': 'rg_nummer_zu',
'buchungen': 'buchungen',
'online_haftpflicht': 'online_haftpflicht',
'online_gutschrift': 'online_gutschrift',
'online_betrag': 'online_betrag',
'datum_online_verbucht': 'datum_online_verbucht',
'datum_online_gutschrift': 'datum_online_gutschrift',
'online_payment_method': 'online_payment_method',
'online_payment_id': 'online_payment_id'
}
def read_csv(site_name, file_name, limit=False):
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
if get_value(row, 'adresstyp_c') == 'MITGL':
create_mitgliedschaft(row)
else:
frappe.log_error("{0}".format(row), 'Adresse != MITGL, aber ID noch nicht erfasst')
else:
update_mitgliedschaft(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
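
# Example (added; mirrors the invocation style documented on the importers below,
# the file name is illustrative):
# sudo bench execute mvd.mvd.data_import.importer.read_csv --kwargs "{'site_name': 'site1.local', 'file_name': 'mitglieder.csv'}"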
def create_mitgliedschaft(data):
try:
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitgliedtyp_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
new_mitgliedschaft = frappe.get_doc({
'doctype': 'MV Mitgliedschaft',
'mitglied_nr': str(get_value(data, 'mitglied_nr')).zfill(8),
'mitglied_id': str(get_value(data, 'mitglied_id')),
'status_c': get_status_c(get_value(data, 'status_c')),
'sektion_id': get_sektion(get_value(data, 'sektion_id')),
'mitgliedtyp_c': get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c')),
'mitglied_c': get_mitglied_c(get_value(data, 'mitglied_c')),
#'wichtig': get_value(data, 'wichtig'),
'eintritt': get_formatted_datum(get_value(data, 'eintritt')),
'austritt': get_formatted_datum(get_value(data, 'austritt')),
'wegzug': get_formatted_datum(get_value(data, 'wegzug')),
            #'wegzug_zu': '', --> where does this info come from?
'zuzug': zuzug,
'zuzug_von': zuzug_von,
'kuendigung': get_formatted_datum(get_value(data, 'kuendigung')),
'kundentyp': kundentyp,
'firma': get_value(data, 'firma'),
'zusatz_firma': get_value(data, 'zusatz_firma'),
'anrede_c': get_anrede_c(get_value(data, 'anrede_c')),
'nachname_1': get_value(data, 'nachname_1'),
'vorname_1': get_value(data, 'vorname_1'),
'tel_p_1': str(get_value(data, 'tel_p_1')),
'tel_m_1': str(get_value(data, 'tel_m_1')),
'tel_g_1': str(get_value(data, 'tel_g_1')),
'e_mail_1': get_value(data, 'e_mail_1'),
'zusatz_adresse': get_value(data, 'zusatz_adresse'),
'strasse': strasse,
'objekt_strasse': strasse, # fallback
'objekt_ort': get_value(data, 'ort'), # fallback
'nummer': get_value(data, 'nummer'),
'nummer_zu': get_value(data, 'nummer_zu'),
'postfach': postfach,
'postfach_nummer': get_value(data, 'postfach_nummer'),
'plz': get_value(data, 'plz'),
'ort': get_value(data, 'ort'),
'hat_solidarmitglied': hat_solidarmitglied,
'nachname_2': get_value(data, 'nachname_2'),
'vorname_2': get_value(data, 'vorname_2'),
'tel_p_2': str(get_value(data, 'tel_p_2')),
#'tel_m_2': str(get_value(data, 'tel_m_2')),
'tel_g_2': str(get_value(data, 'tel_g_2')),
'e_mail_2': str(get_value(data, 'e_mail_2'))
})
new_mitgliedschaft.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n---\n{1}".format(err, data), 'create_mitgliedschaft')
return
def update_mitgliedschaft(data):
try:
mitgliedschaft = frappe.get_doc("MV Mitgliedschaft", str(get_value(data, 'mitglied_id')))
if get_value(data, 'adresstyp_c') == 'MITGL':
            # member (incl. solidarity member)
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitglied_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
mitgliedschaft.mitglied_nr = str(get_value(data, 'mitglied_nr')).zfill(8)
mitgliedschaft.status_c = get_status_c(get_value(data, 'status_c'))
mitgliedschaft.sektion_id = get_sektion(get_value(data, 'sektion_id'))
mitgliedschaft.mitgliedtyp_c = get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c'))
mitgliedschaft.mitglied_c = get_mitglied_c(get_value(data, 'mitglied_c'))
#mitgliedschaft.wichtig = get_value(data, 'wichtig')
mitgliedschaft.eintritt = get_formatted_datum(get_value(data, 'eintritt'))
mitgliedschaft.austritt = get_formatted_datum(get_value(data, 'austritt'))
mitgliedschaft.wegzug = get_formatted_datum(get_value(data, 'wegzug'))
mitgliedschaft.zuzug = zuzug
            #mitgliedschaft.wegzug_zu = '' --> where does this info come from?
mitgliedschaft.zuzug_von = zuzug_von
mitgliedschaft.kuendigung = get_formatted_datum(get_value(data, 'kuendigung'))
mitgliedschaft.kundentyp = kundentyp
mitgliedschaft.firma = get_value(data, 'firma')
mitgliedschaft.zusatz_firma = get_value(data, 'zusatz_firma')
mitgliedschaft.anrede_c = get_anrede_c(get_value(data, 'anrede_c'))
mitgliedschaft.nachname_1 = get_value(data, 'nachname_1')
mitgliedschaft.vorname_1 = get_value(data, 'vorname_1')
mitgliedschaft.tel_p_1 = str(get_value(data, 'tel_p_1'))
mitgliedschaft.tel_m_1 = str(get_value(data, 'tel_m_1'))
mitgliedschaft.tel_g_1 = str(get_value(data, 'tel_g_1'))
mitgliedschaft.e_mail_1 = get_value(data, 'e_mail_1')
mitgliedschaft.zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.strasse = strasse
mitgliedschaft.nummer = get_value(data, 'nummer')
mitgliedschaft.nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.postfach = postfach
mitgliedschaft.postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.plz = get_value(data, 'plz')
mitgliedschaft.ort = get_value(data, 'ort')
mitgliedschaft.hat_solidarmitglied = hat_solidarmitglied
mitgliedschaft.nachname_2 = get_value(data, 'nachname_2')
mitgliedschaft.vorname_2 = get_value(data, 'vorname_2')
mitgliedschaft.tel_p_2 = str(get_value(data, 'tel_p_2'))
#mitgliedschaft.tel_m_2 = str(get_value(data, 'tel_m_2'))
mitgliedschaft.tel_g_2 = str(get_value(data, 'tel_g_2'))
mitgliedschaft.e_mail_2 = get_value(data, 'e_mail_2')
mitgliedschaft.adress_id_mitglied = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'OBJEKT':
            # object address
mitgliedschaft.objekt_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.objekt_strasse = get_value(data, 'strasse') or 'Fehlende Angaben!'
mitgliedschaft.objekt_hausnummer = get_value(data, 'nummer')
mitgliedschaft.objekt_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.objekt_plz = get_value(data, 'plz')
mitgliedschaft.objekt_ort = get_value(data, 'ort') or 'Fehlende Angaben!'
mitgliedschaft.adress_id_objekt = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'RECHN':
            # billing address
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
mitgliedschaft.abweichende_rechnungsadresse = 1
mitgliedschaft.rg_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.rg_strasse = strasse
mitgliedschaft.rg_nummer = get_value(data, 'nummer')
mitgliedschaft.rg_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.rg_postfach = postfach
mitgliedschaft.rg_postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.rg_plz = get_value(data, 'plz')
mitgliedschaft.rg_ort = get_value(data, 'ort')
mitgliedschaft.adress_id_rg = get_value(data, 'adress_id')
# else:
# TBD!
mitgliedschaft.save(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n{1}".format(err, data), 'update_mitgliedschaft')
return
def get_sektion(id):
    # listing not exhaustive, verify!
if id == 25:
return 'MVD'
elif id == 4:
return 'Bern'
elif id == 8:
return 'Basel Stadt'
elif id == 14:
return 'Luzern'
elif id == 3:
return 'Aargau'
else:
return 'Sektions-ID unbekannt'
def get_status_c(status_c):
    # listing probably not exhaustive, verify!
if status_c == 'AREG':
return 'Mitglied'
elif status_c == 'MUTATI':
return 'Mutation'
elif status_c == 'AUSSCH':
return 'Ausschluss'
elif status_c == 'GESTOR':
return 'Gestorben'
elif status_c == 'KUNDIG':
return 'Kündigung'
elif status_c == 'WEGZUG':
return 'Wegzug'
elif status_c == 'ZUZUG':
return 'Zuzug'
else:
return 'Mitglied'
def get_mitgliedtyp_c(mitgliedtyp_c):
# TBD!!!!!!!!!!
if mitgliedtyp_c == 'PRIV':
return 'Privat'
else:
return 'Privat'
def get_mitglied_c(mitglied_c):
# TBD!!!!!!!!!!
if mitglied_c == 'MITGL':
return 'Mitglied'
else:
return 'Mitglied'
def get_anrede_c(anrede_c):
anrede_c = int(anrede_c)
if anrede_c == 1:
return 'Herr'
elif anrede_c == 2:
return 'Frau'
elif anrede_c == 3:
return 'Frau und Herr'
elif anrede_c == 4:
return 'Herr und Frau'
elif anrede_c == 5:
return 'Familie'
elif anrede_c == 7:
return 'Herren'
elif anrede_c == 8:
return 'Frauen'
else:
return ''
def get_formatted_datum(datum):
if datum:
datum_raw = datum.split(" ")[0]
if not datum_raw:
return ''
else:
return datum_raw.replace("/", "-")
else:
return ''
def check_postfach(row, value):
value = row[hm[value]]
if not pd.isnull(value):
postfach = int(value)
if postfach < 0:
return 1
else:
return 0
else:
return 0
def get_value(row, value):
value = row[hm[value]]
if not pd.isnull(value):
try:
if isinstance(value, str):
return value.strip()
else:
return value
except:
return value
else:
return ''
def migliedschaft_existiert(mitglied_id):
    # NOTE: this queries `tabMitgliedschaft`, while create/update above use the
    # doctype 'MV Mitgliedschaft' (table `tabMV Mitgliedschaft`); verify which
    # table the deployed schema actually uses before relying on this check.
    anz = frappe.db.sql("""SELECT COUNT(`name`) AS `qty` FROM `tabMitgliedschaft` WHERE `mitglied_id` = '{mitglied_id}'""".format(mitglied_id=mitglied_id), as_dict=True)[0].qty
if anz > 0:
return True
else:
return False
# --------------------------------------------------------------
# Debitor Importer
# --------------------------------------------------------------
def import_debitoren(site_name, file_name, limit=False, delete_from=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_debitoren --kwargs "{'site_name': 'site1.local', 'file_name': 'offene_rechnungen.csv'}"
'''
if delete_from:
SQL_SAFE_UPDATES_false = frappe.db.sql("""SET SQL_SAFE_UPDATES=0""", as_list=True)
delete_sinvs = frappe.db.sql("""DELETE FROM `tabSales Invoice` WHERE `sektion_id` = '{delete_from}'
AND `docstatus` = 1
AND `status` = 'Overdue'""".format(delete_from=delete_from), as_list=True)
SQL_SAFE_UPDATES_true = frappe.db.sql("""SET SQL_SAFE_UPDATES=1""", as_list=True)
frappe.db.commit()
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if get_value(row, 'offen') > 0:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
frappe.log_error("{0}".format(row), 'Mitglied existiert nicht')
else:
erstelle_rechnung(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def erstelle_rechnung(row):
try:
file_qrr = int(str(get_value(row, 'ref_nr_five_1')).replace(" ", ""))
qrr = '{num:027d}'.format(num=file_qrr)
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE REPLACE(`esr_reference`, ' ', '') = '{qrr}'""".format(qrr=qrr))
if len(frappe.db.sql(existing_sinv_query, as_list=True)) > 0:
frappe.log_error("{0}".format(row), 'Rechnung wurde bereits erstellt')
return
else:
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}'""".format(mitglied_id=str(get_value(row, 'mitglied_id'))))
existing_sinv = frappe.db.sql(existing_sinv_query, as_dict=True)
if len(existing_sinv) > 0:
frappe.db.sql("""UPDATE `tabSales Invoice` SET `esr_reference` = '{qrr}' WHERE `name` = '{name}'""".format(qrr=qrr, name=existing_sinv[0].name), as_list=True)
frappe.log_error("{0}".format(row), 'Update QRR')
return
else:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
posting_date = str(get_value(row, 'datum')).split(" ")[0]
item = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "mitgliedschafts_artikel")
company = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "company")
cost_center = frappe.get_value("Company", company, "cost_center")
sektions_code = str(frappe.get_value("Sektion", mitgliedschaft.sektion_id, "sektion_id"))
sinv = frappe.get_doc({
"doctype": "Sales Invoice",
"company": company,
"customer": mitgliedschaft.rg_kunde or mitgliedschaft.kunde_mitglied,
"set_posting_time": 1,
"posting_date": posting_date,
"posting_time": str(get_value(row, 'datum')).split(" ")[1],
"ist_mitgliedschaftsrechnung": 1,
"mv_mitgliedschaft": mitgliedschaft.name,
"sektion_id": mitgliedschaft.sektion_id,
"sektions_code": sektions_code,
"mitgliedschafts_jahr": str(get_value(row, 'jahr')),
"due_date": add_days(posting_date, 30),
"esr_reference": qrr,
"items": [
{
"item_code": item,
"qty": 1,
"rate": get_value(row, 'offen'),
"cost_center": cost_center
}
]
})
sinv.insert()
sinv.submit()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Rechnung konnte nicht erstellt werden')
return
# --------------------------------------------------------------
# Miveba-Termin Importer
# --------------------------------------------------------------
def import_termine(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_termine --kwargs "{'site_name': 'site1.local', 'file_name': 'termine.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_termin(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_termin(row):
try:
kategorie = check_kategorie(row)
kontakt = check_kontakt(row)
termin_status = check_termin_status(row, 'erledigt')
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
new = frappe.get_doc({
"doctype": "Termin",
"kategorie": kategorie,
"kontakt": kontakt,
"sektion_id": sektion_id,
"von": str(get_value(row, 'datum_von')),
"bis": str(get_value(row, 'datum_bis')),
"erinnerung": str(get_value(row, 'datum_erinnerung')),
"notitz": str(get_value(row, 'notiz_termin')),
"status": termin_status,
"mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
})
new.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
def check_kategorie(row):
kategorie = str(get_value(row, 'tkategorie_d'))
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTerminkategorie` WHERE `kategorie` = '{kategorie}' AND `sektion_id` = '{sektion_id}'""".format(kategorie=kategorie, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Terminkategorie",
"kategorie": kategorie,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
def check_kontakt(row):
kontakt = str(get_value(row, 'pers_name'))
if kontakt and kontakt != '':
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTermin Kontaktperson` WHERE `kontakt` = '{kontakt}' AND `sektion_id` = '{sektion_id}'""".format(kontakt=kontakt, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Termin Kontaktperson",
"kontakt": kontakt,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
else:
return ''
def check_termin_status(row, value):
value = row[hm[value]]
if not pd.isnull(value):
termin_status = int(value)
if termin_status < 0:
return 'Closed'
else:
return 'Open'
else:
return 'Open'
# --------------------------------------------------------------
# Miveba-Notizen Importer
# --------------------------------------------------------------
def import_notizen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_notizen --kwargs "{'site_name': 'site1.local', 'file_name': 'notizen.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_notiz(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Notiz konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break

def create_notiz(row):
    try:
        # get_datetime / now_datetime are frappe.utils helpers (presumably imported at the top of this module)
        datum_erinnerung = str(get_value(row, 'datum_erinnerung'))
        if get_datetime(datum_erinnerung) > now_datetime():
            create_todo(row)
        else:
            create_comment(row)
        return
    except Exception as err:
        frappe.log_error("{0}\n\n{1}".format(err, row), 'Notiz konnte nicht erstellt werden')

def create_comment(row):
    try:
        mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
        description = str(get_value(row, 'nkategorie_d')) + "<br>"
        description += str(get_value(row, 'datum_von')) + "<br>"
        description += str(get_value(row, 'notiz')) + "<br>"
        description += str(get_value(row, 'benutzer_name')) + "<br>"
        mitgliedschaft.add_comment('Comment', text=description)
        frappe.db.commit()
    except Exception as err:
        frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')

def create_todo(row):
    try:
        description = str(get_value(row, 'nkategorie_d')) + "<br>"
        description += str(get_value(row, 'datum_von')) + "<br>"
        description += str(get_value(row, 'notiz')) + "<br>"
        description += str(get_value(row, 'benutzer_name')) + "<br>"
        mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
        owner = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "virtueller_user")
        todo = frappe.get_doc({
            "doctype": "ToDo",
            "owner": owner,
            "reference_type": "Mitgliedschaft",
            "reference_name": str(get_value(row, 'mitglied_id')),
            "description": description or '',
            "priority": "Medium",
            "status": "Open",
            "date": str(get_value(row, 'datum_erinnerung')),
            "assigned_by": owner,
            "mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
        }).insert(ignore_permissions=True)
        frappe.db.commit()
        return
    except Exception as err:
        frappe.log_error("{0}\n\n{1}".format(err, row), 'ToDo konnte nicht erstellt werden')
# --------------------------------------------------------------
# Weitere Kontaktinfos Importer
# --------------------------------------------------------------
def import_weitere_kontaktinfos(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.import_weitere_kontaktinfos --kwargs "{'site_name': 'site1.local', 'file_name': 'weitere_kontaktinfos.csv'}"
    '''
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    erstelle_weitere_kontaktinformation(row)
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Weitere Kontaktinformation konnte nicht erstellt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break

def erstelle_weitere_kontaktinformation(row):
    try:
        mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
        description = str(get_value(row, 'weitere_kontaktinfos')).replace("\n", "<br>")
        mitgliedschaft.add_comment('Comment', text=description)
        frappe.db.commit()
    except Exception as err:
        frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')
# --------------------------------------------------------------
# Miveba Buchungen Importer
# --------------------------------------------------------------
def import_miveba_buchungen(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.import_miveba_buchungen --kwargs "{'site_name': 'site1.local', 'file_name': 'miveba_buchungen.csv'}"
    '''
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    commit_count = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    mitglied_id = str(get_value(row, 'mitglied_id'))
                    miveba_buchungen = str(get_value(row, 'buchungen'))
                    frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `miveba_buchungen` = '{miveba_buchungen}' WHERE `name` = '{mitglied_id}'""".format(miveba_buchungen=miveba_buchungen, mitglied_id=mitglied_id), as_list=True)
                    # commit in batches of 1000 to keep transactions small
                    if commit_count == 1000:
                        frappe.db.commit()
                        commit_count = 1
                    else:
                        commit_count += 1
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Miveba Buchung konnte nicht erstellt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
    # flush whatever is left in the final partial batch
    frappe.db.commit()
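
# Editor's sketch (hedged): the format-string UPDATE above breaks if the
# CSV value contains quotes. frappe.db.set_value issues the same
# single-field UPDATE with proper escaping; a hypothetical drop-in,
# assuming the same field name:
# frappe.db.set_value("Mitgliedschaft", mitglied_id, "miveba_buchungen", miveba_buchungen, update_modified=False)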
# --------------------------------------------------------------
# Tags Importer
# --------------------------------------------------------------
def import_tags(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.import_tags --kwargs "{'site_name': 'site1.local', 'file_name': 'kategorien.csv'}"
    '''
    from frappe.desk.tags import add_tag
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    add_tag(str(get_value(row, 'mkategorie_d')), "Mitgliedschaft", str(get_value(row, 'mitglied_id')))
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Tag konnte nicht erstellt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
# --------------------------------------------------------------
# Special Importer
# --------------------------------------------------------------
def import_special(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.import_special --kwargs "{'site_name': 'site1.local', 'file_name': 'jahr_bez_mitgl-PROD-1.csv'}"
    '''
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    mitglied_id = str(get_value(row, 'mitglied_id'))
                    jahr = str(get_value(row, 'jahr_bez_mitgl'))
                    frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `zahlung_mitgliedschaft` = '{jahr}' WHERE `name` = '{mitglied_id}'""".format(jahr=jahr, mitglied_id=mitglied_id), as_list=True)
                    frappe.db.commit()
                    if int(jahr) == 2022:
                        # drop open, submitted invoices for memberships already paid for 2022
                        sinvs = frappe.db.sql("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}' AND `status` != 'Paid' AND `docstatus` = 1""".format(mitglied_id=mitglied_id), as_dict=True)
                        for sinv in sinvs:
                            try:
                                sinv = frappe.get_doc("Sales Invoice", sinv.name)
                                sinv.cancel()
                                sinv.delete()
                                frappe.db.commit()
                            except Exception as e:
                                frappe.log_error("{0}\n\n{1}\n\n{2}".format(e, sinv.name, row), 'RG konnte nicht gelöscht werden')
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Special konnte nicht erstellt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
# --------------------------------------------------------------
# Adressen Update
# --------------------------------------------------------------
def update_adressen(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.update_adressen --kwargs "{'site_name': 'site1.local', 'file_name': 'hausnummer_zusatz_gefiltert.csv'}"
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    submit_counter = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    objekt_hausnummer = str(get_value(row, 'objekt_hausnummer'))
                    nummer_zu = str(get_value(row, 'nummer_zu'))
                    objekt_nummer_zu = str(get_value(row, 'objekt_nummer_zu'))
                    rg_nummer_zu = str(get_value(row, 'rg_nummer_zu'))
                    mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
                    mitgliedschaft.objekt_hausnummer = objekt_hausnummer
                    mitgliedschaft.nummer_zu = nummer_zu
                    mitgliedschaft.objekt_nummer_zu = objekt_nummer_zu
                    mitgliedschaft.rg_nummer_zu = rg_nummer_zu
                    mitgliedschaft.letzte_bearbeitung_von = 'SP'
                    mitgliedschaft.save()
                    create_sp_queue(mitgliedschaft, True)
                    # commit in batches of 100 saves
                    if submit_counter == 100:
                        frappe.db.commit()
                        submit_counter = 1
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Adressen Update konnte nicht durchgeführt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
            submit_counter += 1
        else:
            break
    # flush the final partial batch
    frappe.db.commit()
# --------------------------------------------------------------
# Ampel Reset
# --------------------------------------------------------------
def ampel_reset():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.ampel_reset
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import get_ampelfarbe
    # recalculate all memberships whose traffic light is currently red
    mitgliedschaften = frappe.db.sql("""SELECT `name` FROM `tabMitgliedschaft` WHERE `ampel_farbe` = 'ampelrot'""", as_dict=True)
    total = len(mitgliedschaften)
    print("Setze/Berechne Ampel bei {0} Mitgliedschaften".format(total))
    submit_counter = 1
    count = 1
    for mitgliedschaft in mitgliedschaften:
        m = frappe.get_doc("Mitgliedschaft", mitgliedschaft.name)
        neue_farbe = get_ampelfarbe(m)
        if neue_farbe != m.ampel_farbe:
            set_neue_farbe = frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `ampel_farbe` = '{neue_farbe}' WHERE `name` = '{name}'""".format(neue_farbe=neue_farbe, name=m.name), as_list=True)
            submit_counter += 1
            if submit_counter == 100:
                frappe.db.commit()
                submit_counter = 1
        print("{0} von {1}".format(count, total))
        count += 1
    frappe.db.commit()
# --------------------------------------------------------------
# Set checkbox "Aktive Mitgliedschaft"
# --------------------------------------------------------------
def aktive_mitgliedschaft():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.aktive_mitgliedschaft
    '''
    print("Aktiviere aktive Mitgliedschaften...")
    SQL_SAFE_UPDATES_false = frappe.db.sql("""SET SQL_SAFE_UPDATES=0""", as_list=True)
    update_cb = frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `aktive_mitgliedschaft` = 1 WHERE `status_c` NOT IN ('Gestorben', 'Wegzug', 'Ausschluss', 'Inaktiv')""", as_list=True)
    SQL_SAFE_UPDATES_true = frappe.db.sql("""SET SQL_SAFE_UPDATES=1""", as_list=True)
    frappe.db.commit()
    print("Aktive Mitgliedschaften aktiviert")
# --------------------------------------------------------------
# Toggle checkbox "Geschenkunterlagen an Schenker"
# --------------------------------------------------------------
def change_geschenk_cb():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.change_geschenk_cb
    '''
    mitgliedschaften = frappe.db.sql("""SELECT `name`, `geschenkunterlagen_an_schenker` FROM `tabMitgliedschaft` WHERE `ist_geschenkmitgliedschaft` = 1""", as_dict=True)
    print("Change {0} Mitgliedschaften".format(len(mitgliedschaften)))
    count = 1
    for m in mitgliedschaften:
        if int(m.geschenkunterlagen_an_schenker) == 1:
            frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `geschenkunterlagen_an_schenker` = 0 WHERE `name` = '{mitgliedschaft}'""".format(mitgliedschaft=m.name), as_list=True)
        else:
            frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `geschenkunterlagen_an_schenker` = 1 WHERE `name` = '{mitgliedschaft}'""".format(mitgliedschaft=m.name), as_list=True)
        print("{0} von {1}".format(count, len(mitgliedschaften)))
        count += 1
    frappe.db.commit()
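
# Editor's sketch (hedged): the read-then-toggle loop above collapses to
# one set-based statement, since a 0/1 column can be flipped in place
# (assumes the checkbox only ever holds 0 or 1):
# frappe.db.sql("""UPDATE `tabMitgliedschaft`
#                  SET `geschenkunterlagen_an_schenker` = 1 - `geschenkunterlagen_an_schenker`
#                  WHERE `ist_geschenkmitgliedschaft` = 1""")
# frappe.db.commit()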
# --------------------------------------------------------------
# Beitritt Update
# --------------------------------------------------------------
def update_beitritt(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.update_beitritt --kwargs "{'site_name': 'site1.local', 'file_name': 'mitglieder_ids_2022.csv'}"
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    submit_counter = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `zahlung_mitgliedschaft` = '2022' WHERE `name` = '{mitglied_id}'""".format(mitglied_id=str(get_value(row, 'mitglied_id'))), as_list=True)
                    # commit in batches of 100 updates
                    if submit_counter == 100:
                        frappe.db.commit()
                        submit_counter = 1
                    else:
                        submit_counter += 1
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Beitritt Update konnte nicht durchgeführt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
    # flush the final partial batch
    frappe.db.commit()
# --------------------------------------------------------------
# OnlinePayment Update
# --------------------------------------------------------------
def update_online_payment(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.update_online_payment --kwargs "{'site_name': 'site1.local', 'file_name': 'mitglied_nr_paymentId_vor_7_Maerz.csv'}"
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    submit_counter = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    online_haftpflicht = str(get_value(row, 'online_haftpflicht'))
                    online_gutschrift = str(get_value(row, 'online_gutschrift'))
                    online_betrag = str(get_value(row, 'online_betrag'))
                    datum_online_verbucht = str(get_value(row, 'datum_online_verbucht'))
                    datum_online_gutschrift = str(get_value(row, 'datum_online_gutschrift'))
                    online_payment_method = str(get_value(row, 'online_payment_method'))
                    online_payment_id = str(get_value(row, 'online_payment_id'))
                    frappe.db.sql("""UPDATE `tabMitgliedschaft` SET
                                        `online_haftpflicht` = '{online_haftpflicht}',
                                        `online_gutschrift` = '{online_gutschrift}',
                                        `online_betrag` = '{online_betrag}',
                                        `datum_online_verbucht` = '{datum_online_verbucht}',
                                        `datum_online_gutschrift` = '{datum_online_gutschrift}',
                                        `online_payment_method` = '{online_payment_method}',
                                        `online_payment_id` = '{online_payment_id}'
                                     WHERE `name` = '{mitglied_id}'""".format(online_haftpflicht=online_haftpflicht,
                                                                              online_gutschrift=online_gutschrift,
                                                                              online_betrag=online_betrag,
                                                                              datum_online_verbucht=datum_online_verbucht,
                                                                              datum_online_gutschrift=datum_online_gutschrift,
                                                                              online_payment_method=online_payment_method,
                                                                              online_payment_id=online_payment_id,
                                                                              mitglied_id=str(get_value(row, 'mitglied_id'))), as_list=True)
                    # commit in batches of 100 updates
                    if submit_counter == 100:
                        frappe.db.commit()
                        submit_counter = 1
                    else:
                        submit_counter += 1
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'OnlinePayment Update konnte nicht durchgeführt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
    # flush the final partial batch
    frappe.db.commit()
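
# Editor's sketch (hedged): recent Frappe versions accept a dict of
# fields in frappe.db.set_value, which avoids hand-building the
# multi-column UPDATE above; verify against the Frappe version in use:
# frappe.db.set_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), {
#     "online_haftpflicht": online_haftpflicht,
#     "online_gutschrift": online_gutschrift,
#     "online_betrag": online_betrag,
#     "datum_online_verbucht": datum_online_verbucht,
#     "datum_online_gutschrift": datum_online_gutschrift,
#     "online_payment_method": online_payment_method,
#     "online_payment_id": online_payment_id,
# }, update_modified=False)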
# --------------------------------------------------------------
# Address reset (fixes "\n" in Postfach addresses)
# --------------------------------------------------------------
def adressen_fix_postfach():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.adressen_fix_postfach
    '''
    mitgliedschaften = frappe.db.sql("""SELECT `name` FROM `tabMitgliedschaft` WHERE `postfach` = 1""", as_dict=True)
    total = len(mitgliedschaften)
    print("Setze Adressen bei {0} Mitgliedschaften".format(total))
    submit_counter = 0
    count = 0
    for mitgliedschaft in mitgliedschaften:
        m = frappe.get_doc("Mitgliedschaft", mitgliedschaft.name)
        # a plain save() re-runs the document's validation/save hooks
        m.save()
        submit_counter += 1
        if submit_counter == 100:
            frappe.db.commit()
            submit_counter = 1
        count += 1
        print("{0} von {1}".format(count, total))
    frappe.db.commit()
# --------------------------------------------------------------
# Fix: Zahlung Mitgliedschaft <> Bezahltes Mitgliedschaftsjahr
# --------------------------------------------------------------
def fix_zahlungs_jahr():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.fix_zahlungs_jahr
    '''
    mitgliedschaften = frappe.db.sql("""SELECT `name`, `zahlung_mitgliedschaft` FROM `tabMitgliedschaft` WHERE `zahlung_mitgliedschaft` > 0""", as_dict=True)
    total = len(mitgliedschaften)
    print("Fixe Zahlung Mitgliedschaft <> Bezahltes Mitgliedschaftsjahr bei {0} Mitgliedschaften".format(total))
    submit_counter = 0
    count = 0
    for mitgliedschaft in mitgliedschaften:
        frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `bezahltes_mitgliedschaftsjahr` = {zahlung_mitgliedschaft} WHERE `name` = '{name}'""".format(zahlung_mitgliedschaft=mitgliedschaft.zahlung_mitgliedschaft, name=mitgliedschaft.name), as_list=True)
        submit_counter += 1
        if submit_counter == 100:
            frappe.db.commit()
            submit_counter = 1
        count += 1
        print("{0} von {1}".format(count, total))
    frappe.db.commit()
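
# Editor's sketch (hedged): because the target value comes from another
# column of the same row, the whole loop is equivalent to one statement:
# frappe.db.sql("""UPDATE `tabMitgliedschaft`
#                  SET `bezahltes_mitgliedschaftsjahr` = `zahlung_mitgliedschaft`
#                  WHERE `zahlung_mitgliedschaft` > 0""")
# frappe.db.commit()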
# --------------------------------------------------------------
# Post-migration for SP
# --------------------------------------------------------------
def nachmigration_fuer_sp():
    '''
    Example:
    sudo bench --site [site_name] execute mvd.mvd.data_import.importer.nachmigration_fuer_sp
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import send_mvm_to_sp
    mitgliedschaften = frappe.db.sql("""SELECT `name` FROM `tabMitgliedschaft` WHERE `datum_zahlung_mitgliedschaft` BETWEEN CAST('2022-03-01' AS DATE) AND CAST('2022-05-03' AS DATE)""", as_dict=True)
    submit_counter = 1
    counter = 1
    for mitgliedschaft in mitgliedschaften:
        m = frappe.get_doc("Mitgliedschaft", mitgliedschaft.name)
        send_mvm_to_sp(m, True)
        print("{0} von {1}".format(counter, len(mitgliedschaften)))
        counter += 1
        if submit_counter == 100:
            frappe.db.commit()
            submit_counter = 1
        else:
            submit_counter += 1
    frappe.db.commit()
# --------------------------------------------------------------
# regionCode SP Update
# --------------------------------------------------------------
def update_region_code(site_name, file_name, limit=False):
    '''
    Example:
    sudo bench execute mvd.mvd.data_import.importer.update_region_code --kwargs "{'site_name': 'site1.local', 'file_name': 'update_region_code.csv'}"
    '''
    from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import send_mvm_to_sp
    # display all columns for error handling
    pd.set_option('display.max_rows', None, 'display.max_columns', None)
    # read csv
    df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
    # loop through rows
    count = 1
    submit_counter = 1
    max_loop = limit
    if not limit:
        index = df.index
        max_loop = len(index)
    for index, row in df.iterrows():
        if count <= max_loop:
            if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
                try:
                    m = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
                    send_mvm_to_sp(m, True)
                    # commit in batches of 100
                    if submit_counter == 100:
                        frappe.db.commit()
                        submit_counter = 1
                    else:
                        submit_counter += 1
                except Exception as err:
                    frappe.log_error("{0}\n\n{1}".format(err, row), 'Queue konnte nicht erstellt werden')
            else:
                frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
            print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
        else:
            break
    # flush the final partial batch
    frappe.db.commit()
| 43.355285 | 251 | 0.569168 | 5,912 | 53,327 | 4.89276 | 0.06952 | 0.048676 | 0.04107 | 0.03146 | 0.708498 | 0.682604 | 0.620618 | 0.572634 | 0.533396 | 0.515592 | 0 | 0.010086 | 0.271157 | 53,327 | 1,229 | 252 | 43.390561 | 0.734145 | 0.118458 | 0 | 0.522484 | 0 | 0.008565 | 0.242238 | 0.03626 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040685 | false | 0 | 0.019272 | 0 | 0.120985 | 0.024625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc08fa5ba984ac80960ca5fe0f3a1c315ba43938 | 2,400 | py | Python | portal/util.py | liuxue0905/GoldenTimes | 9cc1fdd0b8c4b06e1f4f932baba0db02e895bc41 | ["BSD-3-Clause"] | null | null | null | portal/util.py | liuxue0905/GoldenTimes | 9cc1fdd0b8c4b06e1f4f932baba0db02e895bc41 | ["BSD-3-Clause"] | 10 | 2020-06-20T02:04:24.000Z | 2021-12-13T19:47:35.000Z | portal/util.py | liuxue0905/GoldenTimes | 9cc1fdd0b8c4b06e1f4f932baba0db02e895bc41 | ["BSD-3-Clause"] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from urllib.parse import quote_plus, unquote_plus


def get_file_content(filePath):
    """ Read a file's raw bytes (e.g. an image). """
    with open(filePath, 'rb') as fp:
        return fp.read()


windows = '"*/:<>?\|'


def lx_quote(string):
    from io import StringIO
    result = StringIO()
    for char in string:
        if char in windows:
            quote = quote_plus(char)
            result.write(quote)
        else:
            result.write(char)
    return result.getvalue()


def lx_unquote(string: str):
    for char in windows:
        string = string.replace(quote_plus(char), char)
    return string


def get_extension(url):
    from mimetypes import MimeTypes
    mime_types = MimeTypes()
    (type, encoding) = mime_types.guess_type(url)
    extensions = mime_types.guess_all_extensions(type)
    extension = extensions[-1]
    return extension


def strftime():
    from datetime import datetime
    # (dt, micro) = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f').split('.')
    (dt, micro) = datetime.utcnow().strftime('%Y-%m-%d %H%M%S.%f').split('.')
    dt = "%s.%03d" % (dt, int(micro) // 1000)
    print(dt)
    return dt


class HashingFiles(object):

    def md5_hash_small(self, file):
        import hashlib
        hasher = hashlib.md5()
        # with open('myfile.jpg', 'rb') as afile:
        with open(file, 'rb') as afile:
            buf = afile.read()
            hasher.update(buf)
        # print(hasher.hexdigest())
        return hasher.hexdigest()

    def md5_hash_large(self, file):
        import hashlib
        BLOCKSIZE = 65536
        hasher = hashlib.md5()
        # with open('anotherfile.txt', 'rb') as afile:
        with open(file, 'rb') as afile:
            buf = afile.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(BLOCKSIZE)
        # print(hasher.hexdigest())
        return hasher.hexdigest()

    def sha1_hash_large(self, file):
        import hashlib
        BLOCKSIZE = 65536
        hasher = hashlib.sha1()
        # with open('anotherfile.txt', 'rb') as afile:
        with open(file, 'rb') as afile:
            buf = afile.read(BLOCKSIZE)
            while len(buf) > 0:
                hasher.update(buf)
                buf = afile.read(BLOCKSIZE)
        # print(hasher.hexdigest())
        return hasher.hexdigest()
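
# Example usage (hedged; file path is hypothetical):
# hf = HashingFiles()
# print(hf.md5_hash_large('/tmp/example.bin'))  # chunked read, constant memory
# print(lx_quote('a/b:c'))                      # -> 'a%2Fb%3Ac'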
| 26.086957 | 81 | 0.579583 | 288 | 2,400 | 4.739583 | 0.319444 | 0.041026 | 0.03956 | 0.061538 | 0.467399 | 0.447619 | 0.447619 | 0.413187 | 0.413187 | 0.413187 | 0 | 0.015258 | 0.29 | 2,400 | 91 | 82 | 26.373626 | 0.785798 | 0.13 | 0 | 0.360656 | 0 | 0 | 0.020733 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.131148 | 0 | 0.409836 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0a3d4504ef5a072c984ac0df1154580b794e75 | 2,100 | py | Python | config/models/app.py | bb-Ricardo/wordpress-hash-event-api | 374fc07915d0c00be43ef8eda4a43045ba2c0364 | ["MIT"] | null | null | null | config/models/app.py | bb-Ricardo/wordpress-hash-event-api | 374fc07915d0c00be43ef8eda4a43045ba2c0364 | ["MIT"] | 6 | 2022-01-20T10:03:08.000Z | 2022-01-22T00:19:28.000Z | config/models/app.py | bb-Ricardo/wordpress-hash-event-api | 374fc07915d0c00be43ef8eda4a43045ba2c0364 | ["MIT"] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2022 Ricardo Bartels. All rights reserved.
#
# wordpress-hash-event-api
#
# This work is licensed under the terms of the MIT license.
# For a copy, see file LICENSE.txt included in this
# repository or visit: <https://opensource.org/licenses/MIT>.

from typing import Union, List

from config.models import EnvOverridesBaseSettings
from pydantic import validator
import pytz

from common.misc import split_quoted_string


# noinspection PyMethodParameters
class AppSettings(EnvOverridesBaseSettings):

    hash_kennels: Union[str, List]
    default_hash_cash: int = None
    default_hash_cash_non_members: int = None
    default_run_type: str = "Regular Run"
    default_currency: str = None
    default_facebook_group_id: int = None
    timezone_string: str = None

    # currently not implemented in WP Event manager
    # default_kennel: str = None
    # default_run_attributes: Union[str, List] = None

    class Config:
        env_prefix = f"{__name__.split('.')[-1]}_"

    @validator("timezone_string")
    def check_time_zone_string(cls, value):
        if value is None:
            return
        # noinspection PyBroadException
        try:
            return pytz.timezone(value)
        except Exception:
            raise ValueError(f"Time zone unknown: {value}")

    @validator("hash_kennels")
    def split_hash_kennels(cls, value):
        if isinstance(value, str):
            value = split_quoted_string(value, strip=True)
        return value

    """
    # currently not implemented in WP Event manager
    @validator("default_run_attributes")
    def split_run_attributes(cls, value):
        if isinstance(value, str):
            value = split_quoted_string(value, strip=True)
        return value

    @validator("default_kennel")
    def check_default_kennel(cls, value, values):
        if value is None:
            return
        if value not in values.get("hash_kennels"):
            raise ValueError(f"Hash kennel '{value}' must be in list of 'hash_kennels': {values.get('hash_kennels')}")
        return value
    """
| 30 | 118 | 0.67619 | 260 | 2,100 | 5.284615 | 0.423077 | 0.048035 | 0.037118 | 0.03639 | 0.193595 | 0.165939 | 0.165939 | 0.10917 | 0.10917 | 0.10917 | 0 | 0.003734 | 0.234762 | 2,100 | 69 | 119 | 30.434783 | 0.851276 | 0.218571 | 0 | 0 | 0 | 0 | 0.08629 | 0.024928 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.178571 | 0 | 0.678571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0d14a3d90e7c46a1736f199da4ae387f995493 | 2,921 | py | Python | conanfile.py | danimtb/conan-Simple-WebSocket-Server | aa6b7fd4a081b41d944737c46eb549a155e91cbe | ["MIT"] | null | null | null | conanfile.py | danimtb/conan-Simple-WebSocket-Server | aa6b7fd4a081b41d944737c46eb549a155e91cbe | ["MIT"] | null | null | null | conanfile.py | danimtb/conan-Simple-WebSocket-Server | aa6b7fd4a081b41d944737c46eb549a155e91cbe | ["MIT"] | null | null | null | from conans import ConanFile, CMake, tools
import os


class SimpleWebSocketServerConan(ConanFile):
    name = "Simple-WebSocket-Server"
    version = "a4d0d064-git"
    source_sha256 = ""
    description = "A very simple, fast, multithreaded, platform independent WebSocket (WS) and WebSocket Secure (WSS) server and client library."
    # topics can get used for searches, GitHub topics, Bintray tags etc. Add here keywords about the library
    topics = ("conan", "Simple-WebSocket-Server", "socket")
    url = "https://github.com/bincrafters/conan-Simple-WebSocket-Server"
    homepage = "https://gitlab.com/eidheim/Simple-WebSocket-Server"
    author = "Bincrafters <bincrafters@gmail.com>"
    license = "MIT"  # Indicates license type of the packaged library; please use SPDX Identifiers https://spdx.org/licenses/
    no_copy_source = True
    requires = (
        "OpenSSL/1.1.1c@conan/stable",
    )
    options = {
        "use_asio_standalone": [True, False],
    }
    default_options = {
        "use_asio_standalone": True,
    }
    # Packages the license for the conanfile.py
    exports = ["LICENSE.md"]
    # Custom attributes for Bincrafters recipe conventions
    _source_subfolder = "source_subfolder"

    def requirements(self):
        if self.options.use_asio_standalone:
            # note: mutating default_options here is likely a no-op at this
            # stage; dependency options are usually forced in configure(),
            # e.g. self.options["asio"].standalone = True
            self.default_options["asio:standalone"] = True
            self.requires("asio/1.13.0@bincrafters/stable")
        else:
            self.requires("boost_asio/1.69.0@bincrafters/stable")

    def source(self):
        if self.version.endswith("-git"):
            git = tools.Git(folder=self._source_subfolder)
            git.clone("https://gitlab.com/eidheim/Simple-WebSocket-Server.git", "master")
            git.checkout(self.version.split('-')[0])
        else:
            tools.get(f"https://gitlab.com/eidheim/Simple-WebSocket-Server/-/archive/v{self.version}/Simple-WebSocket-Server-v{self.version}.tar.gz",
                      sha256=self.source_sha256)
            extracted_dir = self.name + "-v" + self.version
            # Rename to "source_subfolder" is a convention to simplify later steps
            os.rename(extracted_dir, self._source_subfolder)

    def _configure_cmake(self):
        cmake = CMake(self)
        cmake.definitions["USE_STANDALONE_ASIO"] = True
        cmake.configure(source_folder=self._source_subfolder)
        return cmake

    def build(self):
        cmake = self._configure_cmake()
        cmake.build()

    def package(self):
        self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
        cmake = self._configure_cmake()
        cmake.install()
        # If the CMakeLists.txt has a proper install method, the steps below may be redundant
        # If so, you can just remove the lines below
        self.copy(pattern="*.hpp", dst="include", src=self._source_subfolder)

    def package_id(self):
        self.info.header_only()
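
# Typical Conan 1.x usage for this recipe (hedged; user/channel are
# placeholders, adjust to your remote conventions):
#   conan create . bincrafters/testing -o Simple-WebSocket-Server:use_asio_standalone=True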
| 40.013699 | 149 | 0.663471 | 352 | 2,921 | 5.392045 | 0.423295 | 0.063224 | 0.07745 | 0.033193 | 0.125395 | 0.066386 | 0.066386 | 0 | 0 | 0 | 0 | 0.011449 | 0.222527 | 2,921 | 72 | 150 | 40.569444 | 0.824306 | 0.169805 | 0 | 0.072727 | 0 | 0.036364 | 0.310302 | 0.067025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.109091 | false | 0 | 0.036364 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cc0e0214a1a4ff9fb980fd735b4c2c90a8e9ab96 | 3,072 | py | Python | AoC 2021/day16/day16.py | seetohjinwei/Advent-Of-Code | 3725ccecda1cc1a2355f35b46b9a972ce1f9044b | ["MIT"] | null | null | null | AoC 2021/day16/day16.py | seetohjinwei/Advent-Of-Code | 3725ccecda1cc1a2355f35b46b9a972ce1f9044b | ["MIT"] | null | null | null | AoC 2021/day16/day16.py | seetohjinwei/Advent-Of-Code | 3725ccecda1cc1a2355f35b46b9a972ce1f9044b | ["MIT"] | null | null | null | # I completely rewrote the entire script after part 1...
from functools import reduce

table = {
    "0": "0000",
    "1": "0001",
    "2": "0010",
    "3": "0011",
    "4": "0100",
    "5": "0101",
    "6": "0110",
    "7": "0111",
    "8": "1000",
    "9": "1001",
    "A": "1010",
    "B": "1011",
    "C": "1100",
    "D": "1101",
    "E": "1110",
    "F": "1111",
}


def solve(string):
    # an all-zero remainder is just padding: consume it, contribute nothing
    if all(x == '0' for x in string):
        return (len(string), 0, 0, 0)
    packet_version = int(string[0:3], 2)
    packet_id = int(string[3:6], 2)
    length = 6
    packets = 1
    total_version = packet_version
    value = 0
    if packet_id == 4:
        last_group = False
        groups = []
        while not last_group:
            last_group = string[length] == '0'
            group = string[length + 1 : length + 5]
            groups.append(group)
            length += 5
        value = int(''.join(groups), 2)
    else:
        length_type = string[length]
        values = []
        if length_type == '0':
            # next 15 bits represent total length in bits of sub-packets
            number_of_bits = int(string[length + 1 : length + 16], 2)
            length += 16
            bits_solved = 0
            while bits_solved < number_of_bits:
                next_string = string[length + bits_solved : length + number_of_bits]
                next_length, next_packets, next_version, next_value = solve(next_string)
                bits_solved += next_length
                total_version += next_version
                values.append(next_value)
            length += number_of_bits
        else:
            # next 11 bits represent number of sub-packets immediately contained
            number_of_packets = int(string[length + 1 : length + 12], 2)
            length += 12
            packets_solved = 0
            while packets_solved < number_of_packets:
                next_string = string[length :]
                next_length, next_packets, next_version, next_value = solve(next_string)
                length += next_length
                packets_solved += next_packets
                total_version += next_version
                values.append(next_value)
        if packet_id == 0:
            value = sum(values)
        elif packet_id == 1:
            value = reduce(lambda x, y: x * y, values)
        elif packet_id == 2:
            value = reduce(lambda x, y: min(x, y), values)
        elif packet_id == 3:
            value = reduce(lambda x, y: max(x, y), values)
        elif packet_id == 5:
            value = 1 if values[0] > values[1] else 0
        elif packet_id == 6:
            value = 1 if values[0] < values[1] else 0
        elif packet_id == 7:
            value = 1 if values[0] == values[1] else 0
    return (length, packets, total_version, value)


# data = "A0016C880162017C3686B18A3D4780" # for testing
with open("AoC 2021/day16/a.in") as f:
    data = f.read().strip()  # strip the trailing newline so every char is a hex digit

bits = "".join(table[x] for x in data)
length, packets, total_version, value = solve(bits)
print("Part 1:", total_version)
print("Part 2:", value)
| 33.391304 | 88 | 0.540365 | 390 | 3,072 | 4.102564 | 0.266667 | 0.045 | 0.045 | 0.045 | 0.32875 | 0.228125 | 0.190625 | 0.190625 | 0.135625 | 0.11875 | 0 | 0.080177 | 0.338216 | 3,072 | 91 | 89 | 33.758242 | 0.706837 | 0.075846 | 0 | 0.096386 | 0 | 0 | 0.040946 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012048 | false | 0 | 0.012048 | 0 | 0.048193 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |