Dataset schema (column name and type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each row below lists its scalar fields in this column order, with the `content` field shown as a fenced code block.
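To make the schema concrete, here is a minimal, hedged sketch of how rows with these columns could be loaded and filtered with pandas. The Parquet file name and the filter thresholds are illustrative assumptions, not part of the dataset.

```python
# Hypothetical example: load rows that follow the schema above and apply a few
# simple filters on the stored quality signals. The file name and thresholds
# are assumptions for illustration only.
import pandas as pd

df = pd.read_parquet("code_rows.parquet")  # hypothetical local export of the table

python_rows = df[
    (df["lang"] == "Python")
    & (df["alphanum_fraction"] > 0.25)
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5)
]

print(len(python_rows), "rows kept")
print(python_rows[["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```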
hexsha: 660bd601b31c2cdb55bcbc98f8dc987a833cfa02 | size: 2,743 | ext: py | lang: Python
max_stars_repo_path: af_scripts/tmp/blendShapeEditor.py | max_stars_repo_name: aaronfang/small-Scripts | max_stars_repo_head_hexsha: 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | max_stars_repo_licenses: ["MIT"] | max_stars_count: 1 | max_stars_repo_stars_event_min_datetime: 2018-03-08T16:34:00.000Z | max_stars_repo_stars_event_max_datetime: 2018-03-08T16:34:00.000Z
max_issues_repo_path: af_scripts/tmp/blendShapeEditor.py | max_issues_repo_name: aaronfang/personal_scripts | max_issues_repo_head_hexsha: 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: af_scripts/tmp/blendShapeEditor.py | max_forks_repo_name: aaronfang/personal_scripts | max_forks_repo_head_hexsha: 890b10ab19fa9cdf2415aaf2dc08b81cc64fc79d | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
import maya.cmds as cmds
class blendShapeEditor(object):
def __init__(self):
self.blendshape_node = blendshape_node = []
self.target_nodes = target_nodes = []
def prepareOrginialGeo(self,*args):
sel = cmds.ls(sl=True)[0]
if '_org' in sel:
blendshape_base_geo = cmds.duplicate(sel,n="{0}_blendshape".format(sel.split('_org')[0]))
layers = cmds.ls(type='displayLayer')
if "org_geo_layer" not in layers:
org_layer = cmds.createDisplayLayer(n="org_geo_layer",e=True)
cmds.editDisplayLayerMembers(org_layer,sel,blendshape_base_geo,noRecurse=True)
cmds.setAttr("{0}.displayType".format(org_layer),2)
elif "org_geo_layer" in layers:
cmds.editDisplayLayerMembers(org_layer,sel,noRecurse=True)
cmds.setAttr("{0}.displayType".format(org_layer),2)
else:
cmds.confirmDialog(m="Please Select The Orginial Geo!")
cmds.select(sel,blendshape_base_geo,r=True)
def createBlendShape(self,*args):
objs = cmds.ls(sl=True,fl=True)
blendshape_node = cmds.blendShape(objs[0:-1],objs[-1],n="{0}_blendshape".format(objs[-1]))
if len(blendshape_node)>0:
for obj in objs[0:-1]:
cmds.setAttr("{0}.visibility".format(obj),False)
def _UI(self,*args):
target_nodes = cmds.blendShape(blendshape_node[0],q=True,t=True)
target_weights = cmds.blendShape(blendshape_node[0],q=True,w=True)
if len(target_nodes)>0:
w = 300
if cmds.window('blendshapeWin',exists=True):cmds.deleteUI('blendshapeWin',window=True)
cmds.window('blendshapeWin',t='BlendShape Editor',w=w,rtf=1,mxb=0,mnb=0,s=0)
#cmds.columnLayout("mainColumn",p="blendshapeWin",columnAttach=('both', 2), rowSpacing=10, columnWidth=w)
cmds.rowColumnLayout('mainRowColumn',p='blendshapeWin',numberOfColumns=3, columnWidth=[(1, 100), (2, 150), (3, 50)] )
for i,tgt in enumerate(target_nodes):
cmds.text(p='mainRowColumn',l=tgt)
cmds.floatSlider("{0}FltSld".format(tgt),p='mainRowColumn',v=target_weights[i],max=1,min=0,cc=self.updateTargetValue)
cmds.button(p='mainRowColumn',l='Edit')
cmds.showWindow('blendshapeWin')
def updateTargetValue(self,*args):
for i,tgt in enumerate(target_nodes):
last_value = cmds.blendShape(blendshape_node[0],q=True,w=True)
cur_value = cmds.floatSlider("{0}FltSld".format(tgt),q=True,v=True)
if cur_value != last_value:
cmds.blendShape(blendshape_node[0],e=True,w=(i,cur_value))
blendShapeEditor()._UI()
```

avg_line_length: 49.872727 | max_line_length: 133 | alphanum_fraction: 0.629603
qsc_code_num_words_quality_signal: 350 | qsc_code_num_chars_quality_signal: 2,743 | qsc_code_mean_word_length_quality_signal: 4.797143 | qsc_code_frac_words_unique_quality_signal: 0.308571
qsc_code_frac_chars_top_2grams_quality_signal: 0.066706 | qsc_code_frac_chars_top_3grams_quality_signal: 0.044669 | qsc_code_frac_chars_top_4grams_quality_signal: 0.066706
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.272186 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.226921 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.189994
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.107207 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.107207 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.06075
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.022107 | qsc_code_frac_chars_whitespace_quality_signal: 0.224936
qsc_code_size_file_byte_quality_signal: 2,743 | qsc_code_num_lines_quality_signal: 54 | qsc_code_num_chars_line_max_quality_signal: 134 | qsc_code_num_chars_line_mean_quality_signal: 50.796296
qsc_code_frac_chars_alphabet_quality_signal: 0.767639 | qsc_code_frac_chars_comments_quality_signal: 0.037915 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.086957 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.120546 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.108696 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.021739 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.152174 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
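As a rough illustration of what the per-row statistics above measure, the sketch below recomputes a few of them directly from a `content` string. The exact definitions used to produce the dataset are not documented here, so treat these as approximations; they need not reproduce the stored values exactly.

```python
# Approximate re-computation of a few row-level statistics from a file's text.
# These are illustrative definitions, not the dataset's official ones.
def basic_stats(content: str) -> dict:
    lines = content.splitlines()
    non_empty = [ln for ln in lines if ln.strip()]
    num_chars = len(content)
    return {
        "num_lines": len(lines),
        "max_line_length": max((len(ln) for ln in lines), default=0),
        "avg_line_length": num_chars / len(lines) if lines else 0.0,
        "alphanum_fraction": (
            sum(ch.isalnum() for ch in content) / num_chars if num_chars else 0.0
        ),
        "frac_lines_import": (
            sum(ln.lstrip().startswith(("import ", "from ")) for ln in non_empty)
            / len(non_empty)
            if non_empty
            else 0.0
        ),
    }

# Usage: pass the content field of any row, e.g.
# stats = basic_stats(row["content"])
```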
hexsha: 660dfb4d814fae5a38c519d53465f866ae0f301e | size: 5,511 | ext: py | lang: Python
max_stars_repo_path: model.py | max_stars_repo_name: braemt/attentive-multi-task-deep-reinforcement-learning | max_stars_repo_head_hexsha: 921feefce98076f88c892f0b7e6db8572f596763 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 12 | max_stars_repo_stars_event_min_datetime: 2019-04-07T02:04:48.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-22T12:57:47.000Z
max_issues_repo_path: model.py | max_issues_repo_name: braemt/attentive-multi-task-deep-reinforcement-learning | max_issues_repo_head_hexsha: 921feefce98076f88c892f0b7e6db8572f596763 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: model.py | max_forks_repo_name: braemt/attentive-multi-task-deep-reinforcement-learning | max_forks_repo_head_hexsha: 921feefce98076f88c892f0b7e6db8572f596763 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 7 | max_forks_repo_forks_event_min_datetime: 2019-04-07T02:04:49.000Z | max_forks_repo_forks_event_max_datetime: 2020-12-28T10:30:27.000Z
content:

```python
import numpy as np
import tensorflow as tf
def normalized_columns_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None):
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def flatten(x):
return tf.reshape(x, [-1, np.prod(x.get_shape().as_list()[1:])])
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None,
trainable=True):
with tf.variable_scope(name):
stride_shape = [1, stride[0], stride[1], 1]
filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[:3])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = np.prod(filter_shape[:2]) * num_filters
# initialize weights with random weights
w_bound = np.sqrt(6. / (fan_in + fan_out))
w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
collections=collections, trainable=trainable)
b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
collections=collections, trainable=trainable)
return tf.nn.conv2d(x, w, stride_shape, pad) + b
def linear(x, size, name, initializer=None, bias_init=0, trainable=True):
w = tf.get_variable(name + "/W", [x.get_shape()[1], size], initializer=initializer, trainable=trainable)
b = tf.get_variable(name + "/b", [size], initializer=tf.constant_initializer(bias_init), trainable=trainable)
return tf.matmul(x, w) + b
def categorical_sample(logits, d):
value = tf.squeeze(tf.multinomial(logits - tf.reduce_max(logits, [1], keep_dims=True), 1), [1])
return tf.one_hot(value, d)
def policy_distribution(logits):
return tf.nn.softmax(logits)
class FFPolicy(object):
def __init__(self, ob_space, ac_spaces, tasks):
self.x = tf.placeholder(tf.float32, [None] + list(ob_space))
self.task = tf.placeholder(tf.uint8, [None])
self.all_entropy = []
self.all_logits = []
self.all_actions = []
self.all_vfs = []
self.nns = int((tasks + 2) / 4) + 1
x = tf.nn.relu(conv2d(self.x, 32, "c1", [3, 3], [2, 2]))
x = tf.nn.relu(conv2d(x, 32, "c2", [3, 3], [1, 1]))
shared_layer = x
for i in range(self.nns + 1):
with tf.variable_scope("nn_" + str(i)):
x = tf.nn.relu(conv2d(shared_layer, 16, "c3", [3, 3], [1, 1]))
x = flatten(x)
if i == self.nns:
x = tf.nn.relu(linear(x, self.nns * tasks, "task_in", normalized_columns_initializer(0.01)))
one_hot_task = tf.one_hot(self.task, tasks)
x = tf.concat([x, one_hot_task], -1)
x = tf.nn.relu(linear(x, 256, "h1", normalized_columns_initializer(0.01)))
if i < self.nns:
self.all_logits.append(linear(x, max(ac_spaces), "action", normalized_columns_initializer(0.01)))
self.all_actions.append(tf.nn.softmax(self.all_logits[-1]))
self.all_vfs.append(tf.reshape(linear(x, 1, "value", normalized_columns_initializer(1.0)), [-1]))
else:
self.w_logits = linear(x, self.nns, "attention", normalized_columns_initializer(0.01))
self.w = tf.nn.softmax(self.w_logits)
self.logits = []
self.vf = []
self.sample = []
self.evaluation_policy_dist = []
logits = tf.log(
tf.clip_by_value(tf.einsum('ij,jik->ik', self.w, tf.convert_to_tensor(self.all_actions)), 1e-8,
1e+8))
vf = tf.einsum('ij,ji->i', self.w, tf.convert_to_tensor(self.all_vfs))
for j in range(tasks):
with tf.variable_scope("task_" + str(j)):
x = linear(logits, ac_spaces[j], "logits", normalized_columns_initializer(0.01))
self.logits.append(x)
self.vf.append(vf)
self.sample.append(categorical_sample(self.logits[-1], ac_spaces[j])[0, :])
self.evaluation_policy_dist.append(policy_distribution(self.logits[-1]))
self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
def act(self, ob, task):
sess = tf.get_default_session()
result = sess.run([self.sample[task], self.vf[task]], {self.x: [ob], self.task: [task]})
result[1] = result[1][0]
return result
def value(self, ob, task):
sess = tf.get_default_session()
return sess.run(self.vf[task], {self.x: [ob], self.task: [task]})[0]
def evaluation(self, ob, task):
sess = tf.get_default_session()
return sess.run([self.sample[task], self.vf[task], self.logits[task],
self.evaluation_policy_dist[task]], {self.x: [ob], self.task: [task]})
```

avg_line_length: 46.310924 | max_line_length: 119 | alphanum_fraction: 0.577209
qsc_code_num_words_quality_signal: 744 | qsc_code_num_chars_quality_signal: 5,511 | qsc_code_mean_word_length_quality_signal: 4.119624 | qsc_code_frac_words_unique_quality_signal: 0.227151
qsc_code_frac_chars_top_2grams_quality_signal: 0.022839 | qsc_code_frac_chars_top_3grams_quality_signal: 0.063948 | qsc_code_frac_chars_top_4grams_quality_signal: 0.014682
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.225449 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.189886 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.102121
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.094617 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.064927 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.032626
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.02566 | qsc_code_frac_chars_whitespace_quality_signal: 0.278715
qsc_code_size_file_byte_quality_signal: 5,511 | qsc_code_num_lines_quality_signal: 119 | qsc_code_num_chars_line_max_quality_signal: 120 | qsc_code_num_chars_line_mean_quality_signal: 46.310924
qsc_code_frac_chars_alphabet_quality_signal: 0.745409 | qsc_code_frac_chars_comments_quality_signal: 0.047178 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.056818 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.014681 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.125 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.022727 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.022727
qsc_codepython_score_lines_no_logic_quality_signal: 0.272727 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 660ea1a100d2f6604cf5b576d0992caedb50b349 | size: 632 | ext: py | lang: Python
max_stars_repo_path: src/data/852.py | max_stars_repo_name: NULLCT/LOMC | max_stars_repo_head_hexsha: 79a16474a8f21310e0fb47e536d527dd5dc6d655 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/data/852.py | max_issues_repo_name: NULLCT/LOMC | max_issues_repo_head_hexsha: 79a16474a8f21310e0fb47e536d527dd5dc6d655 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/data/852.py | max_forks_repo_name: NULLCT/LOMC | max_forks_repo_head_hexsha: 79a16474a8f21310e0fb47e536d527dd5dc6d655 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
from collections import deque
n, q = map(int, input().split())
graph = [[] for _ in range(n + 1)]
for i in range(n - 1):
a, b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
CD = []
for i in range(q):
e, f = map(int, input().split())
CD.append([e, f])
dist = [-1] * (n + 1)
dist[0] = 0
dist[1] = 0
d = deque()
d.append(1)
while d:
v = d.popleft()
for i in graph[v]:
if dist[i] != -1:
continue
dist[i] = dist[v] + 1
d.append(i)
for i, j in CD:
x = dist[i] + dist[j]
if x % 2 == 0:
print("Town")
else:
print("Road")
```

avg_line_length: 16.631579 | max_line_length: 36 | alphanum_fraction: 0.47943
qsc_code_num_words_quality_signal: 109 | qsc_code_num_chars_quality_signal: 632 | qsc_code_mean_word_length_quality_signal: 2.770642 | qsc_code_frac_words_unique_quality_signal: 0.33945
qsc_code_frac_chars_top_2grams_quality_signal: 0.05298 | qsc_code_frac_chars_top_3grams_quality_signal: 0.109272 | qsc_code_frac_chars_top_4grams_quality_signal: 0.15894
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.139073 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.030162 | qsc_code_frac_chars_whitespace_quality_signal: 0.318038
qsc_code_size_file_byte_quality_signal: 632 | qsc_code_num_lines_quality_signal: 37 | qsc_code_num_chars_line_max_quality_signal: 37 | qsc_code_num_chars_line_mean_quality_signal: 17.081081
qsc_code_frac_chars_alphabet_quality_signal: 0.670534 | qsc_code_frac_chars_comments_quality_signal: 0 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.012658 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.034483 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.034483 | qsc_codepython_frac_lines_print_quality_signal: 0.068966
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 6613dec8d628000fe7b472846f82eac73bd8f3ea | size: 49,047 | ext: py | lang: Python
max_stars_repo_path: autojail/config/memory.py | max_stars_repo_name: ekut-es/autojail | max_stars_repo_head_hexsha: bc16e40e6df55c0a28a3059715851ffa59b14ba8 | max_stars_repo_licenses: ["MIT"] | max_stars_count: 6 | max_stars_repo_stars_event_min_datetime: 2020-08-12T08:16:15.000Z | max_stars_repo_stars_event_max_datetime: 2022-03-05T02:25:53.000Z
max_issues_repo_path: autojail/config/memory.py | max_issues_repo_name: ekut-es/autojail | max_issues_repo_head_hexsha: bc16e40e6df55c0a28a3059715851ffa59b14ba8 | max_issues_repo_licenses: ["MIT"] | max_issues_count: 1 | max_issues_repo_issues_event_min_datetime: 2021-03-30T10:34:51.000Z | max_issues_repo_issues_event_max_datetime: 2021-06-09T11:24:00.000Z
max_forks_repo_path: autojail/config/memory.py | max_forks_repo_name: ekut-es/autojail | max_forks_repo_head_hexsha: bc16e40e6df55c0a28a3059715851ffa59b14ba8 | max_forks_repo_licenses: ["MIT"] | max_forks_count: 1 | max_forks_repo_forks_event_min_datetime: 2021-11-21T09:30:58.000Z | max_forks_repo_forks_event_max_datetime: 2021-11-21T09:30:58.000Z
content:

```python
import copy
import logging
import math
import sys
from collections import defaultdict
from functools import reduce
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
)
import tabulate
from ortools.sat.python import cp_model
from ..model import (
Board,
CellConfig,
DeviceMemoryRegion,
HypervisorMemoryRegion,
JailhouseConfig,
MemoryRegion,
MemoryRegionData,
ShMemNetRegion,
)
from ..model.datatypes import HexInt
from ..model.parameters import GenerateConfig, GenerateParameters, ScalarChoice
from ..utils import get_overlap
from .passes import BasePass
class MemoryAllocationInfeasibleException(Exception):
pass
class AllocatorSegment:
def __init__(
self,
name: str = "unnamed",
alignment: int = 2 ** 12,
shared_regions: Optional[
Dict[
str,
List[
Union[
MemoryRegion, DeviceMemoryRegion, HypervisorMemoryRegion
]
],
]
] = None,
) -> None:
self.name = name
self.shared_regions: Optional[
Dict[
str,
List[
Union[
MemoryRegion, DeviceMemoryRegion, HypervisorMemoryRegion
]
],
]
] = defaultdict(list)
if shared_regions:
self.shared_regions.update(shared_regions)
self.alignment = alignment
self.constraint: Optional[MemoryConstraint] = None
@property
def physical_start_addr(self):
key = self.shared_regions.keys()[0]
return self.shared_regions[key][0].physical_start_addr
@property
def size(self):
key = list(self.shared_regions)[0]
return sum(map(lambda r: r.size, self.shared_regions[key]))
class MemoryConstraint(object):
"""Implements a generic constraint for AllocatorSegments"""
def __init__(
self, size: int, virtual: bool, start_addr: int = None
) -> None:
self.size = size
self.virtual = virtual
self.start_addr: Optional[int] = start_addr
self.address_range: Optional[Tuple[int, int]] = None
# Addresses must be aligned such that
# addr % self.alignment == 0
self.alignment: Optional[int] = None
# Constraint for Memory regions where physical == virtual address
# E.g. mem loadable in root cell
self.equal_constraint: Optional["MemoryConstraint"] = None
# Solver Interval Variable
self.bound_vars: Optional[Tuple[Any, Any]] = None
# Values for the allocated range after constraint solving
self.allocated_range: Optional[Tuple[int, int]] = None
# allow arbitrary actions upon resolving a constraint
# this method is called iff, the solver found a valid
# solution and assigned start_addr
# Parameters:
# - self: MemoryConstraint
self.resolved: Optional[Callable[[MemoryConstraint], None]] = None
def __str__(self):
ret = ""
if self.start_addr is not None:
ret += f"addr: {hex(self.start_addr)} "
if self.address_range:
ret += f"range: {hex(self.address_range[0])}-{hex(self.address_range[1])} "
if self.alignment:
ret += f"alignment: {self.alignment} "
        if self.allocated_range:
            ret += f"allocated: {hex(self.allocated_range[0])}-{hex(self.allocated_range[1])} "
ret += f"size: {self.size} virtual: {self.virtual}"
return ret
# Returns a constraint that satisfies both
# <self> and <other>, if possible
# Fails otherwise
def merge(self, other):
assert (
self.virtual == other.virtual
and "Unable to merge constraints for physical and virtual addresses"
)
assert (
self.size == other.size
and "Unable to merge constraints with different size"
)
assert (
self.start_addr == other.start_addr
and "Unbable to merge constraints with different start addresses"
)
alignment = self.alignment
if other.alignment:
if alignment:
alignment = (self.alignment * other.alignment) / math.gcd(
self.alignment, other.alignment
)
else:
alignment = other.alignment
resolved = self.resolved
if other.resolved:
if resolved:
def callback(mc: MemoryConstraint):
assert self.resolved
assert other.resolved
self.resolved(mc)
other.resolved(mc)
resolved = callback
else:
resolved = other.resolved
mc = MemoryConstraint(self.size, self.virtual)
mc.virtual = self.virtual
mc.start_addr = self.start_addr
mc.alignment = alignment
        mc.resolved = resolved
        return mc
class NoOverlapConstraint(object):
"""Implements a generic no-overlap constraint"""
def __init__(self) -> None:
self.constraints: List[MemoryConstraint] = []
def add_memory_constraint(self, mc: MemoryConstraint) -> None:
self.constraints.append(mc)
def __str__(self):
return str(self.__dict__)
class CPMemorySolver(object):
def __init__(
self,
constraints: List[NoOverlapConstraint],
physical_domain: cp_model.Domain,
virtual_domain: cp_model.Domain,
):
self.constraints = constraints
self.model = cp_model.CpModel()
self.physical_domain = physical_domain
self.virtual_domain = virtual_domain
self.ivars: Dict[cp_model.IntervalVar, MemoryConstraint] = dict()
self.vars: Dict[
cp_model.IntervalVar, Tuple[cp_model.IntVar, cp_model.IntVar]
] = dict()
self._build_cp_constraints()
def solve(self):
solver = cp_model.CpSolver()
solver.parameters.log_search_progress = True
status = solver.Solve(self.model)
if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
for ivar, mc in self.ivars.items():
lower, upper = self.vars[ivar]
mc.allocated_range = solver.Value(lower), solver.Value(upper)
else:
print("Memory allocation infeasible")
raise MemoryAllocationInfeasibleException()
def _build_cp_constraints(self):
equal_pairs = []
for overlap_index, no_overlap in enumerate(self.constraints):
cp_no_overlap = []
for constr_index, constr in enumerate(no_overlap.constraints):
lower = None
upper = None
constr_name = f"constr_{overlap_index}_{constr_index}"
if constr.start_addr is not None:
lower = self.model.NewConstant(constr.start_addr)
upper = self.model.NewConstant(
constr.start_addr + constr.size
)
else:
if constr.address_range:
l_addr, u_addr = constr.address_range
lower = self.model.NewIntVar(
l_addr, u_addr, f"{constr_name}_lower"
)
else:
domain = self.physical_domain
if constr.virtual:
domain = self.virtual_domain
lower = self.model.NewIntVarFromDomain(
domain, f"{constr_name}_lower"
)
if constr.address_range:
l_addr, u_addr = constr.address_range
upper = self.model.NewIntVar(
l_addr, u_addr, f"{constr_name}_upper"
)
else:
domain = self.physical_domain
if constr.virtual:
domain = self.virtual_domain
upper = self.model.NewIntVarFromDomain(
domain, f"{constr_name}_upper"
)
ivar = self.model.NewIntervalVar(
lower, constr.size, upper, f"{constr_name}_ivar"
)
print(lower, constr.size, upper)
constr.bound_vars = (lower, upper)
if constr.alignment:
self.model.AddModuloEquality(0, lower, constr.alignment)
if constr.equal_constraint:
equal_pairs.append((constr, constr.equal_constraint))
cp_no_overlap.append(ivar)
self.ivars[ivar] = constr
self.vars[ivar] = (lower, upper)
self.model.AddNoOverlap(cp_no_overlap)
for first, second in equal_pairs:
self.model.Add(first.bound_vars[0] == second.bound_vars[0])
self.model.Add(first.bound_vars[1] == second.bound_vars[1])
class AllocateMemoryPass(BasePass):
"""Implements a simple MemoryAllocator for AutoJail"""
def __init__(self) -> None:
self.logger = logging.getLogger("autojail")
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
self.root_cell: Optional[CellConfig] = None
self.root_cell_id: Optional[str] = None
self.unallocated_segments: List[AllocatorSegment] = []
self.allocated_regions: List[MemoryRegionData] = []
self.per_region_constraints: Dict[str, MemoryConstraint] = dict()
# data structure for creating and handling generic
# constraints
self.physical_domain: cp_model.Domain = None
self.virtual_domain: cp_model.Domain = None
self.global_no_overlap = NoOverlapConstraint()
self.no_overlap_constraints: Dict[
str, NoOverlapConstraint
] = defaultdict(NoOverlapConstraint)
self.memory_constraints: Dict[
MemoryConstraint, AllocatorSegment
] = dict()
def _iter_constraints(self, f_no_overlap, f_mc):
for cell_name, no_overlap in self.no_overlap_constraints.items():
if not f_no_overlap(cell_name, no_overlap):
continue
for mc in no_overlap.constraints:
f_mc(cell_name, mc)
def _dump_constraints(self):
constraint_tables = {}
def f_no_overlap(
cell_name: str, no_overlap: NoOverlapConstraint
) -> bool:
constraint_tables[cell_name] = []
return True
def f_mc(cell_name: str, mc: MemoryConstraint) -> None:
constraint_tables[cell_name].append(
[
hex(mc.start_addr) if mc.start_addr is not None else "-",
hex(mc.address_range[0]) + "-" + hex(mc.address_range[1])
if mc.address_range
else "-",
str(mc.size) if mc.size is not None else "-",
str(mc.alignment) if mc.alignment else "-",
str(mc.virtual),
"yes" if mc.equal_constraint else "-",
str(mc.resolved) if mc.resolved else "-",
]
)
self._iter_constraints(f_no_overlap, f_mc)
self.logger.info("")
self.logger.info("Memory Constraints:")
for cell_name, constraints in constraint_tables.items():
self.logger.info("Cell: %s", cell_name)
formatted = tabulate.tabulate(
constraints,
headers=[
"Start Address",
"Start Address Range",
"Size",
"Alignment",
"Virtual?",
"Equal?",
"Resolved callback",
],
)
self.logger.info(formatted)
self.logger.info("")
def _check_constraints(self):
def f_no_overlap(cell_name, no_overlap):
full_regions = []
def insert_region(region):
o_start, o_end = region
for (start, end) in full_regions:
if (
(o_start <= start and start <= o_end)
or (o_start <= end and end <= o_end)
or (start <= o_start and o_start <= end)
or (start <= o_end and o_end <= end)
):
print(
f"Regions overlap for {cell_name}: (0x{start:x}, 0x{end:x}) and (0x{o_start:x}, 0x{o_end:x})"
)
if mc not in self.memory_constraints:
continue
seg = self.memory_constraints[mc]
print("Affected memory cells:")
for sharer in seg.shared_regions.keys():
print(f"\t{sharer}")
full_regions.append(region)
for mc in no_overlap.constraints:
if mc.start_addr is not None:
region = (mc.start_addr, mc.start_addr + mc.size - 1)
insert_region(region)
return False
def f_mc(cell_name, mc):
pass
self._iter_constraints(f_no_overlap, f_mc)
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.logger.info("Memory Allocator")
self.board = board
self.config = config
self.root_cell = None
for id, cell in self.config.cells.items():
if cell.type == "root":
self.root_cell = cell
self.root_cell_id = id
break
vmem_size = 2 ** 32
if self.board.virtual_address_bits > 32:
vmem_size = 2 ** (self.board.virtual_address_bits - 1)
self.virtual_domain = cp_model.Domain(0, vmem_size)
self._build_allocation_domain()
self.logger.info(
"Physical Memory Domain: %s",
str(self.physical_domain.FlattenedIntervals()),
)
self.logger.info(
"Virtual Memory domain: %s",
str(self.virtual_domain.FlattenedIntervals()),
)
self.no_overlap_constraints["__global"] = self.global_no_overlap
self.unallocated_segments = self._build_unallocated_segments()
self._lift_loadable()
self._preallocate_vpci()
self.logger.info("")
self.logger.info("Unallocated physical segments: ")
table = [
[
s.name,
s.size,
len(s.shared_regions if s.shared_regions else []),
",".join(s.shared_regions.keys() if s.shared_regions else []),
]
for s in self.unallocated_segments
]
self.logger.info(
tabulate.tabulate(
table,
headers=["Name", "Size (Byte)", "# Subregions", "Sharers"],
)
)
for seg in self.unallocated_segments:
assert seg.size > 0
assert seg.shared_regions
mc_global = None
for sharer, regions in seg.shared_regions.items():
mc_seg = seg.constraint
mc_local = MemoryConstraint(seg.size, True)
if mc_seg and mc_seg.alignment:
mc_local.alignment = mc_seg.alignment
else:
if regions[0].virtual_start_addr is None:
mc_local.alignment = seg.alignment
fst_region = regions[0]
if fst_region.virtual_start_addr is not None:
if mc_seg and mc_seg.start_addr and mc_seg.virtual:
assert (
mc_seg.start_addr == fst_region.virtual_start_addr
and "Invalid state detected: start addresses must be equal"
)
mc_local.start_addr = fst_region.virtual_start_addr
elif mc_seg and mc_seg.start_addr and mc_seg.virtual:
mc_local.start_addr = mc_seg.start_addr
if mc_seg and mc_seg.virtual:
mc_local.resolved = mc_seg.resolved
if not mc_global:
mc_global = copy.deepcopy(mc_local)
mc_global.virtual = False
mc_global.start_addr = None
if fst_region.physical_start_addr is not None:
if mc_seg and mc_seg.start_addr and not mc_seg.virtual:
assert (
mc_seg.start_addr
== fst_region.virtual_start_addr
and "Invalid state detected: start addresses must be equal"
)
mc_global.start_addr = fst_region.physical_start_addr
elif mc_seg and mc_seg.start_addr and not mc_seg.virtual:
mc_global.start_addr = mc_seg.start_addr
if mc_seg and not mc_seg.virtual:
mc_global.resolved = mc_seg.resolved
if mc_global.start_addr and mc_global.size:
print(
f"Adding global no-overlapp (shared): [0x{mc_global.start_addr:x}, 0x{mc_global.start_addr + mc_global.size:x}]"
)
self.global_no_overlap.add_memory_constraint(mc_global)
self.memory_constraints[mc_global] = seg
# Add physical == virtual constraint for MEM_LOADABLEs in root cell
if sharer == self.root_cell_id:
is_loadable = False
for shared_regions in seg.shared_regions.values():
for shared_region in shared_regions:
if isinstance(shared_region, MemoryRegionData):
for flag in shared_region.flags:
if flag == "MEM_LOADABLE":
is_loadable = True
if is_loadable:
mc_local.equal_constraint = mc_global
self.no_overlap_constraints[sharer].add_memory_constraint(
mc_local
)
self.memory_constraints[mc_local] = seg
# Add virtually reserved segments
for cell_name, cell in self.config.cells.items():
assert cell.memory_regions is not None
for memory_region in cell.memory_regions.values():
assert memory_region is not None
if isinstance(memory_region, HypervisorMemoryRegion):
continue
if isinstance(memory_region, ShMemNetRegion):
continue
assert isinstance(memory_region, MemoryRegionData)
if (
memory_region.virtual_start_addr is not None
and memory_region.physical_start_addr is not None
):
if memory_region.allocatable:
continue
assert memory_region.size is not None
memory_constraint = MemoryConstraint(
size=int(memory_region.size),
virtual=True,
start_addr=memory_region.virtual_start_addr,
)
self.no_overlap_constraints[
cell_name
].add_memory_constraint(memory_constraint)
self._add_gic_constraints()
self._dump_constraints()
solver = CPMemorySolver(
list(self.no_overlap_constraints.values()),
self.physical_domain,
self.virtual_domain,
)
try:
solver.solve()
except MemoryAllocationInfeasibleException:
self._check_constraints()
sys.exit(-1)
for cell_name, no_overlap_constr in self.no_overlap_constraints.items():
for constr in no_overlap_constr.constraints:
if not constr.allocated_range:
print(constr, "has not been allocated")
continue
(start, _) = constr.allocated_range
if constr.resolved:
constr.resolved(constr)
if constr not in self.memory_constraints:
continue
seg = self.memory_constraints[constr]
if cell_name == "__global":
assert seg.shared_regions
for _, regions in seg.shared_regions.items():
for region in regions:
if region.physical_start_addr is None:
region.physical_start_addr = HexInt(start)
else:
assert seg.shared_regions
assert constr.virtual
for region in seg.shared_regions[cell_name]:
if region.virtual_start_addr is None:
region.virtual_start_addr = HexInt(start)
self._remove_allocatable()
return self.board, self.config
def _add_gic_constraints(self):
interrupt_ranges: List[Tuple[int, int]] = []
for interrupt_controller in self.board.interrupt_controllers:
if interrupt_controller.gic_version == 2:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x1000)
)
interrupt_ranges.append(
(interrupt_controller.gicc_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gich_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gicv_base, 0x2000)
)
elif interrupt_controller.gic_version == 3:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x10000)
)
interrupt_ranges.append(
(interrupt_controller.gicr_base, 0x20000)
)
for name, constraint in self.no_overlap_constraints.items():
for interrupt_range in interrupt_ranges:
mc = MemoryConstraint(
size=interrupt_range[1],
start_addr=interrupt_range[0],
virtual=False if name == "__global" else True,
)
constraint.add_memory_constraint(mc)
def _lift_loadable(self):
root_cell = self.root_cell
for cell_name, cell in self.config.cells.items():
if cell.type == "root":
continue
for name, region in cell.memory_regions.items():
if region.flags and "MEM_LOADABLE" in region.flags:
root_region_name = f"{name}@{cell_name}"
print("Adding region:", root_region_name, "to root cell")
copy_region = copy.deepcopy(region)
copy_region.flags.remove("MEM_LOADABLE")
if "MEM_EXECUTE" in copy_region.flags:
copy_region.flags.remove("MEM_EXECUTE")
if "MEM_DMA" in copy_region.flags:
copy_region.flags.remove("MEM_DMA")
# FIXME: is it really true, that that MEM_LOADABLE must be the same at their respective memory region
copy_region.virtual_start_addr = (
copy_region.physical_start_addr
)
root_cell.memory_regions[root_region_name] = copy_region
for seg in self.unallocated_segments:
if cell_name not in seg.shared_regions:
continue
if region not in seg.shared_regions[cell_name]:
continue
seg.shared_regions["root"].append(copy_region)
def _build_allocation_domain(self) -> None:
assert self.root_cell is not None
assert self.root_cell.memory_regions is not None
assert self.board is not None
start = None
end = 0
allocatable_regions = []
for region in self.board.memory_regions.values():
assert region is not None
if isinstance(region, MemoryRegionData) and region.allocatable:
assert region.physical_start_addr is not None
assert region.size is not None
allocatable_regions.append(region)
tmp_start = region.physical_start_addr
tmp_end = region.physical_start_addr + region.size
if start is None:
start = tmp_start
if tmp_start < start:
start = tmp_start
if tmp_end > end:
end = tmp_end
allocatable_regions.sort(
key=lambda r: r.physical_start_addr
if r.physical_start_addr is not None
else 0
)
holes: List[List[int]] = []
for i in range(0, len(allocatable_regions) - 1):
r0 = allocatable_regions[i]
r1 = allocatable_regions[i + 1]
assert r0.physical_start_addr is not None and r0.size is not None
assert r1.physical_start_addr is not None
r0_end = r0.physical_start_addr + r0.size
r1_start = r1.physical_start_addr
if r0_end != r1_start:
holes.append([r0_end, r1_start])
# Physical domain spans the entire range from the first allocatable memory region
# to the end of the last one. Any holes in that range are accomodated for using
# constant interval constraints
def remove_hole(start, end):
try:
holes.remove([start, end])
except ValueError:
pass
self.physical_domain = cp_model.Domain.FromIntervals([[start, end]])
# Make sure all pre-allocated regions part of a cell have a corresponding
# constraint (technically, we only need constraints for those regions that
# overlapp with the allocatable range/physical domain)
non_alloc_ranges: List[List[int]] = []
assert self.config
for cell in self.config.cells.values():
assert cell.memory_regions
for r in cell.memory_regions.values():
if not isinstance(r, ShMemNetRegion) and not isinstance(
r, MemoryRegion
):
continue
if r.physical_start_addr is not None:
assert r.size is not None
end = r.physical_start_addr + r.size
non_alloc_range = [r.physical_start_addr, end]
if non_alloc_range in non_alloc_ranges:
continue
if not self.physical_domain.Contains(
non_alloc_range[0]
) and not self.physical_domain.Contains(non_alloc_range[1]):
continue
non_alloc_ranges.append(non_alloc_range)
remove_hole(r.physical_start_addr, end)
mc = MemoryConstraint(r.size, False, r.physical_start_addr)
self.global_no_overlap.add_memory_constraint(mc)
# fill remaining holes in between allocatable regions
for hole in holes:
s, e = hole
size = e - s
mc = MemoryConstraint(size, False, s)
self.global_no_overlap.add_memory_constraint(mc)
def _remove_allocatable(self):
"""Finally remove allocatable memory regions from cells"""
assert self.config is not None
for cell in self.config.cells.values():
delete_list = []
for name, region in cell.memory_regions.items():
if isinstance(region, MemoryRegionData):
if region.allocatable:
delete_list.append(name)
for name in delete_list:
del cell.memory_regions[name]
def _build_unallocated_segments(
self, key: Callable = lambda x: x.physical_start_addr
) -> List[AllocatorSegment]:
assert self.config
assert self.config.cells
ana = UnallocatedOrSharedSegmentsAnalysis(
self.root_cell,
self.config.cells,
self.logger,
self.per_region_constraints,
self.physical_domain,
key,
)
ana.run()
unallocated = ana.unallocated
assert unallocated
return unallocated
def _preallocate_vpci(self):
"""Preallocate a virtual page on all devices"""
assert self.config is not None
if self.root_cell and self.root_cell.platform_info:
# see hypvervisor/pci.c:850
end_bus = self.root_cell.platform_info.pci_mmconfig_end_bus
vpci_size = (end_bus + 2) * 256 * 4096
if self.root_cell.platform_info.pci_mmconfig_base:
for constraints in self.no_overlap_constraints.values():
mc = MemoryConstraint(
vpci_size,
True,
self.root_cell.platform_info.pci_mmconfig_base,
)
constraints.add_memory_constraint(mc)
else:
def callback(mc: MemoryConstraint):
assert mc.allocated_range
assert self.root_cell
assert self.root_cell.platform_info
assert self.root_cell.memory_regions
physical_start_addr, _ = mc.allocated_range
self.root_cell.platform_info.pci_mmconfig_base = HexInt(
physical_start_addr
)
self.logger.info(
"Print resolved pci_mmconfig %s",
hex(physical_start_addr),
)
# Allocate vpci physically
last_mc = MemoryConstraint(
vpci_size, True
) # This is a physical constraint, but it does not need to be backed by allocatable memory
last_mc.resolved = callback
last_mc.alignment = self.board.pagesize
last_mc.address_range = (0x0, 2 ** 32 - 1)
self.no_overlap_constraints["__global"].add_memory_constraint(
last_mc
)
for cell_name in self.config.cells.keys():
mc = MemoryConstraint(vpci_size, True)
mc.equal_constraint = last_mc
self.no_overlap_constraints[
cell_name
].add_memory_constraint(mc)
mc.alignment = self.board.pagesize
last_mc = mc
class UnallocatedOrSharedSegmentsAnalysis(object):
""" Group unallocated memory regions into segments
that are allocated continuously.
Detect (un-)allocated regions that are shared
between cells
"""
def __init__(
self,
root_cell,
cells,
logger,
per_region_constraints,
physical_domain,
key=lambda x: x.physical_start_addr,
) -> None:
self.root_cell: CellConfig = root_cell
self.cells: Dict[str, CellConfig] = cells
self.logger = logger
self.key = key
self.per_region_constraints = per_region_constraints
self.physical_domain: Optional[cp_model.Domain] = physical_domain
# result store
self.unallocated: List[AllocatorSegment] = []
self.shared: Dict[str, AllocatorSegment] = {}
def _detect_shared_memio(self):
shared: Dict[
Tuple[int, int], Tuple[int, List[MemoryRegionData]]
] = defaultdict(lambda: (0, []))
for cell in self.cells.values():
for region in cell.memory_regions.values():
if not isinstance(region, MemoryRegionData):
continue
if not self.key(region) or "MEM_IO" not in region.flags:
continue
start = region.physical_start_addr
key = (start, region.size)
count, regions = shared[key]
regions.append(region)
shared[key] = (count + 1, regions)
for count, regions in shared.values():
if count > 1:
for region in regions:
region.shared = True
def _log_shared_segments(self):
self.logger.info("Shared segments:")
for name, seg in self.shared.items():
self.logger.info(f"Region: '{name}' shared by")
for cell_name in seg.shared_regions:
self.logger.info(f"\t{cell_name}")
self.logger.info("\n")
def run(self) -> None:
assert self.root_cell is not None
assert self.cells is not None
self._detect_shared_memio()
# Add cell memories
self.logger.debug("building allocatable regions")
for cell_name, cell in self.cells.items():
assert cell is not None
assert cell.memory_regions is not None
for region_name, region in cell.memory_regions.items():
if not isinstance(region, MemoryRegionData):
continue
if region.allocatable:
continue
assert self.shared is not None
if region.shared and region_name in self.shared:
current_segment = self.shared[region_name]
assert current_segment.shared_regions
current_segment.shared_regions[cell_name].append(region)
if region_name in self.per_region_constraints:
constraint = self.per_region_constraints[region_name]
if current_segment.constraint:
constraint = constraint.merge(
current_segment.constraint
)
current_segment.constraint = constraint
else:
current_segment = AllocatorSegment(
region_name, shared_regions={cell_name: [region]},
)
if region_name in self.per_region_constraints:
current_segment.constraint = self.per_region_constraints[
region_name
]
if region.physical_start_addr is None:
self.unallocated.append(current_segment)
# TODO are shared regions required to have
# the same name accross cells?
if region.shared:
self.shared[region_name] = current_segment
# Add hypervisor memories
hypervisor_memory = self.root_cell.hypervisor_memory
assert isinstance(hypervisor_memory, HypervisorMemoryRegion)
if hypervisor_memory.physical_start_addr is None:
self.unallocated.append(
AllocatorSegment(
"hypervisor_memory",
alignment=hypervisor_memory.size, # FIXME: this is too much alignment
shared_regions={"hypervisor": [hypervisor_memory]},
)
)
self._log_shared_segments()
class MergeIoRegionsPass(BasePass):
""" Merge IO regions in root cell that are at most n kB apart.
n defaults to 64 kb
"""
def __init__(
self,
set_params: Optional[GenerateConfig],
gen_params: Optional[GenerateParameters],
) -> None:
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
self.root_cell: Optional[CellConfig] = None
self.logger = logging.getLogger("autojail")
self.max_dist = 64 * 1024
if set_params:
self.max_dist = set_params.mem_io_merge_threshold
if gen_params:
threshold_choice = ScalarChoice()
threshold_choice.lower = 1024
threshold_choice.upper = 64 * 1024 * 1024
threshold_choice.step = 1024
threshold_choice.integer = True
threshold_choice.log = True
gen_params.mem_io_merge_threshold = threshold_choice
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.logger.info("Merge IO Regions")
self.board = board
self.config = config
for cell in self.config.cells.values():
if cell.type == "root":
self.root_cell = cell
assert self.root_cell
assert self.root_cell.memory_regions
shared_regions_ana = UnallocatedOrSharedSegmentsAnalysis(
self.root_cell,
self.config.cells,
self.logger,
dict(),
None,
key=lambda region: region.physical_start_addr,
)
shared_regions_ana.run()
def get_io_regions(
regions: Dict[
str,
Union[str, ShMemNetRegion, MemoryRegion, DeviceMemoryRegion],
]
) -> List[Tuple[str, Union[DeviceMemoryRegion, MemoryRegion]]]:
return list(
[
(name, r)
for name, r in regions.items()
if isinstance(r, MemoryRegionData) and "MEM_IO" in r.flags
]
)
regions: Sequence[Tuple[str, MemoryRegionData]] = get_io_regions(
self.root_cell.memory_regions
)
regions = sorted(
regions,
key=lambda t: t[1].physical_start_addr
if t[1].physical_start_addr is not None
else 0,
)
grouped_regions: List[List[Tuple[str, MemoryRegionData]]] = []
current_group: List[Tuple[str, MemoryRegionData]] = []
max_dist = self.max_dist
vpci_start_addr = None
vpci_end_addr = None
if (
self.root_cell.platform_info is not None
and self.root_cell.platform_info.pci_mmconfig_base is not None
and self.root_cell.platform_info.pci_mmconfig_base > 0
):
vpci_start_addr = self.root_cell.platform_info.pci_mmconfig_base
vpci_end_addr = (
vpci_start_addr
+ (self.root_cell.platform_info.pci_mmconfig_end_bus + 1)
* 256
* 4096
)
for name, r in regions:
assert r.physical_start_addr is not None
assert r.size is not None
if current_group:
r1_end = r.physical_start_addr + r.size
r1_start = r.physical_start_addr
assert current_group[-1][1].physical_start_addr is not None
assert current_group[-1][1].size is not None
assert current_group[0][1].physical_start_addr is not None
last_region_end = (
current_group[-1][1].physical_start_addr
+ current_group[-1][1].size
)
# Do not merge regions if merged regions would
# overlap with gic
gic_overlap = False
interrupt_ranges = []
for interrupt_controller in board.interrupt_controllers:
if interrupt_controller.gic_version == 2:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x1000)
)
interrupt_ranges.append(
(interrupt_controller.gicc_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gich_base, 0x2000)
)
interrupt_ranges.append(
(interrupt_controller.gicv_base, 0x2000)
)
elif interrupt_controller.gic_version == 3:
interrupt_ranges.append(
(interrupt_controller.gicd_base, 0x10000)
)
interrupt_ranges.append(
(interrupt_controller.gicr_base, 0x20000)
)
for interrupt_range in interrupt_ranges:
if (
current_group[0][1].physical_start_addr
< interrupt_range[0] + interrupt_range[1]
):
if r1_end > interrupt_range[0]:
gic_overlap = True
break
vpci_overlap = False
if vpci_start_addr is not None and vpci_end_addr is not None:
if (
get_overlap(
(r1_start, r1_end), (vpci_start_addr, vpci_end_addr)
)
> 0
):
vpci_overlap = True
if (
r1_start - last_region_end > max_dist
or gic_overlap
or vpci_overlap
):
grouped_regions.append(current_group)
if not gic_overlap and not vpci_overlap:
current_group = [(name, r)]
else:
current_group = []
else:
current_group.append((name, r))
else:
current_group.append((name, r))
if current_group:
grouped_regions.append(current_group)
self.logger.info(f"Got {len(grouped_regions)} grouped region(s):")
for group in grouped_regions:
assert group[0][1].physical_start_addr is not None
assert group[-1][1].physical_start_addr is not None
assert group[-1][1].size is not None
group_begin = group[0][1].physical_start_addr
group_end = group[-1][1].physical_start_addr + group[-1][1].size
self.logger.info(
f"Group-Begin: (0x{group_begin:x} - 0x{group_end:x})"
)
for region in group:
self.logger.info(f"\t{region}")
self.logger.info("Group-End\n")
for index, regions in enumerate(grouped_regions):
r_start = regions[0][1]
r_end = regions[-1][1]
assert r_start.physical_start_addr is not None
assert r_end.size is not None
assert r_end.physical_start_addr is not None
new_size = (
r_end.physical_start_addr + r_end.size
) - r_start.physical_start_addr
def aux(
acc: Iterable[str], t: Tuple[str, MemoryRegionData]
) -> Iterable[str]:
_, r = t
return set(acc) | set(r.flags)
init: Iterable[str] = set()
flags: List[str] = sorted(list(reduce(aux, regions, init)))
physical_start_addr = r_start.physical_start_addr
virtual_start_addr = r_start.virtual_start_addr
new_region = MemoryRegion(
size=new_size,
physical_start_addr=physical_start_addr,
virtual_start_addr=virtual_start_addr,
flags=flags,
allocatable=False,
shared=False,
)
assert self.root_cell.memory_regions
for name, _ in regions:
del self.root_cell.memory_regions[name]
self.root_cell.memory_regions[f"mmio_{index}"] = new_region
return (self.board, self.config)
class PrepareMemoryRegionsPass(BasePass):
""" Prepare memory regions by merging regions from Extracted Board Info and Cell Configuration"""
def __init__(self) -> None:
self.config: Optional[JailhouseConfig] = None
self.board: Optional[Board] = None
def __call__(
self, board: Board, config: JailhouseConfig
) -> Tuple[Board, JailhouseConfig]:
self.board = board
self.config = config
assert self.board is not None
assert self.config is not None
for cell in self.config.cells.values():
assert cell.memory_regions is not None
for region in cell.memory_regions.values():
if isinstance(region, MemoryRegionData) and region.size is None:
region.size = self.board.pagesize
if cell.type == "root":
self._prepare_memory_regions_root(cell)
return self.board, self.config
def _prepare_memory_regions_root(self, cell: CellConfig) -> None:
assert self.board is not None
assert self.board.memory_regions is not None
assert cell.memory_regions is not None
allocatable_ranges = []
for region in self.board.memory_regions.values():
if region.allocatable:
assert region.size is not None
assert region.physical_start_addr is not None
start = region.physical_start_addr
end = start + region.size
allocatable_ranges.append([start, end])
allocatable_ranges.sort(key=lambda r: r[0])
def overlaps_allocatable_region(start, end):
for r in allocatable_ranges:
if (
r[0] <= start
and start <= r[1]
or r[0] <= end
and end <= r[1]
):
return True
return False
for name, memory_region in self.board.memory_regions.items():
if memory_region.physical_start_addr is None:
continue
if memory_region.virtual_start_addr is None:
continue
if memory_region.size is None:
continue
p_start = memory_region.physical_start_addr
v_start = memory_region.virtual_start_addr
p_end = memory_region.physical_start_addr + memory_region.size
v_end = memory_region.virtual_start_addr + memory_region.size
assert p_start is not None
assert v_start is not None
assert p_end is not None
assert v_end is not None
if overlaps_allocatable_region(p_start, p_end):
continue
skip = False
for cell_region in cell.memory_regions.values():
if not isinstance(cell_region, MemoryRegionData):
continue
assert cell_region.size is not None
if cell_region.physical_start_addr is not None:
if (
p_start >= cell_region.physical_start_addr
and p_start
< cell_region.physical_start_addr + cell_region.size
):
skip = True
if (
p_end >= cell_region.physical_start_addr
and p_end
< cell_region.physical_start_addr + cell_region.size
):
skip = True
if cell_region.virtual_start_addr is not None:
if (
v_start >= cell_region.virtual_start_addr
and v_start
< cell_region.virtual_start_addr + cell_region.size
):
skip = True
if (
v_end >= cell_region.virtual_start_addr
and v_end
< cell_region.virtual_start_addr + cell_region.size
):
skip = True
if skip is True:
continue
cell.memory_regions[name] = memory_region
```

avg_line_length: 35.438584 | max_line_length: 140 | alphanum_fraction: 0.538728
qsc_code_num_words_quality_signal: 5,019 | qsc_code_num_chars_quality_signal: 49,047 | qsc_code_mean_word_length_quality_signal: 5.033672 | qsc_code_frac_words_unique_quality_signal: 0.084877
qsc_code_frac_chars_top_2grams_quality_signal: 0.04453 | qsc_code_frac_chars_top_3grams_quality_signal: 0.041719 | qsc_code_frac_chars_top_4grams_quality_signal: 0.013379
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.419728 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0.336843 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0.262548
qsc_code_frac_chars_dupe_8grams_quality_signal: 0.222016 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0.167234 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0.143366
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.007732 | qsc_code_frac_chars_whitespace_quality_signal: 0.390911
qsc_code_size_file_byte_quality_signal: 49,047 | qsc_code_num_lines_quality_signal: 1,383 | qsc_code_num_chars_line_max_quality_signal: 141 | qsc_code_num_chars_line_mean_quality_signal: 35.464208
qsc_code_frac_chars_alphabet_quality_signal: 0.837953 | qsc_code_frac_chars_comments_quality_signal: 0.04302 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.275212 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0.001885
qsc_code_frac_chars_string_length_quality_signal: 0.035627 | qsc_code_frac_chars_long_word_length_quality_signal: 0.005233 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0.001687 | qsc_code_frac_lines_prompt_comments_quality_signal: 0.001446 | qsc_code_frac_lines_assert_quality_signal: 0.065975
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.040528 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0.006598 | qsc_codepython_frac_lines_import_quality_signal: 0.013195 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0.001885
qsc_codepython_score_lines_no_logic_quality_signal: 0.075401 | qsc_codepython_frac_lines_print_quality_signal: 0.00754
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 661499e1d6e03e831ef662b0f10bed15a0e0160c | size: 806 | ext: py | lang: Python
max_stars_repo_path: tests/test_util.py | max_stars_repo_name: tsufeki/python-restclientaio | max_stars_repo_head_hexsha: 2af2ded9e22ba5552ace193691ed3a4b520cadf8 | max_stars_repo_licenses: ["BSD-2-Clause"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_util.py | max_issues_repo_name: tsufeki/python-restclientaio | max_issues_repo_head_hexsha: 2af2ded9e22ba5552ace193691ed3a4b520cadf8 | max_issues_repo_licenses: ["BSD-2-Clause"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_util.py | max_forks_repo_name: tsufeki/python-restclientaio | max_forks_repo_head_hexsha: 2af2ded9e22ba5552ace193691ed3a4b520cadf8 | max_forks_repo_licenses: ["BSD-2-Clause"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
import pytest
from restclientaio._util import *
class TestFullName:
@pytest.mark.parametrize('args,expected', [
((str,), 'builtins.str'),
((str, 'lower'), 'builtins.str.lower'),
])
def test_full_name(self, args, expected):
assert full_name(*args) == expected
class TestFormatRecur:
@pytest.mark.parametrize('args,kwargs,expected', [
(('{}', 42), {}, '42'),
(({'{0}': '{foo}foo'}, 'bar'), {'foo': 'FOO'}, {'bar': 'FOOfoo'}),
((['{0.real:02d}'], 1), {}, ['01']),
])
def test_format_recur(self, args, kwargs, expected):
assert format_recur(*args, **kwargs) == expected
def test_throws_on_self_referencing(self):
d = {}
d['foo'] = d
with pytest.raises(ValueError):
format_recur(d)
```

avg_line_length: 25.1875 | max_line_length: 74 | alphanum_fraction: 0.555831
qsc_code_num_words_quality_signal: 88 | qsc_code_num_chars_quality_signal: 806 | qsc_code_mean_word_length_quality_signal: 4.954545 | qsc_code_frac_words_unique_quality_signal: 0.454545
qsc_code_frac_chars_top_2grams_quality_signal: 0.082569 | qsc_code_frac_chars_top_3grams_quality_signal: 0.123853 | qsc_code_frac_chars_top_4grams_quality_signal: 0.114679
qsc_code_frac_chars_dupe_5grams_quality_signal: 0 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.017974 | qsc_code_frac_chars_whitespace_quality_signal: 0.240695
qsc_code_size_file_byte_quality_signal: 806 | qsc_code_num_lines_quality_signal: 31 | qsc_code_num_chars_line_max_quality_signal: 75 | qsc_code_num_chars_line_mean_quality_signal: 26
qsc_code_frac_chars_alphabet_quality_signal: 0.694444 | qsc_code_frac_chars_comments_quality_signal: 0 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0.090909 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.146584 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0.090909
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.136364 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.090909 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.318182 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 6615213f91d714c671d2fcc45dfcb7ea7995394c | size: 1,112 | ext: py | lang: Python
max_stars_repo_path: experiments/test_experiments.py | max_stars_repo_name: srikarym/torchrl | max_stars_repo_head_hexsha: fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | max_stars_repo_licenses: ["Apache-2.0"] | max_stars_count: 3 | max_stars_repo_stars_event_min_datetime: 2019-02-27T19:00:32.000Z | max_stars_repo_stars_event_max_datetime: 2020-07-19T03:18:28.000Z
max_issues_repo_path: experiments/test_experiments.py | max_issues_repo_name: srikarym/torchrl | max_issues_repo_head_hexsha: fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | max_issues_repo_licenses: ["Apache-2.0"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: experiments/test_experiments.py | max_forks_repo_name: srikarym/torchrl | max_forks_repo_head_hexsha: fee98e78ac1657a2c9a4063dd8d63ba207a121e2 | max_forks_repo_licenses: ["Apache-2.0"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
# pylint: disable=redefined-outer-name
"""Test Experiments.
This test runs all problems and hyperparameter
pairs for 100 time steps. It only guarantees
correct API compatiblity and not the problem
performance metrics.
"""
import pytest
from torchrl import registry
from torchrl.cli.commands.run import do_run
problem_hparams_tuples = []
for problem_id, hparams_list in registry.list_problem_hparams().items():
for hparam_set_id in hparams_list:
problem_hparams_tuples.append((problem_id, hparam_set_id))
@pytest.fixture(scope='function')
def problem_argv(request):
problem_id, hparam_set_id = request.param
args_dict = {
'problem': problem_id,
'hparam_set': hparam_set_id,
'seed': None,
'extra_hparams': {
'num_total_steps': 100,
},
'log_interval': 50,
'eval_interval': 50,
'num_eval': 1,
}
yield args_dict
@pytest.mark.parametrize('problem_argv', problem_hparams_tuples,
indirect=['problem_argv'])
def test_problem(problem_argv):
problem = problem_argv.pop('problem')
do_run(problem, **problem_argv)
```

avg_line_length: 24.711111 | max_line_length: 72 | alphanum_fraction: 0.714029
qsc_code_num_words_quality_signal: 146 | qsc_code_num_chars_quality_signal: 1,112 | qsc_code_mean_word_length_quality_signal: 5.164384 | qsc_code_frac_words_unique_quality_signal: 0.486301
qsc_code_frac_chars_top_2grams_quality_signal: 0.087533 | qsc_code_frac_chars_top_3grams_quality_signal: 0.058355 | qsc_code_frac_chars_top_4grams_quality_signal: 0.071618
qsc_code_frac_chars_dupe_5grams_quality_signal: 0.05305 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.012128 | qsc_code_frac_chars_whitespace_quality_signal: 0.184353
qsc_code_size_file_byte_quality_signal: 1,112 | qsc_code_num_lines_quality_signal: 44 | qsc_code_num_chars_line_max_quality_signal: 73 | qsc_code_num_chars_line_mean_quality_signal: 25.272727
qsc_code_frac_chars_alphabet_quality_signal: 0.819184 | qsc_code_frac_chars_comments_quality_signal: 0.192446 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0
qsc_code_frac_chars_string_length_quality_signal: 0.135802 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.074074 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.111111 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.185185 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 661ea642755905b0144b877774494043f351d6cb | size: 1,316 | ext: py | lang: Python
max_stars_repo_path: magicauth/otp_forms.py | max_stars_repo_name: glibersat/django-magicauth | max_stars_repo_head_hexsha: 545cb0df2b2368b27089e253fafa666ca8a870f1 | max_stars_repo_licenses: ["MIT"] | max_stars_count: null | max_stars_repo_stars_event_min_datetime: null | max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: magicauth/otp_forms.py | max_issues_repo_name: glibersat/django-magicauth | max_issues_repo_head_hexsha: 545cb0df2b2368b27089e253fafa666ca8a870f1 | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: magicauth/otp_forms.py | max_forks_repo_name: glibersat/django-magicauth | max_forks_repo_head_hexsha: 545cb0df2b2368b27089e253fafa666ca8a870f1 | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
from django import forms
from django.core.validators import RegexValidator
from django.core.exceptions import ValidationError
from django_otp import user_has_device, devices_for_user
from magicauth import settings as magicauth_settings
class OTPForm(forms.Form):
OTP_NUM_DIGITS = magicauth_settings.OTP_NUM_DIGITS
otp_token = forms.CharField(
max_length=OTP_NUM_DIGITS,
min_length=OTP_NUM_DIGITS,
validators=[RegexValidator(r"^\d{6}$")],
label=f"Entrez le code à {OTP_NUM_DIGITS} chiffres généré par votre téléphone ou votre carte OTP",
widget=forms.TextInput(attrs={"autocomplete": "off"}),
)
def __init__(self, user, *args, **kwargs):
super(OTPForm, self).__init__(*args, **kwargs)
self.user = user
def clean_otp_token(self):
otp_token = self.cleaned_data["otp_token"]
user = self.user
if not user_has_device(user):
raise ValidationError("Le système n'a pas trouvé d'appareil (carte OTP ou générateur sur téléphone) pour votre compte. Contactez le support pour en ajouter un.")
for device in devices_for_user(user):
if device.verify_is_allowed() and device.verify_token(otp_token):
return otp_token
raise ValidationError("Ce code n'est pas valide.")
```

avg_line_length: 37.6 | max_line_length: 173 | alphanum_fraction: 0.705167
qsc_code_num_words_quality_signal: 179 | qsc_code_num_chars_quality_signal: 1,316 | qsc_code_mean_word_length_quality_signal: 4.949721 | qsc_code_frac_words_unique_quality_signal: 0.47486
qsc_code_frac_chars_top_2grams_quality_signal: 0.054176 | qsc_code_frac_chars_top_3grams_quality_signal: 0.06772 | qsc_code_frac_chars_top_4grams_quality_signal: 0.040632
qsc_code_frac_chars_dupe_5grams_quality_signal: 0 | qsc_code_frac_chars_dupe_6grams_quality_signal: 0 | qsc_code_frac_chars_dupe_7grams_quality_signal: 0
qsc_code_frac_chars_dupe_8grams_quality_signal: 0 | qsc_code_frac_chars_dupe_9grams_quality_signal: 0 | qsc_code_frac_chars_dupe_10grams_quality_signal: 0
qsc_code_frac_chars_replacement_symbols_quality_signal: 0 | qsc_code_frac_chars_digital_quality_signal: 0.000962 | qsc_code_frac_chars_whitespace_quality_signal: 0.209726
qsc_code_size_file_byte_quality_signal: 1,316 | qsc_code_num_lines_quality_signal: 34 | qsc_code_num_chars_line_max_quality_signal: 174 | qsc_code_num_chars_line_mean_quality_signal: 38.705882
qsc_code_frac_chars_alphabet_quality_signal: 0.850962 | qsc_code_frac_chars_comments_quality_signal: 0 | qsc_code_cate_xml_start_quality_signal: 0
qsc_code_frac_lines_dupe_lines_quality_signal: 0 | qsc_code_cate_autogen_quality_signal: 0 | qsc_code_frac_lines_long_string_quality_signal: 0.038462
qsc_code_frac_chars_string_length_quality_signal: 0.212766 | qsc_code_frac_chars_long_word_length_quality_signal: 0 | qsc_code_frac_lines_string_concat_quality_signal: 0
qsc_code_cate_encoded_data_quality_signal: 0 | qsc_code_frac_chars_hex_words_quality_signal: 0 | qsc_code_frac_lines_prompt_comments_quality_signal: 0 | qsc_code_frac_lines_assert_quality_signal: 0
qsc_codepython_cate_ast_quality_signal: 1 | qsc_codepython_frac_lines_func_ratio_quality_signal: 0.076923 | qsc_codepython_cate_var_zero_quality_signal: false
qsc_codepython_frac_lines_pass_quality_signal: 0 | qsc_codepython_frac_lines_import_quality_signal: 0.192308 | qsc_codepython_frac_lines_simplefunc_quality_signal: 0
qsc_codepython_score_lines_no_logic_quality_signal: 0.423077 | qsc_codepython_frac_lines_print_quality_signal: 0
qsc_code_num_words through qsc_codepython_frac_lines_print (the 41 columns without the `_quality_signal` suffix): 0 for every field, except qsc_code_frac_words_unique and qsc_code_cate_encoded_data, which are null
effective: 1 | hits: 0
hexsha: 662336d44c462304b2b5f94e8721da8b5b0d3e73 | size: 7,450 | ext: py | lang: Python
max_stars_repo_path: supercache/engine/memory.py | max_stars_repo_name: Peter92/supercache | max_stars_repo_head_hexsha: e85ae87e4c2fead6e2a6aa55c0983d249512f34d | max_stars_repo_licenses: ["MIT"] | max_stars_count: 2 | max_stars_repo_stars_event_min_datetime: 2020-03-02T01:22:25.000Z | max_stars_repo_stars_event_max_datetime: 2020-05-18T16:52:11.000Z
max_issues_repo_path: supercache/engine/memory.py | max_issues_repo_name: huntfx/supercache | max_issues_repo_head_hexsha: e85ae87e4c2fead6e2a6aa55c0983d249512f34d | max_issues_repo_licenses: ["MIT"] | max_issues_count: null | max_issues_repo_issues_event_min_datetime: null | max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: supercache/engine/memory.py | max_forks_repo_name: huntfx/supercache | max_forks_repo_head_hexsha: e85ae87e4c2fead6e2a6aa55c0983d249512f34d | max_forks_repo_licenses: ["MIT"] | max_forks_count: null | max_forks_repo_forks_event_min_datetime: null | max_forks_repo_forks_event_max_datetime: null
content:

```python
import time
from collections import defaultdict
from .. import exceptions, utils
class Memory(object):
"""Cache directly in memory.
This is by far the fastest solution, but the cache cannot be shared
outside the current process.
This is not completely thread safe, but care has been taken to
avoid any errors from stopping the code working.
"""
FIFO = FirstInFirstOut = 0
FILO = FirstInLastOut = 1
LRU = LeastRecentlyUsed = 2
MRU = MostRecentlyUsed = 3
LFU = LeastFrequentlyUsed = 4
def __init__(self, ttl=None, mode=LRU, count=None, size=None):
"""Create a new engine.
Parameters:
mode (int): How to purge the old keys.
ttl (int): Time the cache is valid for.
Set to None for infinite.
count (int): Maximum cache results to store.
Set to None or 0 for infinite.
size (int): Maximum size of cache in bytes.
This is a soft limit, where the memory will be
allocated first, and any extra cache purged later.
The latest cache item will always be stored.
Set to None for infinite.
"""
self.data = dict(
result={},
hits=defaultdict(int),
misses=defaultdict(int),
size={None: 0},
ttl={},
insert={},
access={}
)
self.mode = mode
self.ttl = ttl
self.count = count
self.size = size
self._next_ttl = float('inf')
def keys(self):
"""Get the current stored cache keys."""
return list(iter(self))
def __iter__(self):
"""Iterate through all the keys."""
self._purge()
return iter(self.data['result'])
def exists(self, key):
"""Find if cache currently exists for a given key.
Any key past its ttl will be removed.
"""
if key in self.data['result']:
if self.expired(key):
self.delete(key)
return False
return True
return False
def expired(self, key, _current_time=None):
"""Determine is a key has expired."""
if key not in self.data['ttl']:
return False
if _current_time is None:
_current_time = time.time()
try:
return self.data['ttl'][key] <= _current_time
except KeyError:
return True
def get(self, key, purge=False):
"""Get the value belonging to a key.
An error will be raised if the cache is expired or doesn't
exist.
"""
if purge:
self._purge()
if not self.exists(key):
raise exceptions.CacheNotFound(key)
# If a purge was done, then skip the expiry check
if not purge and self.expired(key):
raise exceptions.CacheExpired(key)
try:
self.data['hits'][key] += 1
self.data['access'][key] = time.time()
return self.data['result'][key]
except KeyError:
raise exceptions.CacheExpired(key)
def put(self, key, value, ttl=None, purge=True):
"""Add a new value to cache.
This will overwrite any old cache with the same key.
"""
if ttl is None:
ttl = self.ttl
self.data['result'][key] = value
try:
self.data['misses'][key] += 1
except KeyError:
self.data['misses'][key] = 1
# Calculate size
if self.size is not None:
size = utils.getsize(value)
self.data['size'][None] += size - self.data['size'].get(key, 0)
self.data['size'][key] = size
# Set insert/access time
current_time = time.time()
self.data['insert'][key] = self.data['access'][key] = current_time
# Set timeout
if ttl is None or ttl <= 0:
try:
del self.data['ttl'][key]
except KeyError:
pass
else:
self.data['ttl'][key] = current_time + ttl
self._next_ttl = min(self._next_ttl, self.data['ttl'][key])
# Clean old keys
if purge:
self._purge(ignore=key)
def delete(self, key):
"""Delete an item of cache.
This will not remove the hits or misses.
"""
if key in self.data['result']:
try:
del self.data['result'][key]
del self.data['insert'][key]
del self.data['access'][key]
if key in self.data['ttl']:
del self.data['ttl'][key]
if self.size is not None:
self.data['size'][None] -= self.data['size'].pop(key)
except KeyError:
pass
return True
return False
def hits(self, key):
"""Return the number of hits on an item of cache."""
return self.data['hits'].get(key, 0)
def misses(self, key):
"""Return the number of misses on an item of cache."""
return self.data['misses'].get(key, 0)
def _purge(self, ignore=None):
"""Remove old cache."""
count = self.count
size = self.size
purged = 0
# Delete expired
if self.data['ttl']:
current_time = time.time()
if current_time > self._next_ttl:
self._next_ttl = float('inf')
for key in tuple(self.data['result']):
if self.expired(key, _current_time=current_time):
self.delete(key)
elif key in self.data['ttl']:
try:
self._next_ttl = min(self._next_ttl, self.data['ttl'][key])
except KeyError:
pass
# Determine if we can skip
if count is not None and len(self.data['result']) < count:
count = None
if size is not None and self.data['size'][None] < size:
size = None
if count is None and size is None:
return purged
# Order the keys
if self.mode == self.FirstInFirstOut:
order_by = lambda k: self.data['insert'][k]
elif self.mode == self.FirstInLastOut:
order_by = lambda k: -self.data['insert'][k]
elif self.mode == self.LeastRecentlyUsed:
order_by = lambda k: self.data['access'][k]
elif self.mode == self.MostRecentlyUsed:
order_by = lambda k: -self.data['access'][k]
elif self.mode == self.LeastFrequentlyUsed:
order_by = lambda k: self.data['hits'][k]
else:
raise NotImplementedError(self.mode)
ordered_keys = sorted(self.data['result'], key=order_by, reverse=True)
# Remove the cache data
if count is not None:
for key in ordered_keys[count:]:
if key == ignore:
continue
self.delete(key)
purged += 1
if size is not None:
total_size = 0
for key in ordered_keys:
if key == ignore:
continue
total_size += self.data['size'][key]
if total_size > size:
self.delete(key)
purged += 1
return purged
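# Illustrative usage sketch for the Memory engine above; the keys and values
# are arbitrary and the count limit is chosen only to show LRU purging.
cache = Memory(ttl=60, mode=Memory.LRU, count=2)
cache.put("a", 1)
cache.put("b", 2)
cache.get("a")       # touching "a" makes "b" the least recently used key
cache.put("c", 3)    # count limit reached, so the LRU key "b" is purged
assert cache.exists("a") and cache.exists("c")
assert not cache.exists("b")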
| 32.251082
| 87
| 0.52094
| 902
| 7,450
| 4.242794
| 0.201774
| 0.087797
| 0.028743
| 0.021949
| 0.263392
| 0.159916
| 0.111314
| 0.082571
| 0.067416
| 0.067416
| 0
| 0.003882
| 0.377584
| 7,450
| 230
| 88
| 32.391304
| 0.821436
| 0.195839
| 0
| 0.357616
| 0
| 0
| 0.035265
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072848
| false
| 0.019868
| 0.019868
| 0
| 0.264901
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6623eb16f8643fe3f8e712cd421dfd5fbd6d80c3
| 2,541
|
py
|
Python
|
problem_1_falling/test_agent.py
|
victor-armegioiu/ai-mas-challenges
|
c449f4bb3fd7e7a5c1c9b00d22209532c92f39aa
|
[
"MIT"
] | null | null | null |
problem_1_falling/test_agent.py
|
victor-armegioiu/ai-mas-challenges
|
c449f4bb3fd7e7a5c1c9b00d22209532c92f39aa
|
[
"MIT"
] | null | null | null |
problem_1_falling/test_agent.py
|
victor-armegioiu/ai-mas-challenges
|
c449f4bb3fd7e7a5c1c9b00d22209532c92f39aa
|
[
"MIT"
] | null | null | null |
from utils import read_cfg
from falling_objects_env import FallingObjects, PLAYER_KEYS, ACTIONS
from argparse import ArgumentParser
from demo_agent import DemoAgent
from dqn_agent import DDQNAgent
import importlib
import numpy as np
import cv2 as cv2
BATCH_SIZE = 32
if __name__ == "__main__":
arg_parser = ArgumentParser()
arg_parser.add_argument(
'-c', '--config-file', default='configs/default.yaml', type=str, dest='config_file',
help='Default configuration file'
)
arg_parser.add_argument(
'-a', '--agent', default='demo_agent+DemoAgent', type=str, dest='agent',
help='The agent to test in format <module_name>+<class_name>'
)
args = arg_parser.parse_args()
config_file = args.config_file
cfg = read_cfg(config_file)
test_agent_name = args.agent.split("+")
test_steps = cfg.test_steps
test_agent = getattr(importlib.import_module(test_agent_name[0]), test_agent_name[1])
print(f"Testing agent {test_agent_name[1]}")
env = FallingObjects(cfg)
#agent = test_agent(max(ACTIONS.keys()))
# Dueling Deep Q-Learning Agent
agent = DDQNAgent()
all_r = 0
obs = env.reset()
# In lieu of having a state comprised of a single observation, we stack the last 3 images
# at any given time in order to create a state, as suggested in DeepMind's DQN paper;
# we do this in order to preserve the movement of the falling objects.
s1, _, r1, _ = env.step(0)
s2, _, r2, _ = env.step(0)
s3, _, r3, _ = env.step(0)
all_r += (r1 + r2 + r3)
curr_obs = [s1, s2, s3]
# Lambda function to reshape, convert to grayscale and stack the images in our observation list.
make_obs = lambda obs_list: np.stack([cv2.cvtColor(obs, cv2.COLOR_BGR2GRAY).reshape((1, 86, 86)) for obs in obs_list], axis=3)  # np.stack needs a sequence, not a generator
for i in range(test_steps):
# curr_obs is a list of the last 3 frames
action = agent.act(make_obs(curr_obs))
next_frame, r, done, _ = env.step(action)
all_r += r
print('STEP', i, ':', action, '->', all_r)
# next_obs takes the last 2 entries in our initial observation and adds the current frame
next_obs = curr_obs[1:] + [next_frame]
# We cache the experiences in our replay buffer for further usage in our training steps.
agent.remember(make_obs(curr_obs), action, r, make_obs(next_obs), done)
curr_obs = next_obs
agent.replay(min(len(agent.memory), BATCH_SIZE))
print(f"Reward for {test_steps} steps: {all_r} ")
| 33.88
| 131
| 0.67375
| 384
| 2,541
| 4.257813
| 0.385417
| 0.033028
| 0.031804
| 0.024465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018173
| 0.220386
| 2,541
| 74
| 132
| 34.337838
| 0.807168
| 0.243998
| 0
| 0.043478
| 0
| 0
| 0.13023
| 0.013598
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.195652
| 0
| 0.195652
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6629f9d2ad476bdf575b8d97233a3d2d8781d35e
| 2,874
|
py
|
Python
|
src/unpurple.py
|
rnbdev/purple-fringe
|
b372c13f0e4d7d7ba9da53d7e6a4326c0e6c453a
|
[
"BSD-3-Clause"
] | 2
|
2021-03-16T03:29:41.000Z
|
2021-05-21T03:32:57.000Z
|
src/unpurple.py
|
rnbdev/purple-fringe
|
b372c13f0e4d7d7ba9da53d7e6a4326c0e6c453a
|
[
"BSD-3-Clause"
] | null | null | null |
src/unpurple.py
|
rnbdev/purple-fringe
|
b372c13f0e4d7d7ba9da53d7e6a4326c0e6c453a
|
[
"BSD-3-Clause"
] | null | null | null |
import cv2
import numpy as np
import argparse
def unpurple(params):
img = cv2.imread(params.input).astype(np.float64)
img_b = img[..., 0]
img_b = np.maximum(0, img_b - params.m * 255)
img_b *= params.i / (1 - params.m)
width = (params.r << 1) + 1
bl = cv2.blur(img_b, (width, width))
if params.mode == "blur":
cv2.imwrite(params.output, bl)
else:
db = np.maximum(img[..., 0] - img[..., 1], 0)
dr = np.maximum(img[..., 2] - img[..., 1], 0)
mb = np.minimum(bl, db)
r_diff = np.minimum(dr, mb * params.maxred)
if params.minred > 0:
b_diff = np.minimum(mb, r_diff / params.minred)
else:
b_diff = mb
if params.mode == "diff":
img_diff = np.dstack(
[b_diff,
np.zeros_like(b_diff),
r_diff]
)
img_diff = img_diff.astype(np.uint8)
cv2.imwrite(params.output, img_diff)
else:
assert(params.mode == "normal")
img_fix = img.copy()
img_fix[..., 0] -= b_diff
img_fix[..., 2] -= r_diff
img_fix = img_fix.astype(np.uint8)
cv2.imwrite(params.output, img_fix)
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument("-i",
help="Intensity (scalar more or less around 1.0)",
type=float,
default=1.)
ap.add_argument("-m",
help="Minimum brightness (positive scalar smaller 1.0)",
type=float,
default=0.)
ap.add_argument("-r",
help="Blur radius (pixel)",
type=int,
default=5)
ap.add_argument("-minred",
help="Minimum red:blue ratio in the fringe",
type=float,
default=0.)
ap.add_argument("-maxred",
help="Maximum red:blue ratio in the fringe",
type=float,
default=.33)
ap.add_argument("-gentle",
help="Gentle (Same as -m 0.8 -minred 0.15)",
action="store_true")
ap.add_argument("-diff", action="store_const",
dest="mode",
const="diff",
default="normal",
help="Output image type")
ap.add_argument("-blur", action="store_const",
dest="mode",
const="blur",
help="Output image type")
ap.add_argument(help="input image",
dest="input")
ap.add_argument(help="output image",
dest="output")
args = ap.parse_args()
if args.gentle:
args.m = 0.8
args.minred = 0.15
img = unpurple(args)
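# Illustrative invocations of the script above; the file names are placeholders.
#   python unpurple.py -gentle input.jpg output.jpg                    # same as -m 0.8 -minred 0.15
#   python unpurple.py -m 0.8 -minred 0.15 -r 7 input.jpg output.jpg
#   python unpurple.py -diff input.jpg fringe.jpg                      # write the detected fringe instead of the corrected image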
| 29.9375
| 76
| 0.4746
| 336
| 2,874
| 3.919643
| 0.261905
| 0.037965
| 0.098709
| 0.050114
| 0.258162
| 0.242976
| 0.198937
| 0.116932
| 0.059226
| 0
| 0
| 0.026903
| 0.392136
| 2,874
| 95
| 77
| 30.252632
| 0.726961
| 0
| 0
| 0.164557
| 0
| 0
| 0.138483
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 1
| 0.012658
| false
| 0
| 0.037975
| 0
| 0.050633
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
662b8a1aba28c4cad375998a888ed93da1fcc650
| 1,588
|
py
|
Python
|
scripts/run_episodes.py
|
takuma-ynd/rrc_example_package
|
f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/run_episodes.py
|
takuma-ynd/rrc_example_package
|
f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/run_episodes.py
|
takuma-ynd/rrc_example_package
|
f53cf3191f4c38f4d1f394ccd55b1d935a6a70ba
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import numpy as np
# from rrc_example_package.code.training_env import make_training_env
from rrc_example_package.code.training_env.env import ActionType
from rrc_example_package import move_cube
from rrc_example_package import cube_env
# env = make_training_env(visualization=False, **eval_config)
# env.unwrapped.initializer = initializer
# eval_config = {
# 'action_space': 'torque_and_position',
# 'frameskip': 3,
# 'residual': True,
# 'reward_fn': f'task{difficulty}_competition_reward',
# 'termination_fn': 'no_termination',
# 'initializer': f'task{difficulty}_init',
# 'monitor': False,
# 'rank': 0
# }
goal = move_cube.sample_goal(3)
goal_dict = {
'position': goal.position,  # use the sampled goal's pose, not the move_cube module
'orientation': goal.orientation
}
env = cube_env.RealRobotCubeEnv(goal_dict, 3)
obs = env.reset()
done = False
accumulated_reward = 0
if env.action_type == ActionType.TORQUE_AND_POSITION:
zero_action = {
'torque': (env.action_space['torque'].sample() * 0).astype(np.float64),
'position': (env.action_space['position'].sample() * 0).astype(np.float64)
}
assert zero_action['torque'].dtype == np.float64
assert zero_action['position'].dtype == np.float64
else:
zero_action = np.array(env.action_space.sample() * 0).astype(np.float64)
assert zero_action.dtype == np.float64
while not done:
obs, reward, done, info = env.step(zero_action)
accumulated_reward += reward
print("Accumulated reward: {}".format(accumulated_reward))
| 32.408163
| 82
| 0.687028
| 198
| 1,588
| 5.262626
| 0.348485
| 0.057582
| 0.065259
| 0.043186
| 0.18714
| 0.142035
| 0.142035
| 0.072937
| 0
| 0
| 0
| 0.016179
| 0.18262
| 1,588
| 48
| 83
| 33.083333
| 0.786595
| 0.316121
| 0
| 0
| 0
| 0
| 0.077425
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
662db785b141a51d5e5c2d7a6ae200c6d7fc2fa6
| 1,458
|
py
|
Python
|
src/sprite.py
|
GreenXenith/zoria
|
30a16baab3643c820613a8c8669ee6235a2cd47c
|
[
"MIT"
] | null | null | null |
src/sprite.py
|
GreenXenith/zoria
|
30a16baab3643c820613a8c8669ee6235a2cd47c
|
[
"MIT"
] | null | null | null |
src/sprite.py
|
GreenXenith/zoria
|
30a16baab3643c820613a8c8669ee6235a2cd47c
|
[
"MIT"
] | null | null | null |
import pygame
from . import assets
from .vector import *
global registered_sprites
registered_sprites = {}
def register_sprite(name, definition):
registered_sprites[name] = definition
class Sprite:
texture = "none.png"
rect = pygame.Rect(0, 0, 0, 0)
def __init__(self, name, pos, z):
self.name = name
self.pos = pos
self.rot = 0
self.z = z
self.vel = Vector(0, 0)
for key in registered_sprites[name]:
value = registered_sprites[name][key]
if not callable(value):
setattr(self, key, value)
def set_pos(self, vec_or_x, y = None):
self.pos = vec_or_num(vec_or_x, y)
def set_texture(self, filename):
self.texture = assets.get(filename)
def set_rect(self, x, y, w, h):
self.rect = pygame.Rect(x, y, w, h)
def get_rect(self):
return self.rect
def on_step(self, dtime, map, player):
oldx = self.pos.x
self.set_pos(self.pos.x + self.vel.x * dtime, self.pos.y)
if map.collides(self.pos, self.z, self.rect):
self.set_pos(oldx, self.pos.y)
oldy = self.pos.y
self.set_pos(self.pos.x, self.pos.y + self.vel.y * dtime)
if map.collides(self.pos, self.z, self.rect):
self.set_pos(self.pos.x, oldy)
if "on_step" in registered_sprites[self.name]:
registered_sprites[self.name]["on_step"](self, dtime, map, player)
| 27
| 78
| 0.598765
| 218
| 1,458
| 3.87156
| 0.238532
| 0.099526
| 0.047393
| 0.042654
| 0.220379
| 0.220379
| 0.154028
| 0.101896
| 0.101896
| 0.101896
| 0
| 0.006635
| 0.276406
| 1,458
| 53
| 79
| 27.509434
| 0.793365
| 0
| 0
| 0.051282
| 0
| 0
| 0.0151
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179487
| false
| 0
| 0.076923
| 0.025641
| 0.358974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
662f6bd952e2e4b17cde090c9644c4ad6ec3e3d7
| 5,873
|
py
|
Python
|
pset1 /ps1.py
|
vnb/Data-Science
|
af2936b621d51a6839111b6c793f9a270cded16c
|
[
"MIT"
] | null | null | null |
pset1 /ps1.py
|
vnb/Data-Science
|
af2936b621d51a6839111b6c793f9a270cded16c
|
[
"MIT"
] | null | null | null |
pset1 /ps1.py
|
vnb/Data-Science
|
af2936b621d51a6839111b6c793f9a270cded16c
|
[
"MIT"
] | null | null | null |
###########################
# 6.00.2x Problem Set 1: Space Cows
from ps1_partition import get_partitions
import time
#================================
# Part A: Transporting Space Cows
#================================
def load_cows(filename):
"""
Read the contents of the given file. Assumes the file contents contain
data in the form of comma-separated cow name, weight pairs, and return a
dictionary containing cow names as keys and corresponding weights as values.
Parameters:
filename - the name of the data file as a string
Returns:
a dictionary of cow name (string), weight (int) pairs
"""
cow_dict = dict()
f = open(filename, 'r')
for line in f:
line_data = line.split(',')
cow_dict[line_data[0]] = int(line_data[1])
return cow_dict
# Problem 1
def greedy_cow_transport(cows,limit=10):
"""
Uses a greedy heuristic to determine an allocation of cows that attempts to
minimize the number of spaceship trips needed to transport all the cows. The
returned allocation of cows may or may not be optimal.
The greedy heuristic should follow the following method:
1. As long as the current trip can fit another cow, add the largest cow that will fit
to the trip
2. Once the trip is full, begin a new trip to transport the remaining cows
Does not mutate the given dictionary of cows.
Parameters:
cows - a dictionary of name (string), weight (int) pairs
limit - weight limit of the spaceship (an int)
Returns:
A list of lists, with each inner list containing the names of cows
transported on a particular trip and the overall list containing all the
trips
"""
# # TODO: Your code here
# pass
#initialize trip list
#helper function sub_set
selected_cows = []
cow_names = [x for x in cows]
available_cows = [cows[x] for x in cows]
cows_remaining = [cows[x] for x in cows]
set_limit = limit
trip_list = []
cow_list = []
cow_trip_list = []
while len(available_cows) > 0:
limit = set_limit
cows_remaining = available_cows[:]
selected_cows = []
sub_list = []
while limit > 0:
if len(cows_remaining) == 0:
break
a = max(cows_remaining)
cows_remaining.remove(a)
if a <= limit:
limit -= a
selected_cows.append(a)
available_cows.remove(a)
trip_list.append(selected_cows)
for i in selected_cows:
for name in cows:
if cows[name] == i:
if name in cow_names:
cow_list.append(name)
cow_names.remove(name)
break
cow_trip_list.append(cow_list)
cow_list = []
return cow_trip_list
# Problem 2
def brute_force_cow_transport(cows,limit=10):
"""
Finds the allocation of cows that minimizes the number of spaceship trips
via brute force. The brute force algorithm should follow the following method:
1. Enumerate all possible ways that the cows can be divided into separate trips
2. Select the allocation that minimizes the number of trips without making any trip
that does not obey the weight limitation
Does not mutate the given dictionary of cows.
Parameters:
cows - a dictionary of name (string), weight (int) pairs
limit - weight limit of the spaceship (an int)
Returns:
A list of lists, with each inner list containing the names of cows
transported on a particular trip and the overall list containing all the
trips
"""
def count_sum(listofcows, cows):
weight = 0
for i in listofcows:
weight += cows[i]
if weight > limit:
return False
break
return True
cow_list = list(cows.keys())
flight_list = []
all_partitions = get_partitions(cow_list)
for i in all_partitions:
switch = 'green'
for j in i:
if count_sum(j, cows) == False:
switch = 'red'
break
if switch == 'green':
flight_list.append(i)
trip_len_list = [len(i) for i in flight_list]
for i in flight_list:
if len(i) == min(trip_len_list):
ideal_trip = i
break
return ideal_trip
# Problem 3
def compare_cow_transport_algorithms():
"""
Using the data from ps1_cow_data.txt and the specified weight limit, run your
greedy_cow_transport and brute_force_cow_transport functions here. Use the
default weight limits of 10 for both greedy_cow_transport and
brute_force_cow_transport.
Print out the number of trips returned by each method, and how long each
method takes to run in seconds.
Returns:
Does not return anything.
"""
cows = load_cows("ps1_cow_data.txt")
limit=10
start = time.time()
ans_a = greedy_cow_transport(cows, limit)
end = time.time()
print(end-start)
start = time.time()
ans_b = brute_force_cow_transport(cows, limit)
end = time.time()
print(end-start)
return
"""
Here is some test data for you to see the results of your algorithms with.
Do not submit this along with any of your answers. Uncomment the last two
lines to print the result of your problem.
"""
cows = load_cows("ps1_cow_data.txt")
limit=10
#print(cows)
#
#print(greedy_cow_transport(cows, limit))
#print(brute_force_cow_transport(cows, limit))
print(compare_cow_transport_algorithms())
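# Illustrative sketch of calling the two transport functions above with a
# hand-written cow dictionary instead of ps1_cow_data.txt; names and weights are made up.
example_cows = {"Maggie": 3, "Herman": 7, "Betsy": 9, "Oreo": 6}
print(greedy_cow_transport(example_cows, limit=10))       # e.g. [['Betsy'], ['Herman', 'Maggie'], ['Oreo']]
print(brute_force_cow_transport(example_cows, limit=10))  # a partition using the fewest trips (3 here, since the total weight is 25)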
| 30.910526
| 90
| 0.604461
| 786
| 5,873
| 4.389313
| 0.260814
| 0.041739
| 0.027826
| 0.036522
| 0.327536
| 0.261449
| 0.213913
| 0.213913
| 0.188986
| 0.170435
| 0
| 0.007994
| 0.318406
| 5,873
| 189
| 91
| 31.074074
| 0.85386
| 0.429253
| 0
| 0.22619
| 0
| 0
| 0.017511
| 0
| 0
| 0
| 0
| 0.005291
| 0
| 1
| 0.059524
| false
| 0
| 0.02381
| 0
| 0.154762
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6630cfae3164736554237dd2d66a2111da1f1715
| 6,080
|
py
|
Python
|
blackboxopt/optimization_loops/dask_distributed.py
|
boschresearch/blackboxopt
|
85abea86f01a4a9d50f05d15e7d850e3288baafd
|
[
"ECL-2.0",
"Apache-2.0"
] | 8
|
2021-07-05T13:37:22.000Z
|
2022-03-11T12:23:27.000Z
|
blackboxopt/optimization_loops/dask_distributed.py
|
boschresearch/blackboxopt
|
85abea86f01a4a9d50f05d15e7d850e3288baafd
|
[
"ECL-2.0",
"Apache-2.0"
] | 14
|
2021-07-07T13:55:23.000Z
|
2022-02-07T13:09:01.000Z
|
blackboxopt/optimization_loops/dask_distributed.py
|
boschresearch/blackboxopt
|
85abea86f01a4a9d50f05d15e7d850e3288baafd
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/blackboxopt
#
# SPDX-License-Identifier: Apache-2.0
import logging
import time
from typing import Callable, List, Set, Union
try:
import dask.distributed as dd
except ImportError as e:
raise ImportError(
"Unable to import Dask Distributed specific dependencies. "
+ "Make sure to install blackboxopt[dask]"
) from e
from blackboxopt import (
Evaluation,
EvaluationSpecification,
OptimizationComplete,
OptimizerNotReady,
)
from blackboxopt.base import (
MultiObjectiveOptimizer,
Objective,
SingleObjectiveOptimizer,
)
from blackboxopt.optimization_loops.utils import (
evaluation_function_wrapper,
init_max_evaluations_with_limit_logging,
)
class MinimalDaskScheduler:
def __init__(
self,
dask_client: dd.Client,
objectives: List[Objective],
logger: logging.Logger,
):
self.client = dask_client
self.objectives = objectives
self.logger = logger
self._not_done_futures: Set = set()
def shutdown(self):
return self.client.shutdown()
def has_capacity(self):
idle = [len(task_id) == 0 for task_id in self.client.processing().values()]
return sum(idle)
def has_running_jobs(self):
return len(self._not_done_futures) > 0
def submit(
self,
eval_function: Callable[[EvaluationSpecification], Evaluation],
eval_spec: EvaluationSpecification,
):
f = self.client.submit(
evaluation_function_wrapper,
evaluation_function=eval_function,
evaluation_specification=eval_spec,
objectives=self.objectives,
catch_exceptions_from_evaluation_function=True,
logger=self.logger,
)
f.bbo_eval_spec = eval_spec
self._not_done_futures.add(f)
def check_for_results(self, timeout_s: float = 5.0) -> List[Evaluation]:
try:
all_futures = dd.wait(
self._not_done_futures, timeout=timeout_s, return_when="FIRST_COMPLETED"
)
return_values: List[Evaluation] = []
for f in all_futures.done:
if f.status == "error":
return_values.append(
Evaluation(
objectives={o.name: None for o in self.objectives},
stacktrace=str(f.traceback()),
**f.bbo_eval_spec
)
)
else:
return_values.append(f.result())
self._not_done_futures = all_futures.not_done
except dd.TimeoutError:
return_values = []
return return_values
def run_optimization_loop(
optimizer: Union[SingleObjectiveOptimizer, MultiObjectiveOptimizer],
evaluation_function: Callable[[EvaluationSpecification], Evaluation],
dask_client: dd.Client,
timeout_s: float = float("inf"),
max_evaluations: int = None,
logger: logging.Logger = None,
) -> List[Evaluation]:
"""Convenience wrapper for an optimization loop that uses Dask to parallelize
optimization until a given timeout or maximum number of evaluations is reached.
This already handles signals from the optimizer in case there is no evaluation
specification available yet.
Args:
optimizer: The blackboxopt optimizer to run.
dask_client: A Dask Distributed client that is configured with workers.
evaluation_function: The function that is called with configuration, settings
and optimizer info dictionaries as arguments like provided by an evaluation
specification.
This is the function that encapsulates the actual execution of
a parametrized experiment (e.g. ML model training) and should return a
`blackboxopt.Evaluation` as a result.
timeout_s: If given, the optimization loop will terminate after the first
optimization step that exceeded the timeout (in seconds). Defaults to inf.
max_evaluations: If given, the optimization loop will terminate after the given
number of steps. Defaults to None.
logger: The logger to use for logging progress. Defaults to None.
Returns:
List of evaluation specifications and results for all evaluations.
"""
logger = logging.getLogger("blackboxopt") if logger is None else logger
objectives = (
optimizer.objectives
if isinstance(optimizer, MultiObjectiveOptimizer)
else [optimizer.objective]
)
evaluations: List[Evaluation] = []
dask_scheduler = MinimalDaskScheduler(
dask_client=dask_client, objectives=objectives, logger=logger
)
_max_evaluations = init_max_evaluations_with_limit_logging(
max_evaluations=max_evaluations, timeout_s=timeout_s, logger=logger
)
n_eval_specs = 0
start = time.time()
while time.time() - start < timeout_s and n_eval_specs < _max_evaluations:
if dask_scheduler.has_capacity():
try:
eval_spec = optimizer.generate_evaluation_specification()
dask_scheduler.submit(evaluation_function, eval_spec)
n_eval_specs += 1
continue
except OptimizerNotReady:
logger.info("Optimizer is not ready yet; will retry after short pause.")
except OptimizationComplete:
logger.info("Optimization is complete")
break
new_evaluations = dask_scheduler.check_for_results(timeout_s=20)
optimizer.report(new_evaluations)
evaluations.extend(new_evaluations)
while dask_scheduler.has_running_jobs():
new_evaluations = dask_scheduler.check_for_results(timeout_s=20)
optimizer.report(new_evaluations)
evaluations.extend(new_evaluations)
return evaluations
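# Illustrative sketch of driving run_optimization_loop; the optimizer instance,
# the evaluation function body and the objective name "loss" are assumptions,
# while the Evaluation construction mirrors the **eval_spec pattern used in
# check_for_results above.
#
# import dask.distributed as dd
# from blackboxopt import Evaluation
#
# def evaluate(eval_spec):
#     # Train or score something with eval_spec.configuration, then report it.
#     loss = sum(v * v for v in eval_spec.configuration.values())
#     return Evaluation(objectives={"loss": loss}, **eval_spec)
#
# client = dd.Client()  # a Dask cluster with at least one worker
# evaluations = run_optimization_loop(
#     optimizer=my_optimizer,            # any blackboxopt single/multi-objective optimizer
#     evaluation_function=evaluate,
#     dask_client=client,
#     max_evaluations=20,
# )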
| 34.742857
| 88
| 0.659539
| 662
| 6,080
| 5.870091
| 0.314199
| 0.018528
| 0.014153
| 0.02316
| 0.099331
| 0.099331
| 0.081832
| 0.081832
| 0.081832
| 0.057643
| 0
| 0.003626
| 0.274178
| 6,080
| 174
| 89
| 34.942529
| 0.876954
| 0.232072
| 0
| 0.139344
| 0
| 0
| 0.045862
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057377
| false
| 0
| 0.081967
| 0.016393
| 0.188525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66349dcef473158c517a2f6336f11fae97c3b0b4
| 2,156
|
py
|
Python
|
app/Duration.py
|
joshuaMarple/phase-vocoder
|
d652a594da2b526eae758c4d0ca6e87e0497b89d
|
[
"Apache-2.0"
] | null | null | null |
app/Duration.py
|
joshuaMarple/phase-vocoder
|
d652a594da2b526eae758c4d0ca6e87e0497b89d
|
[
"Apache-2.0"
] | null | null | null |
app/Duration.py
|
joshuaMarple/phase-vocoder
|
d652a594da2b526eae758c4d0ca6e87e0497b89d
|
[
"Apache-2.0"
] | null | null | null |
"""
Authors: Fernando (UPDATE HIS INFO)
License: GPL 3.0
Description: This file contains functions that allow the user to
change the duration of a .wav
Comments: None.
"""
import subprocess
import os
from sys import platform as _platform
from lib import pydub
def changeDuration(filename,percent):
"""
Input: filename, percent
filename (string): the path to the soundfile
percent (number): the tempo change passed to the soundpitch binary as -tempo=<percent>
Outputs: duroutput.wav
Description: This function will change the duration (tempo) of a soundfile
"""
tempochange = "-tempo="+str(percent)
if _platform == "linux" or _platform == "linux2":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitch')
elif _platform == "darwin":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitchmac')
elif _platform == "win32":
fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'soundpitchwin32.exe')
subprocess.call([fn,filename, "duroutput.wav","-speech", tempochange])
return "duroutput.wav"
def changeGapDuration(filename,gaptime,gapduration,percentage):
"""
Input: filename, gaptime, gapduration, percentage
filename (string): the path to the soundfile
gaptime (float): the time at which to begin changing the duration
gapduration (float): the amount of sound to be changed (from the gaptime start to the end of this length)
percentage (number): the tempo change applied to that segment, as used by changeDuration
Outputs: the processed file (the input file is overwritten and returned)
Description: This function will change the duration of a section of a soundfile
"""
file = pydub.AudioSegment.from_wav(filename)
newdurationpart = file[int((gaptime* 1000)) : int(((gaptime+gapduration) * 1000))]
first = file[:int(gaptime * 1000)]
last = file[int((gaptime+gapduration) * 1000):]
newdurationpart.export("durinput.wav", format="wav")
changeDuration("durinput.wav",percentage)
newdurationpart = pydub.AudioSegment.from_wav("duroutput.wav")
newfile = first + newdurationpart + last
newfile.export(filename, format="wav")
os.remove("durinput.wav")
os.remove("duroutput.wav")
return newfile
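# Illustrative usage sketch; the file name is a placeholder, and the exact
# meaning of the percent value depends on the bundled soundpitch binary's
# -tempo flag, which is an assumption here.
# changeDuration("speech.wav", 25)                                              # change the tempo of the whole file
# changeGapDuration("speech.wav", gaptime=1.0, gapduration=2.0, percentage=25)  # change only the 1.0s-3.0s segment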
| 34.222222
| 106
| 0.729128
| 281
| 2,156
| 5.52669
| 0.362989
| 0.034771
| 0.027044
| 0.030908
| 0.320026
| 0.309079
| 0.309079
| 0.309079
| 0.257566
| 0.257566
| 0
| 0.01261
| 0.153989
| 2,156
| 62
| 107
| 34.774194
| 0.838816
| 0.392857
| 0
| 0
| 0
| 0
| 0.137161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.148148
| 0
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6634c188bd630f307a7ad6ecabe91c09bac5af47
| 12,711
|
py
|
Python
|
network-manager/varanuspy/utils.py
|
netx-ulx/varanus
|
7df5ec70563253d72a4287566b1fbb6bdf804a4c
|
[
"Apache-2.0"
] | null | null | null |
network-manager/varanuspy/utils.py
|
netx-ulx/varanus
|
7df5ec70563253d72a4287566b1fbb6bdf804a4c
|
[
"Apache-2.0"
] | null | null | null |
network-manager/varanuspy/utils.py
|
netx-ulx/varanus
|
7df5ec70563253d72a4287566b1fbb6bdf804a4c
|
[
"Apache-2.0"
] | null | null | null |
from Queue import Empty, Queue
import collections
import ipaddress
import netifaces
import pwd
import signal
import socket
from subprocess import Popen
from threading import Thread
################################################################################
#### String utils
def newline( *args ):
class SingleLine( object ):
def __init__( self, words ):
self.words = words
def __iter__( self ):
return SingleLineIterator( self.words )
def __str__( self ):
return ' '.join( self )
if len( args ) == 1 and isinstance( args[0], list ):
args_list = args[0]
else:
args_list = list( args )
return SingleLine( args_list )
class SingleLineIterator( object ):
def __init__( self, words ):
self.words = words
self.final_idx = len( words ) - 1
self.curr_idx = 0
def __iter__( self ):
return self
def __next__( self ):
i = self.curr_idx
if i < self.final_idx:
self.curr_idx = i + 1
return str( self.words[i] )
elif i == self.final_idx:
self.curr_idx = i + 1
return '{}\n'.format( self.words[i] )
else:
raise StopIteration
next = __next__
def multiline( *args ):
class MultiLine( object ):
def __init__( self, lines ):
self.lines = lines
def __iter__( self ):
return self.lines
def __str__( self ):
return '\n'.join( self.lines )
if len( args ) == 1 and isinstance( args[0], list ):
args_list = args[0]
else:
args_list = list( args )
return MultiLine( args_list )
################################################################################
#### Function utils
def call_until( func, stop_condition ):
""" Calls the provided function (if defined) until the provided stop
condition function returns True.
- func : a callable to be called repeatedly
- stop_condition: a callable that returns True when no more calls to
func should be made
"""
stop_condition = as_callable( stop_condition, name='stop_condition' )
if is_some( func ):
func = as_callable( func, name='func' )
while stop_condition() is not True:
func()
################################################################################
#### OS utils
def user_exists( username ):
try:
pwd.getpwnam( username )
return True
except KeyError:
return False
def get_user_home( username ):
try:
return pwd.getpwnam( username ).pw_dir
except KeyError:
raise ValueError( 'unknown user' )
class AsyncProcess( object ):
""" A wrapper for a Popen object that allows for asynchronous access to its
output one line at a time.
"""
def __init__( self, popen, cmd=None ):
""" Creates a new AsyncProcess object that wraps the provided Popen
object and consumes its output in a separate thread.
"""
self.popen = as_a( popen, instance_of=Popen )
self.queue = Queue()
if is_some( cmd ):
if isinstance( cmd, list ):
cmd = ' '.join( cmd )
self.cmd = cmd
else:
self.cmd = None
t = Thread( target=self.__consume_output )
t.daemon = True
t.start()
def readline( self, block=True, timeout=None ):
block = as_bool( block, name='block' )
try:
return self.queue.get( block=block, timeout=timeout )
except Empty:
return None
def readline_nowait( self ):
return self.readline( block=False )
def read_available_lines( self ):
return list( iter( self.readline_nowait, None ) )
def interrupt( self ):
self.popen.send_signal( signal.SIGINT )
def terminate( self ):
self.popen.terminate()
def kill( self ):
self.popen.kill()
def wait_to_finish( self ):
return self.popen.wait()
def is_finished( self ):
return is_some( self.get_return_code() )
def get_return_code( self ):
return self.popen.poll()
def __consume_output( self ):
with self.popen.stdout as output:
for line in iter( output.readline, b'' ):
line = line.rstrip( '\n\r' )
self.queue.put( line )
################################################################################
#### Network utils
def resolve( hostname ):
hostname = some( hostname, name='hostname' )
try:
return str( ipaddress.ip_address( hostname ) )
except ValueError:
return socket.gethostbyname( hostname )
def ipv4address_of( intf, index=0 ):
index = as_int( index, minim=0 )
ipv4addrs = ipv4addresses_of( intf )
naddrs = len( ipv4addrs )
if naddrs == 0:
raise ValueError( 'interface {} has 0 assigned IPv4 addresses'.format( intf ) )
elif index >= naddrs:
if naddrs == 1:
raise ValueError( 'address index {} is too high; only one address is available'.format( index ) )
else:
raise ValueError( 'address index {} is too high; only {} addresses are available'.format( index, naddrs ) )
else:
return ipv4addrs[index]['addr']
def ipv4addresses_of( intf ):
intf = as_str( some( intf, name='interface' ) )
if not intf in netifaces.interfaces():
raise ValueError( 'unknown interface "{}"'.format( intf ) )
else:
addrs = netifaces.ifaddresses( intf )
if not netifaces.AF_INET in addrs:
raise ValueError( 'interface {} does not have any assigned IPv4 addresses'.format( intf ) )
else:
return addrs[netifaces.AF_INET]
def send_bytes( sock, buf, exit_check=None, exit_on_timeout=False ):
""" Sends some bytes to the provided socket. Returns 'True' on success.
- sock : a socket object
- buf : a buffer containing bytes to send
- exit_check : an optional callable indicating if the send operation should be aborted;
if the callable is defined and returns 'True', and this function detects it,
then the send operation is aborted and 'False' is returned (defaults to
'None')
- exit_on_timeout: if 'True' then raised socket.timeout exceptions are propagated to the
caller, otherwise they are ignored (defaults to 'False')
"""
sock = some( sock, name='sock' )
buf = some( buf, name='buf' )
exit_check = as_callable( exit_check, name='exit_check' ) if is_some( exit_check ) else lambda : False
exit_on_timeout = as_bool( exit_on_timeout, name='exit_on_timeout' )
while not exit_check():
try:
sock.sendall( buf )
return True
except socket.timeout: # no bytes were sent
if exit_on_timeout:
raise # abort
else:
pass # try again
else:
return False
def recv_bytes( sock, nbytes, exit_check=None, exit_on_timeout=False ):
""" Returns a received number of bytes from the provided socket.
- sock : a socket object
- nbytes : the number of bytes to receive
- exit_check : an optional callable indicating if the receive operation should be aborted;
if the callable is defined and returns 'True', and this function detects it,
then the receive operation is aborted and 'None' is returned (defaults to
'None')
- exit_on_timeout: if 'True' then raised socket.timeout exceptions are propagated to the
caller, otherwise they are ignored (defaults to 'False')
An IOError is raised if the socket receives an EOF.
"""
sock = some( sock, name='sock' )
nbytes = as_int( nbytes, minim=0, name='nbytes' )
exit_check = as_callable( exit_check, name='exit_check' ) if is_some( exit_check ) else lambda : False
exit_on_timeout = as_bool( exit_on_timeout, name='exit_on_timeout' )
buf = bytearray( nbytes )
if nbytes == 0:
return buf
bufview = memoryview( buf )
while not exit_check():
try:
nread = sock.recv_into( bufview )
if nread == 0: # EOF
raise IOError( 'remote side terminated the connection' )
else:
bufview = bufview[nread:]
if len( bufview ) == 0:
return buf
except socket.timeout: # no bytes were received
if exit_on_timeout:
raise # abort
else:
pass # try again
else:
return None
################################################################################
#### Value testing utils
def is_some( value ):
return False if value is None else True
def is_somestr( value, allow_empty=False ):
if value is None:
return False
else:
svalue = str( value )
if svalue is None:
return False
elif allow_empty == False and len( svalue ) == 0:
return False
else:
return True
def is_iterable( value ):
if not isinstance( value, collections.Iterable ):
try:
iter( value )
except TypeError:
return False
return True
def is_mapping( value ):
return isinstance( value, collections.Mapping )
################################################################################
#### Value checking utils
def some( value, name='value' ):
if value is None:
raise ValueError( 'expected {} to be defined (not None)'.format( name ) )
else:
return value
def as_oneof( value, container, valname='value', containername='container' ):
if value not in container:
raise ValueError( 'expected {} to be in {}'.format( valname, containername ) )
else:
return value
def as_bool( value, name='value' ):
if value is not True and value is not False:
raise ValueError( 'expected {} to be a boolean'.format( name ) )
else:
return value
def as_int( value, minim=None, maxim=None, name='value' ):
try:
ivalue = int( value )
except ValueError:
raise ValueError( 'expected {} to be an integer'.format( name ) )
if minim is not None and ivalue < minim:
raise ValueError( 'expected {} to be at least {}'.format( name, minim ) )
elif maxim is not None and ivalue > maxim:
raise ValueError( 'expected {} to be at most {}'.format( name, maxim ) )
else:
return ivalue
def as_str( value, allow_empty=False, name='value' ):
if not is_somestr( value, allow_empty ):
raise ValueError( 'expected {} to be a valid string'.format( name ) )
else:
return str( value )
def as_the( value, other, valname='value', othername='other' ):
if value is not other:
raise ValueError( 'expected {} to be the same as {}'.format( valname, othername ) )
else:
return value
def as_a( value, instance_of=None, subclass_of=None, name='value' ):
if instance_of is not None and not isinstance( value, instance_of ):
raise ValueError( 'expected {} to be an instance of {}'.format( name, instance_of ) )
elif subclass_of is not None and not issubclass( value, subclass_of ):
raise ValueError( 'expected {} to be a subclass of {}'.format( name, subclass_of ) )
else:
return value
def as_callable( value, name='value' ):
if not callable( value ):
raise ValueError( 'expected {} to be a callable'.format( name ) )
else:
return value
################################################################################
#### Misc. utils
def fallback( value, default ):
if value is None:
return default
else:
return value
def optional( value, mapper ):
if is_some( value ):
value = mapper( value )
return value
def optional_bool( value ):
return optional( value, as_bool )
def optional_int( value, minim=None, maxim=None ):
return optional( value, lambda x : as_int( x, minim=minim, maxim=maxim ) )
def optional_str( value, allow_empty=False ):
return optional( value, lambda x : as_str( x, allow_empty=allow_empty ) )
def check_duplicate( container, value, containername='container', valname='value' ):
if value in container:
raise ValueError( 'found duplicate {} in {}'.format( valname, containername ) )
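# Illustrative sketch of the value-checking helpers defined above; the values
# are arbitrary.
port = as_int( "8080", minim=1, maxim=65535, name='port' )     # -> 8080
host = as_str( "localhost", name='host' )                      # -> 'localhost'
retries = optional_int( None )                                  # -> None (optional values pass through)
flag = fallback( None, default=False )                          # -> False
try:
    as_bool( "yes", name='flag' )
except ValueError as e:
    print( e )   # expected flag to be a boolean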
| 29.908235
| 119
| 0.571001
| 1,489
| 12,711
| 4.750168
| 0.175957
| 0.038173
| 0.022056
| 0.03888
| 0.337763
| 0.261275
| 0.193412
| 0.176163
| 0.142797
| 0.142797
| 0
| 0.00314
| 0.298482
| 12,711
| 424
| 120
| 29.978774
| 0.790064
| 0.15404
| 0
| 0.350746
| 0
| 0
| 0.084157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179104
| false
| 0.007463
| 0.033582
| 0.05597
| 0.41791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66373663ff85fccb4333f1b45aed30d0b9af2c3c
| 8,824
|
py
|
Python
|
load/DBPLoadController.py
|
faithcomesbyhearing/dbp-etl
|
ffd849111e8e2d40e9b07663408d31b5a2d15ce7
|
[
"MIT"
] | null | null | null |
load/DBPLoadController.py
|
faithcomesbyhearing/dbp-etl
|
ffd849111e8e2d40e9b07663408d31b5a2d15ce7
|
[
"MIT"
] | 4
|
2021-03-10T22:20:29.000Z
|
2022-03-23T22:18:00.000Z
|
load/DBPLoadController.py
|
faithcomesbyhearing/dbp-etl
|
ffd849111e8e2d40e9b07663408d31b5a2d15ce7
|
[
"MIT"
] | 1
|
2021-03-10T20:49:43.000Z
|
2021-03-10T20:49:43.000Z
|
# DBPLoadController.py
# 1) Run Validate on the files to process
# 2) Move any Fileset that is accepted to uploading
# 3) Perform upload
# 4) Move any fully uploaded fileset to database
# 5) Update fileset related tables
# 6) Move updated fileset to complete
import os
from Config import *
from RunStatus import *
from LPTSExtractReader import *
from Log import *
from InputFileset import *
from Validate import *
from S3Utility import *
from SQLBatchExec import *
from UpdateDBPFilesetTables import *
from UpdateDBPBiblesTable import *
from UpdateDBPLPTSTable import *
from UpdateDBPVideoTables import *
from UpdateDBPBibleFilesSecondary import *
class DBPLoadController:
def __init__(self, config, db, lptsReader):
self.config = config
self.db = db
self.lptsReader = lptsReader
self.s3Utility = S3Utility(config)
self.stockNumRegex = re.compile("__[A-Z0-9]{8}")
## This corrects filesets that have stock number instead of damId in the filename.
def repairAudioFileNames(self, inputFilesets):
for inp in inputFilesets:
for index in range(len(inp.files)):
file = inp.files[index]
if file.name.endswith(".mp3"):
namePart = file.name.split(".")[0]
damId = namePart[-10:]
if self.stockNumRegex.match(damId):
inp.files[index].name = namePart[:-10] + inp.filesetId[:10] + ".mp3"
def validate(self, inputFilesets):
validate = Validate(self.config, self.db)
validate.process(inputFilesets)
for inp in inputFilesets:
if os.path.isfile(inp.csvFilename):
InputFileset.upload.append(inp)
else:
RunStatus.set(inp.filesetId, False)
def updateBibles(self):
dbOut = SQLBatchExec(self.config)
bibles = UpdateDBPBiblesTable(self.config, self.db, dbOut, self.lptsReader)
bibles.process()
#dbOut.displayStatements()
dbOut.displayCounts()
success = dbOut.execute("bibles")
RunStatus.set(RunStatus.BIBLE, success)
return success
def upload(self, inputFilesets):
self.s3Utility.uploadAllFilesets(inputFilesets)
secondary = UpdateDBPBibleFilesSecondary(self.config, None, None)
secondary.createAllZipFiles(inputFilesets)
Log.writeLog(self.config)
def updateFilesetTables(self, inputFilesets):
inp = inputFilesets
dbOut = SQLBatchExec(self.config)
update = UpdateDBPFilesetTables(self.config, self.db, dbOut)
video = UpdateDBPVideoTables(self.config, self.db, dbOut)
for inp in inputFilesets:
hashId = update.processFileset(inp)
if inp.typeCode == "video":
video.processFileset(inp.filesetPrefix, inp.filenames(), hashId)
dbOut.displayCounts()
success = dbOut.execute(inp.batchName())
RunStatus.set(inp.filesetId, success)
if success:
InputFileset.complete.append(inp)
else:
print("********** Fileset Table %s Update Failed **********" % (inp.filesetId))
def updateLPTSTables(self):
dbOut = SQLBatchExec(self.config)
lptsDBP = UpdateDBPLPTSTable(self.config, dbOut, self.lptsReader)
lptsDBP.process()
#dbOut.displayStatements()
dbOut.displayCounts()
success = dbOut.execute("lpts")
RunStatus.set(RunStatus.LPTS, success)
return success
if (__name__ == '__main__'):
config = Config()
AWSSession.shared() # ensure AWSSession init
db = SQLUtility(config)
lptsReader = LPTSExtractReader(config.filename_lpts_xml)
ctrl = DBPLoadController(config, db, lptsReader)
if len(sys.argv) != 2:
InputFileset.validate = InputFileset.filesetCommandLineParser(config, AWSSession.shared().s3Client, lptsReader)
ctrl.repairAudioFileNames(InputFileset.validate)
ctrl.validate(InputFileset.validate)
if ctrl.updateBibles():
ctrl.upload(InputFileset.upload)
ctrl.updateFilesetTables(InputFileset.database)
ctrl.updateLPTSTables()
for inputFileset in InputFileset.complete:
print("Completed: ", inputFileset.filesetId)
else:
ctrl.updateBibles()
ctrl.updateLPTSTables()
RunStatus.exit()
# Get current lpts-dbp.xml
# aws --profile DBP_DEV s3 cp s3://dbp-etl-upload-newdata-fiu49s0cnup1yr0q/lpts-dbp.xml /Volumes/FCBH/bucket_data/lpts-dbp.xml
# Clean up filesets in dbp-stating and dbp-vid-staging
# Prepare by getting some local data into a test bucket
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/audio/UNRWFW/UNRWFWP1DA s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/UNRWFWP1DA
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/HYWWAVN2ET s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/HYWWAVN2ET
# aws s3 --profile dbp-etl-dev sync --acl bucket-owner-full-control /Volumes/FCBH/all-dbp-etl-test/ENGESVP2DV s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ENGESVP2DV
# No parameter, should execute only bible and lpts updates
# time python3 load/DBPLoadController.py test
# Successful tests with source on local drive
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ HYWWAVN2ET
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ ENGESVP2DV
# Successful tests with source on s3
# time python3 load/TestCleanup.py test UNRWFWP1DA
# time python3 load/TestCleanup.py test UNRWFWP1DA-opus16
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr UNRWFWP1DA
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr HYWWAVN2ET
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr ENGESVP2DV
# Combined test of two dissimilar filesets on s3
# time python3 load/TestCleanup.py test UNRWFWP1DA
# time python3 load/TestCleanup.py test HYWWAV
# time python3 load/TestCleanup.py test HYWWAVN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr UNRWFWP1DA HYWWAVN2ET
# Some video uploads
# time python3 load/TestCleanup.py test ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ video/ENGESV/ENGESVP2DV
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ video/ENGESX/ENGESVP2DV
# Successful tests with source on local drive and full path
# time python3 load/TestCleanup.py test GNWNTM
# time python3 load/TestCleanup.py test GNWNTMN_ET-usx
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ GNWNTMN2ET
# time python3 load/TestCleanup.py test GNWNTM
# time python3 load/TestCleanup.py test GNWNTMN_ET-usx
# time python3 load/DBPLoadController.py test /Volumes/FCBH/all-dbp-etl-test/ text/GNWNTM/GNWNTMN2ET
### prepare test data in bucket
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Barai_N2BBBWBT_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Barai_N2BBBWBT_USX/
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Orma_N2ORCBTL_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Orma_N2ORCBTL_USX/
### aws --profile DBP_DEV s3 sync /Volumes/FCBH/TextStockNo/Urdu_N2URDPAK_USX/ s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/Urdu_N2URDPAK_USX/
# Test stock number upload from Drive with path
# time python3 load/TestCleanup.py test BBBWBT
# time python3 load/TestCleanup.py test BBBWBTN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Barai_N2BBBWBT_USX
# time python3 load/TestCleanup.py test ORCBTL
# time python3 load/TestCleanup.py test ORCBTLN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Orma_N2ORCBTL_USX
# time python3 load/TestCleanup.py test URDPAK
# time python3 load/TestCleanup.py test URDPAKN_ET-usx
# time python3 load/DBPLoadController.py test s3://dbp-etl-upload-dev-zrg0q2rhv7shv7hr/ Urdu_N2URDPAK_USX
# python3 load/TestCleanup.py test ABIWBT
# python3 load/TestCleanup.py test ABIWBTN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Abidji N2ABIWBT/05 DBP & GBA/Abidji_N2ABIWBT/Abidji_N2ABIWBT_USX"
# This one BiblePublisher has two copies of 1CO:16, but I can only find one in the USX file.
# python3 load/TestCleanup.py test ACHBSU
# python3 load/TestCleanup.py test ACHBSUN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Acholi N2ACHBSU/05 DBP & GBA/Acholi_N2ACHBSU - Update/Acholi_N2ACHBSU_USX"
# python3 load/TestCleanup.py test CRXWYI
# python3 load/TestCleanup.py test CRXWYIP_ET-usx
# python3 load/TestCleanup.py test CRXWYIN_ET-usx
# python3 load/DBPLoadController.py test s3://dbp-etl-mass-batch "Carrier, Central N2CRXWYI/05 DBP & GBA/Carrier, Central_P1CRXWYI/Carrier, Central_P1CRXWYI_USX"
| 40.477064
| 174
| 0.779012
| 1,185
| 8,824
| 5.750211
| 0.210127
| 0.075873
| 0.08145
| 0.102143
| 0.514529
| 0.461697
| 0.423687
| 0.368506
| 0.349134
| 0.315527
| 0
| 0.026631
| 0.119107
| 8,824
| 217
| 175
| 40.663594
| 0.849994
| 0.589869
| 0
| 0.168421
| 0
| 0
| 0.030656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073684
| false
| 0
| 0.147368
| 0
| 0.252632
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6637a11d7303e4cb1649f5cb8e8396c1af55c974
| 671
|
py
|
Python
|
examples/misc/30-macro_call_in_background.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 501
|
2018-12-06T23:48:17.000Z
|
2022-03-31T11:53:18.000Z
|
examples/misc/30-macro_call_in_background.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 710
|
2018-11-26T22:04:52.000Z
|
2022-03-30T03:53:12.000Z
|
examples/misc/30-macro_call_in_background.py
|
robert-anderson/pyscf
|
cdc56e168cb15f47e8cdc791a92d689fa9b655af
|
[
"Apache-2.0"
] | 273
|
2018-11-26T10:10:24.000Z
|
2022-03-30T12:25:28.000Z
|
#!/usr/bin/env python
'''
This example shows how to use the call_in_background macro
'''
from pyscf import lib
import time
def fa():
print('a')
time.sleep(0.5)
def fb():
print('b')
time.sleep(0.8)
print('type 1')
w0 = time.time()
with lib.call_in_background(fa) as afa, lib.call_in_background(fb) as afb:
for i in range(3):
afa()
afb()
print('total time = %.1f s = [fb]0.8 * 3 seconds' % (time.time() - w0))
print('type 2')
w0 = time.time()
with lib.call_in_background(fa, fb) as (afa, afb):
for i in range(3):
afa()
afb()
print('total time = %.1f s = ([fa]0.5 + [fb]0.8) * 3 seconds' % (time.time() - w0))
| 20.333333
| 84
| 0.584203
| 117
| 671
| 3.282051
| 0.393162
| 0.0625
| 0.166667
| 0.148438
| 0.494792
| 0.494792
| 0.494792
| 0.494792
| 0.380208
| 0.197917
| 0
| 0.042718
| 0.232489
| 671
| 32
| 85
| 20.96875
| 0.702913
| 0.117735
| 0
| 0.363636
| 0
| 0.045455
| 0.188356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.181818
| 0.272727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
663b9f98e1aef6a83eac108928f75e32ee2a2b00
| 859
|
py
|
Python
|
problems/287.Find_the_Duplicate_Number/solution-36801_binary.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/287.Find_the_Duplicate_Number/solution-36801_binary.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
problems/287.Find_the_Duplicate_Number/solution-36801_binary.py
|
subramp-prep/leetcode
|
d125201d9021ab9b1eea5e5393c2db4edd84e740
|
[
"Unlicense"
] | null | null | null |
class Solution(object):
def findDuplicate(self, nums):
low = 0
high = len(nums) - 1
mid = (high + low) / 2
while high - low > 1:
count = 0
for k in nums:
if mid < k <= high:
count += 1
if count > high - mid:
low = mid
else:
high = mid
mid = (high + low) / 2
return high
# The difficulty in this problem lies in O(1) space, and many solution using O(n) space can also be accepted by OJ.
# The solution is applying bi-search in the range[1, n] by counting the element which falls in sub range(n/2, n].
# If the number is bigger than capacity of that sub range, it means the duplicated integer falls in the sub-range.
# Otherwise the duplicated integer falls in the other half sub range.
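# Illustrative check of the counting bi-search above: the list holds n+1
# numbers drawn from 1..n, so exactly one value repeats. Note the solution
# uses `/`, so under Python 3 the duplicate comes back as a float.
print(Solution().findDuplicate([1, 3, 4, 2, 2]))   # 2 (2.0 under Python 3)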
| 37.347826
| 115
| 0.563446
| 126
| 859
| 3.84127
| 0.492063
| 0.066116
| 0.041322
| 0.045455
| 0.123967
| 0.123967
| 0
| 0
| 0
| 0
| 0
| 0.018315
| 0.364377
| 859
| 22
| 116
| 39.045455
| 0.868132
| 0.472643
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
663cc62975bf84eb8f4aa1091e6cd8fcbbd6b541
| 1,826
|
py
|
Python
|
src/data/sets.py
|
HninPwint/adv_dsi_lab_2
|
0c350444046bf0f468985437b59d2cae3516817a
|
[
"MIT"
] | null | null | null |
src/data/sets.py
|
HninPwint/adv_dsi_lab_2
|
0c350444046bf0f468985437b59d2cae3516817a
|
[
"MIT"
] | null | null | null |
src/data/sets.py
|
HninPwint/adv_dsi_lab_2
|
0c350444046bf0f468985437b59d2cae3516817a
|
[
"MIT"
] | null | null | null |
def subset_x_y(target, features, start_index:int, end_index:int):
'''Return the [start_index:end_index] slice of both features and target.'''
return features[start_index:end_index], target[start_index:end_index]
def split_sets_by_time(df, target_col, test_ratio=0.2):
'''Split a dataframe into train/validation/test sets by time order: the last fifth of rows
becomes the test set, the fifth before it the validation set, and the rest the training set
(the test_ratio argument is currently unused).'''
df_copy = df.copy()
target = df_copy.pop(target_col)
cutoff = int(len(target)/5)
X_train, y_train = subset_x_y(target=target, features=df_copy, start_index=0, end_index=-cutoff*2)
X_val, y_val = subset_x_y(target=target, features=df_copy, start_index=-cutoff*2, end_index=-cutoff)
X_test, y_test = subset_x_y(target=target, features=df_copy, start_index=-cutoff, end_index=len(target))
return X_train, y_train, X_val, y_val, X_test, y_test
def save_sets(X_train=None, y_train=None, X_val=None, y_val=None, X_test=None, y_test=None, path='../data/processed/'):
"""Save the different sets locally
Parameters
----------
X_train: Numpy Array
Features for the training set
y_train: Numpy Array
Target for the training set
X_val: Numpy Array
Features for the validation set
y_val: Numpy Array
Target for the validation set
X_test: Numpy Array
Features for the testing set
y_test: Numpy Array
Target for the testing set
path : str
Path to the folder where the sets will be saved (default: '../data/processed/')
Returns
-------
"""
import numpy as np
if X_train is not None:
np.save(f'{path}X_train', X_train)
if X_val is not None:
np.save(f'{path}X_val', X_val)
if X_test is not None:
np.save(f'{path}X_test', X_test)
if y_train is not None:
np.save(f'{path}y_train', y_train)
if y_val is not None:
np.save(f'{path}y_val', y_val)
if y_test is not None:
np.save(f'{path}y_test', y_test)
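# Minimal usage sketch (hypothetical DataFrame and column name; only the helpers defined above are assumed):
# import pandas as pd
# df = pd.DataFrame({'feature': range(10), 'target': range(10)})
# X_train, y_train, X_val, y_val, X_test, y_test = split_sets_by_time(df, target_col='target')
# save_sets(X_train, y_train, X_val, y_val, X_test, y_test, path='../data/processed/')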
| 32.607143
| 119
| 0.654984
| 308
| 1,826
| 3.642857
| 0.194805
| 0.037433
| 0.048128
| 0.058824
| 0.385027
| 0.262032
| 0.262032
| 0.262032
| 0.128342
| 0.128342
| 0
| 0.004264
| 0.229463
| 1,826
| 56
| 120
| 32.607143
| 0.793177
| 0.266156
| 0
| 0
| 0
| 0
| 0.072816
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.041667
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
663eb6c24c50403fabcf3408feba6dc17b7e3aa6
| 853
|
py
|
Python
|
Bio_III/DPChange.py
|
BlackAdder84/Bioinformatics
|
6cc662b6c4a3349a89f6fdd26f05f1f6228bd912
|
[
"MIT"
] | 1
|
2017-06-09T03:06:21.000Z
|
2017-06-09T03:06:21.000Z
|
Bio_III/DPChange.py
|
BlackAdder84/Bioinformatics
|
6cc662b6c4a3349a89f6fdd26f05f1f6228bd912
|
[
"MIT"
] | null | null | null |
Bio_III/DPChange.py
|
BlackAdder84/Bioinformatics
|
6cc662b6c4a3349a89f6fdd26f05f1f6228bd912
|
[
"MIT"
] | 1
|
2017-05-01T21:15:11.000Z
|
2017-05-01T21:15:11.000Z
|
# Works
"""
Given an amount of money and a list of coin denominations, return the minimum number of coins needed.
"""
def DPChange(money, coins):
MinNumCoins = [0]*(money+1)
MinNumCoins[0] = 0
for m in range(1, money+1):
MinNumCoins[m] = float('inf')  # sentinel: no coin combination found yet
for i in range(0, len(coins)):
if m >= coins[i]: # only consider coins not larger than the current amount m
if MinNumCoins[m-coins[i]] + 1 < MinNumCoins[m]:
MinNumCoins[m] = MinNumCoins[m-coins[i]] + 1
return MinNumCoins[money]
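# The recurrence computed above is MinNumCoins[m] = min(MinNumCoins[m - c] + 1) over all coins c <= m.
# Hypothetical example: DPChange(6, [1, 3, 4]) returns 2 (3 + 3), whereas a greedy choice would need 3 coins (4 + 1 + 1).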
def test():
in_ = (40, [50,25,20,10,5,1])
out_ = 2
assert(DPChange(*in_) == out_), "Test 1 FAILED"
in_ = (8074, [24,13,12,7,5,3,1])
out_ = 338
assert(DPChange(*in_) == out_), "Test 2 FAILED"
if __name__ == "__main__":
money = 16807
coins = [18,17,16,7,6,5,3,1]
print(DPChange(money, coins))
| 25.088235
| 82
| 0.570926
| 129
| 853
| 3.651163
| 0.44186
| 0.127389
| 0.044586
| 0.076433
| 0.178344
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101613
| 0.273154
| 853
| 33
| 83
| 25.848485
| 0.658065
| 0.150059
| 0
| 0
| 0
| 0
| 0.047619
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.095238
| false
| 0
| 0
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66400ba0baa5a1b25e3daf468b7b3e31fa7361b6
| 30,286
|
py
|
Python
|
venv/lib/python3.6/site-packages/feedgen/entry.py
|
jannahuang/blog
|
e1d8cfa9d79ac06097a0e55531bba9421fcbf283
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/feedgen/entry.py
|
jannahuang/blog
|
e1d8cfa9d79ac06097a0e55531bba9421fcbf283
|
[
"MIT"
] | null | null | null |
venv/lib/python3.6/site-packages/feedgen/entry.py
|
jannahuang/blog
|
e1d8cfa9d79ac06097a0e55531bba9421fcbf283
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
feedgen.entry
~~~~~~~~~~~~~
:copyright: 2013-2020, Lars Kiesow <lkiesow@uos.de>
:license: FreeBSD and LGPL, see license.* for more details.
'''
from datetime import datetime
import dateutil.parser
import dateutil.tz
import warnings
from lxml.etree import CDATA # nosec - adding CDATA entry is safe
from feedgen.compat import string_types
from feedgen.util import ensure_format, formatRFC2822, xml_fromstring, xml_elem
def _add_text_elm(entry, data, name):
"""Add a text subelement to an entry"""
if not data:
return
elm = xml_elem(name, entry)
type_ = data.get('type')
if data.get('src'):
if name != 'content':
raise ValueError("Only the 'content' element of an entry can "
"contain a 'src' attribute")
elm.attrib['src'] = data['src']
elif data.get(name):
# Surround xhtml with a div tag, parse it and embed it
if type_ == 'xhtml':
xhtml = '<div xmlns="http://www.w3.org/1999/xhtml">' \
+ data.get(name) + '</div>'
elm.append(xml_fromstring(xhtml))
elif type_ == 'CDATA':
elm.text = CDATA(data.get(name))
# Parse XML and embed it
elif type_ and (type_.endswith('/xml') or type_.endswith('+xml')):
elm.append(xml_fromstring(data[name]))
# Embed the text in escaped form
elif not type_ or type_.startswith('text') or type_ == 'html':
elm.text = data.get(name)
# Everything else should be included base64 encoded
else:
raise NotImplementedError(
'base64 encoded {} is not supported at the moment. '
'Pull requests adding support are welcome.'.format(name)
)
# Add type description of the content
if type_:
elm.attrib['type'] = type_
class FeedEntry(object):
'''FeedEntry class representing an ATOM feed's entry node or an RSS feed's item
node.
'''
def __init__(self):
# ATOM
# required
self.__atom_id = None
self.__atom_title = None
self.__atom_updated = datetime.now(dateutil.tz.tzutc())
# recommended
self.__atom_author = None
self.__atom_content = None
self.__atom_link = None
self.__atom_summary = None
# optional
self.__atom_category = None
self.__atom_contributor = None
self.__atom_published = None
self.__atom_source = None
self.__atom_rights = None
# RSS
self.__rss_author = None
self.__rss_category = None
self.__rss_comments = None
self.__rss_description = None
self.__rss_content = None
self.__rss_enclosure = None
self.__rss_guid = {}
self.__rss_link = None
self.__rss_pubDate = None
self.__rss_source = None
self.__rss_title = None
# Extension list:
self.__extensions = {}
self.__extensions_register = {}
def atom_entry(self, extensions=True):
'''Create an ATOM entry and return it.'''
entry = xml_elem('entry')
if not (self.__atom_id and self.__atom_title and self.__atom_updated):
raise ValueError('Required fields not set')
id = xml_elem('id', entry)
id.text = self.__atom_id
title = xml_elem('title', entry)
title.text = self.__atom_title
updated = xml_elem('updated', entry)
updated.text = self.__atom_updated.isoformat()
# An entry must contain an alternate link if there is no content
# element.
if not self.__atom_content:
links = self.__atom_link or []
if not [l for l in links if l.get('rel') == 'alternate']:
raise ValueError('Entry must contain an alternate link or ' +
'a content element.')
# Add author elements
for a in self.__atom_author or []:
# Atom requires a name. Skip elements without.
if not a.get('name'):
continue
author = xml_elem('author', entry)
name = xml_elem('name', author)
name.text = a.get('name')
if a.get('email'):
email = xml_elem('email', author)
email.text = a.get('email')
if a.get('uri'):
uri = xml_elem('uri', author)
uri.text = a.get('uri')
_add_text_elm(entry, self.__atom_content, 'content')
for l in self.__atom_link or []:
link = xml_elem('link', entry, href=l['href'])
if l.get('rel'):
link.attrib['rel'] = l['rel']
if l.get('type'):
link.attrib['type'] = l['type']
if l.get('hreflang'):
link.attrib['hreflang'] = l['hreflang']
if l.get('title'):
link.attrib['title'] = l['title']
if l.get('length'):
link.attrib['length'] = l['length']
_add_text_elm(entry, self.__atom_summary, 'summary')
for c in self.__atom_category or []:
cat = xml_elem('category', entry, term=c['term'])
if c.get('scheme'):
cat.attrib['scheme'] = c['scheme']
if c.get('label'):
cat.attrib['label'] = c['label']
# Add author elements
for c in self.__atom_contributor or []:
# Atom requires a name. Skip elements without.
if not c.get('name'):
continue
contrib = xml_elem('contributor', entry)
name = xml_elem('name', contrib)
name.text = c.get('name')
if c.get('email'):
email = xml_elem('email', contrib)
email.text = c.get('email')
if c.get('uri'):
uri = xml_elem('uri', contrib)
uri.text = c.get('uri')
if self.__atom_published:
published = xml_elem('published', entry)
published.text = self.__atom_published.isoformat()
if self.__atom_rights:
rights = xml_elem('rights', entry)
rights.text = self.__atom_rights
if self.__atom_source:
source = xml_elem('source', entry)
if self.__atom_source.get('title'):
source_title = xml_elem('title', source)
source_title.text = self.__atom_source['title']
if self.__atom_source.get('link'):
xml_elem('link', source, href=self.__atom_source['link'])
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('atom'):
ext['inst'].extend_atom(entry)
return entry
def rss_entry(self, extensions=True):
'''Create a RSS item and return it.'''
entry = xml_elem('item')
if not (self.__rss_title or
self.__rss_description or
self.__rss_content):
raise ValueError('Required fields not set')
if self.__rss_title:
title = xml_elem('title', entry)
title.text = self.__rss_title
if self.__rss_link:
link = xml_elem('link', entry)
link.text = self.__rss_link
if self.__rss_description and self.__rss_content:
description = xml_elem('description', entry)
description.text = self.__rss_description
XMLNS_CONTENT = 'http://purl.org/rss/1.0/modules/content/'
content = xml_elem('{%s}encoded' % XMLNS_CONTENT, entry)
content.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
elif self.__rss_description:
description = xml_elem('description', entry)
description.text = self.__rss_description
elif self.__rss_content:
description = xml_elem('description', entry)
description.text = CDATA(self.__rss_content['content']) \
if self.__rss_content.get('type', '') == 'CDATA' \
else self.__rss_content['content']
for a in self.__rss_author or []:
author = xml_elem('author', entry)
author.text = a
if self.__rss_guid.get('guid'):
guid = xml_elem('guid', entry)
guid.text = self.__rss_guid['guid']
permaLink = str(self.__rss_guid.get('permalink', False)).lower()
guid.attrib['isPermaLink'] = permaLink
for cat in self.__rss_category or []:
category = xml_elem('category', entry)
category.text = cat['value']
if cat.get('domain'):
category.attrib['domain'] = cat['domain']
if self.__rss_comments:
comments = xml_elem('comments', entry)
comments.text = self.__rss_comments
if self.__rss_enclosure:
enclosure = xml_elem('enclosure', entry)
enclosure.attrib['url'] = self.__rss_enclosure['url']
enclosure.attrib['length'] = self.__rss_enclosure['length']
enclosure.attrib['type'] = self.__rss_enclosure['type']
if self.__rss_pubDate:
pubDate = xml_elem('pubDate', entry)
pubDate.text = formatRFC2822(self.__rss_pubDate)
if self.__rss_source:
source = xml_elem('source', entry, url=self.__rss_source['url'])
source.text = self.__rss_source['title']
if extensions:
for ext in self.__extensions.values() or []:
if ext.get('rss'):
ext['inst'].extend_rss(entry)
return entry
def title(self, title=None):
'''Get or set the title value of the entry. It should contain a human
readable title for the entry. Title is mandatory for both ATOM and RSS
and should not be blank.
:param title: The new title of the entry.
:returns: The entry's title.
'''
if title is not None:
self.__atom_title = title
self.__rss_title = title
return self.__atom_title
def id(self, id=None):
'''Get or set the entry id which identifies the entry using a
universally unique and permanent URI. Two entries in a feed can have
the same value for id if they represent the same entry at different
points in time. This method will also set rss:guid with permalink set
to False. Id is mandatory for an ATOM entry.
:param id: New Id of the entry.
:returns: Id of the entry.
'''
if id is not None:
self.__atom_id = id
self.__rss_guid = {'guid': id, 'permalink': False}
return self.__atom_id
def guid(self, guid=None, permalink=False):
'''Get or set the entries guid which is a string that uniquely
identifies the item. This will also set atom:id.
:param guid: Id of the entry.
:param permalink: If this is a permanent identifier for this item
:returns: Id and permalink setting of the entry.
'''
if guid is not None:
self.__atom_id = guid
self.__rss_guid = {'guid': guid, 'permalink': permalink}
return self.__rss_guid
def updated(self, updated=None):
'''Set or get the updated value which indicates the last time the entry
was modified in a significant way.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param updated: The modification date.
:returns: Modification date as datetime.datetime
'''
if updated is not None:
if isinstance(updated, string_types):
updated = dateutil.parser.parse(updated)
if not isinstance(updated, datetime):
raise ValueError('Invalid datetime format')
if updated.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_updated = updated
self.__rss_lastBuildDate = updated
return self.__atom_updated
def author(self, author=None, replace=False, **kwargs):
'''Get or set author data. An author element is a dict containing a
name, an email address and a uri. Name is mandatory for ATOM, email is
mandatory for RSS.
This method can be called with:
- the fields of an author as keyword arguments
- the fields of an author as a dictionary
- a list of dictionaries containing the author fields
An author has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param author: Dict or list of dicts with author data.
:param replace: Add or replace old data.
Example::
>>> author({'name':'John Doe', 'email':'jdoe@example.com'})
[{'name':'John Doe','email':'jdoe@example.com'}]
>>> author([{'name': 'Mr. X'}, {'name': 'Max'}])
[{'name':'John Doe','email':'jdoe@example.com'},
{'name':'John Doe'}, {'name':'Max'}]
>>> author(name='John Doe', email='jdoe@example.com', replace=True)
[{'name':'John Doe','email':'jdoe@example.com'}]
'''
if author is None and kwargs:
author = kwargs
if author is not None:
if replace or self.__atom_author is None:
self.__atom_author = []
self.__atom_author += ensure_format(author,
set(['name', 'email', 'uri']),
set())
self.__rss_author = []
for a in self.__atom_author:
if a.get('email'):
if a.get('name'):
self.__rss_author.append('%(email)s (%(name)s)' % a)
else:
self.__rss_author.append('%(email)s' % a)
return self.__atom_author
def content(self, content=None, src=None, type=None):
'''Get or set the content of the entry which contains or links to the
complete content of the entry. Content must be provided for ATOM
entries if there is no alternate link, and should be provided if there
is no summary. If the content is set (not linked) it will also set
rss:description.
:param content: The content of the feed entry.
:param src: Link to the entries content.
:param type: If type is CDATA content would not be escaped.
:returns: Content element of the entry.
'''
if src is not None:
self.__atom_content = {'src': src}
elif content is not None:
self.__atom_content = {'content': content}
self.__rss_content = {'content': content}
if type is not None:
self.__atom_content['type'] = type
self.__rss_content['type'] = type
return self.__atom_content
def link(self, link=None, replace=False, **kwargs):
'''Get or set link data. An link element is a dict with the fields
href, rel, type, hreflang, title, and length. Href is mandatory for
ATOM.
This method can be called with:
- the fields of a link as keyword arguments
- the fields of a link as a dictionary
- a list of dictionaries containing the link fields
A link has the following fields:
- *href* is the URI of the referenced resource (typically a Web page)
- *rel* contains a single link relationship type. It can be a full URI,
or one of the following predefined values (default=alternate):
- *alternate* an alternate representation of the entry or feed, for
example a permalink to the html version of the entry, or the
front page of the weblog.
- *enclosure* a related resource which is potentially large in size
and might require special handling, for example an audio or video
recording.
- *related* a document related to the entry or feed.
- *self* the feed itself.
- *via* the source of the information provided in the entry.
- *type* indicates the media type of the resource.
- *hreflang* indicates the language of the referenced resource.
- *title* human readable information about the link, typically for
display purposes.
- *length* the length of the resource, in bytes.
RSS only supports one link with nothing but a URL. So for the RSS link
element the last link with rel=alternate is used.
RSS also supports one enclosure element per entry, which is covered by
the link element in ATOM feed entries. So for the RSS enclosure element
the last link with rel=enclosure is used.
:param link: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of link data.
'''
if link is None and kwargs:
link = kwargs
if link is not None:
if replace or self.__atom_link is None:
self.__atom_link = []
self.__atom_link += ensure_format(
link,
set(['href', 'rel', 'type', 'hreflang', 'title', 'length']),
set(['href']),
{'rel': ['alternate', 'enclosure', 'related', 'self', 'via']},
{'rel': 'alternate'})
# RSS only needs one URL. We use the last matching link for RSS:
for l in self.__atom_link:
if l.get('rel') == 'alternate':
self.__rss_link = l['href']
elif l.get('rel') == 'enclosure':
self.__rss_enclosure = {'url': l['href']}
self.__rss_enclosure['type'] = l.get('type')
self.__rss_enclosure['length'] = l.get('length') or '0'
# return the set with more information (atom)
return self.__atom_link
def summary(self, summary=None, type=None):
'''Get or set the summary element of an entry which conveys a short
summary, abstract, or excerpt of the entry. Summary is an ATOM only
element and should be provided if there either is no content provided
for the entry, or that content is not inline (i.e., contains a src
attribute), or if the content is encoded in base64. This method will
also set the rss:description field if it wasn't previously set or
contains the old value of summary.
:param summary: Summary of the entry's contents.
:returns: Summary of the entry's contents.
'''
if summary is not None:
# Replace the RSS description with the summary if it was the
# summary before. Not if it is the description.
if not self.__rss_description or (
self.__atom_summary and
self.__rss_description == self.__atom_summary.get("summary")
):
self.__rss_description = summary
self.__atom_summary = {'summary': summary}
if type is not None:
self.__atom_summary['type'] = type
return self.__atom_summary
def description(self, description=None, isSummary=False):
'''Get or set the description value which is the item synopsis.
Description is an RSS only element. For ATOM feeds it is split in
summary and content. The isSummary parameter can be used to control
which ATOM value is set when setting description.
:param description: Description of the entry.
:param isSummary: If the description should be used as content or
summary.
:returns: The entry's description.
'''
if description is not None:
self.__rss_description = description
if isSummary:
self.__atom_summary = description
else:
self.__atom_content = {'content': description}
return self.__rss_description
def category(self, category=None, replace=False, **kwargs):
'''Get or set categories that the entry belongs to.
This method can be called with:
- the fields of a category as keyword arguments
- the fields of a category as a dictionary
- a list of dictionaries containing the category fields
A category has the following fields:
- *term* identifies the category
- *scheme* identifies the categorization scheme via a URI.
- *label* provides a human-readable label for display
If a label is present it is used for the RSS feeds. Otherwise the term
is used. The scheme is used for the domain attribute in RSS.
:param category: Dict or list of dicts with data.
:param replace: Add or replace old data.
:returns: List of category data.
'''
if category is None and kwargs:
category = kwargs
if category is not None:
if replace or self.__atom_category is None:
self.__atom_category = []
self.__atom_category += ensure_format(
category,
set(['term', 'scheme', 'label']),
set(['term']))
# Map the ATOM categories to RSS categories. Use the atom:label as
# name or if not present the atom:term. The atom:scheme is the
# rss:domain.
self.__rss_category = []
for cat in self.__atom_category:
rss_cat = {}
rss_cat['value'] = cat.get('label', cat['term'])
if cat.get('scheme'):
rss_cat['domain'] = cat['scheme']
self.__rss_category.append(rss_cat)
return self.__atom_category
def contributor(self, contributor=None, replace=False, **kwargs):
'''Get or set the contributor data of the feed. This is an ATOM only
value.
This method can be called with:
- the fields of a contributor as keyword arguments
- the fields of a contributor as a dictionary
- a list of dictionaries containing the contributor fields
A contributor has the following fields:
- *name* conveys a human-readable name for the person.
- *uri* contains a home page for the person.
- *email* contains an email address for the person.
:param contributor: Dictionary or list of dictionaries with contributor
data.
:param replace: Add or replace old data.
:returns: List of contributors as dictionaries.
'''
if contributor is None and kwargs:
contributor = kwargs
if contributor is not None:
if replace or self.__atom_contributor is None:
self.__atom_contributor = []
self.__atom_contributor += ensure_format(
contributor, set(['name', 'email', 'uri']), set(['name']))
return self.__atom_contributor
def published(self, published=None):
'''Set or get the published value which contains the time of the initial
creation or first availability of the entry.
The value can either be a string which will automatically be parsed or
a datetime.datetime object. In any case it is necessary that the value
include timezone information.
:param published: The creation date.
:returns: Creation date as datetime.datetime
'''
if published is not None:
if isinstance(published, string_types):
published = dateutil.parser.parse(published)
if not isinstance(published, datetime):
raise ValueError('Invalid datetime format')
if published.tzinfo is None:
raise ValueError('Datetime object has no timezone info')
self.__atom_published = published
self.__rss_pubDate = published
return self.__atom_published
def pubDate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
'''
return self.published(pubDate)
def pubdate(self, pubDate=None):
'''Get or set the pubDate of the entry which indicates when the entry
was published. This method is just another name for the published(...)
method.
pubdate(…) is deprecated and may be removed in feedgen ≥ 0.8. Use
pubDate(…) instead.
'''
warnings.warn('pubdate(…) is deprecated and may be removed in feedgen '
'≥ 0.8. Use pubDate(…) instead.')
return self.published(pubDate)
def rights(self, rights=None):
'''Get or set the rights value of the entry which conveys information
about rights, e.g. copyrights, held in and over the entry. This ATOM
value will also set rss:copyright.
:param rights: Rights information of the feed.
:returns: Rights information of the feed.
'''
if rights is not None:
self.__atom_rights = rights
return self.__atom_rights
def comments(self, comments=None):
'''Get or set the value of comments which is the URL of the comments
page for the item. This is a RSS only value.
:param comments: URL to the comments page.
:returns: URL to the comments page.
'''
if comments is not None:
self.__rss_comments = comments
return self.__rss_comments
def source(self, url=None, title=None):
'''Get or set the source for the current feed entry.
Note that ATOM feeds support a lot more sub elements than title and URL
(which is what RSS supports) but these are currently not supported.
Patches are welcome.
:param url: Link to the source.
:param title: Title of the linked resource
:returns: Source element as dictionaries.
'''
if url is not None and title is not None:
self.__rss_source = {'url': url, 'title': title}
self.__atom_source = {'link': url, 'title': title}
return self.__rss_source
def enclosure(self, url=None, length=None, type=None):
'''Get or set the value of enclosure which describes a media object
that is attached to the item. This is a RSS only value which is
represented by link(rel=enclosure) in ATOM. ATOM feeds can furthermore
contain several enclosures while RSS may contain only one. That is why
this method, if repeatedly called, will add more than one enclosures to
the feed. However, only the last one is used for RSS.
:param url: URL of the media object.
:param length: Size of the media in bytes.
:param type: Mimetype of the linked media.
:returns: Data of the enclosure element.
'''
if url is not None:
self.link(href=url, rel='enclosure', type=type, length=length)
return self.__rss_enclosure
def ttl(self, ttl=None):
'''Get or set the ttl value. It is an RSS only element. ttl stands for
time to live. It's a number of minutes that indicates how long a
channel can be cached before refreshing from the source.
:param ttl: Integer value representing the time to live.
:returns: Time to live of the entry.
'''
if ttl is not None:
self.__rss_ttl = int(ttl)
return self.__rss_ttl
def load_extension(self, name, atom=True, rss=True):
'''Load a specific extension by name.
:param name: Name of the extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if name in self.__extensions.keys():
raise ImportError('Extension already loaded')
# Load extension
extname = name[0].upper() + name[1:] + 'EntryExtension'
try:
supmod = __import__('feedgen.ext.%s_entry' % name)
extmod = getattr(supmod.ext, name + '_entry')
except ImportError:
# Use FeedExtension module instead
supmod = __import__('feedgen.ext.%s' % name)
extmod = getattr(supmod.ext, name)
ext = getattr(extmod, extname)
self.register_extension(name, ext, atom, rss)
def register_extension(self, namespace, extension_class_entry=None,
atom=True, rss=True):
'''Register a specific extension by classes to a namespace.
:param namespace: namespace for the extension
:param extension_class_entry: Class of the entry extension to load.
:param atom: If the extension should be used for ATOM feeds.
:param rss: If the extension should be used for RSS feeds.
'''
# Check loaded extensions
# `load_extension` ignores the "Extension" suffix.
if not isinstance(self.__extensions, dict):
self.__extensions = {}
if namespace in self.__extensions.keys():
raise ImportError('Extension already loaded')
if not extension_class_entry:
raise ImportError('No extension class')
extinst = extension_class_entry()
setattr(self, namespace, extinst)
# `load_extension` registry
self.__extensions[namespace] = {
'inst': extinst,
'extension_class_entry': extension_class_entry,
'atom': atom,
'rss': rss
}
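# Usage sketch (hypothetical id/title/link values; only the methods defined above are assumed):
# entry = FeedEntry()
# entry.id('urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a')
# entry.title('Example entry')
# entry.link(href='https://example.org/post/1', rel='alternate')
# node = entry.atom_entry()  # returns the lxml <entry> element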
| 40.982409
| 80
| 0.589777
| 3,779
| 30,286
| 4.574755
| 0.112728
| 0.036094
| 0.014577
| 0.008908
| 0.323751
| 0.259139
| 0.207138
| 0.176654
| 0.158665
| 0.135875
| 0
| 0.001803
| 0.322327
| 30,286
| 738
| 81
| 41.03794
| 0.839846
| 0.361289
| 0
| 0.111399
| 0
| 0
| 0.095028
| 0.001191
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064767
| false
| 0
| 0.033679
| 0
| 0.158031
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
66421e56797abbadff0d086947c6577be6113573
| 3,165
|
py
|
Python
|
tools/reduce_dimension.py
|
AtsushiHashimoto/exp_idc
|
9b79a02d3a8fc3950b5fe775e75353b37dc612cc
|
[
"BSD-2-Clause"
] | null | null | null |
tools/reduce_dimension.py
|
AtsushiHashimoto/exp_idc
|
9b79a02d3a8fc3950b5fe775e75353b37dc612cc
|
[
"BSD-2-Clause"
] | null | null | null |
tools/reduce_dimension.py
|
AtsushiHashimoto/exp_idc
|
9b79a02d3a8fc3950b5fe775e75353b37dc612cc
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
DESCRIPTION = "This script reduces the dimensionality of the input data."
#from memory_profiler import profile
import numpy as np
import argparse
from sklearn.decomposition import PCA
from sklearn.decomposition import NMF
import logging
import sys
from os.path import dirname
sys.path.append(dirname(__file__))
from my_target_counter import TargetCounter
logger = logging.getLogger(__file__)
#@profile
def main(args):
src_dir = args.src_dir
dest_dir = args.dest_dir
src_pat = r"X_(\d{3})\.csv$"  # raw string avoids an invalid-escape warning; the dot is escaped to match a literal '.'
tar_template = "X_%s.csv"
tc=TargetCounter(src_pat,tar_template,src_dir,dest_dir)
target_ids,src_files = tc.listup_targets()
n_targets = len(target_ids)
if args.count_targets:
print(len(target_ids))
sys.exit()
if n_targets==0:
logger.warning("There are no before-process src files in '%s'" % src_dir)
sys.exit()
if args.algorithm == "pca":
model = PCA(args.dimensions)
elif args.algorithm == "nmf":
model = NMF(args.dimensions,max_iter=args.max_iter)
else:
logger.warning("Unknown algorithm '%s'" % args.algorithm)
sys.exit()
for id,src_file in zip(target_ids,src_files):
dest_file = "%s/%s"%(args.dest_dir,tc.id2destfile(id))
#print(id,src_file,dest_file)
X=np.loadtxt(src_file,delimiter=",")
model.fit(X)
X_ = model.transform(X)
np.savetxt(dest_file,X_,delimiter=",")
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument('dimensions', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=int, \
choices=None, \
help='Dimensionality of target projection subspace', \
metavar=None)
parser.add_argument('src_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the source data are located.', \
metavar=None)
parser.add_argument('dest_dir', \
action='store', \
nargs=None, \
const=None, \
default=None, \
type=str, \
choices=None, \
help='Directory path where the dimension-reduced data will be located.', \
metavar=None)
parser.add_argument('-a', '--algorithm', \
action='store', \
nargs='?', \
const=None, \
default='pca', \
type=str, \
choices=None, \
help='Algorithm for dimension reduction. pca|nmf are supported (default: pca)', \
metavar=None)
parser.add_argument('-M', '--max_iter', \
action='store', \
nargs='?', \
const=None, \
default=1000, \
type=int, \
choices=None, \
help='Maximum iteration number. (default: 1000)', \
metavar=None)
parser.add_argument('--count_targets',\
action="store_true", default=False, help='count processing targets, and exit.')
if __name__ == '__main__':
args = parser.parse_args()
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
logger.addHandler(sh)
main(args)
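# Example invocation (hypothetical directories; the flags match the parser defined above):
#   python tools/reduce_dimension.py 50 ./data/raw ./data/reduced -a pca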
| 27.521739
| 89
| 0.618957
| 388
| 3,165
| 4.881443
| 0.353093
| 0.028511
| 0.053854
| 0.052798
| 0.26188
| 0.184794
| 0.114044
| 0.114044
| 0.114044
| 0.090813
| 0
| 0.005046
| 0.248657
| 3,165
| 114
| 90
| 27.763158
| 0.791421
| 0.033175
| 0
| 0.391304
| 0
| 0
| 0.185597
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01087
| false
| 0
| 0.086957
| 0
| 0.097826
| 0.01087
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6649fd3abee8b975b14bebfd695b9dead71ff67f
| 834
|
py
|
Python
|
coursebuilder/tests/test_views.py
|
gcallah/CourseBuilder
|
68f3d175aba43e62b20e6c1c4c6a604601c2c60a
|
[
"Apache-2.0"
] | 2
|
2019-10-07T23:03:54.000Z
|
2019-10-15T23:18:08.000Z
|
coursebuilder/tests/test_views.py
|
gcallah/CourseBuilder
|
68f3d175aba43e62b20e6c1c4c6a604601c2c60a
|
[
"Apache-2.0"
] | 3
|
2019-12-11T15:49:38.000Z
|
2021-06-10T22:04:41.000Z
|
coursebuilder/tests/test_views.py
|
gcallah/CourseBuilder
|
68f3d175aba43e62b20e6c1c4c6a604601c2c60a
|
[
"Apache-2.0"
] | 1
|
2019-12-02T06:08:11.000Z
|
2019-12-02T06:08:11.000Z
|
from django.test import TestCase
from django.urls import reverse
class CourseBuilderViewTest(TestCase):
def test_landing_page(self):
url = reverse("coursebuilder:landing_page")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "landing_page.html")
def test_about_page(self):
url = reverse("coursebuilder:dynamic_about")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "dynamic_about.html")
def test_glossary_page(self):
url = reverse("coursebuilder:dynamic_gloss")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "dynamic_gloss.html")
| 36.26087
| 63
| 0.711031
| 94
| 834
| 6.148936
| 0.297872
| 0.036332
| 0.057093
| 0.093426
| 0.681661
| 0.628028
| 0.49654
| 0.49654
| 0.49654
| 0.49654
| 0
| 0.013314
| 0.189448
| 834
| 22
| 64
| 37.909091
| 0.841716
| 0
| 0
| 0.333333
| 0
| 0
| 0.159472
| 0.095923
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
664a09ed3a949b821074a7ea5edcf32f555375e2
| 336
|
py
|
Python
|
LC/249.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | 2
|
2018-02-24T17:20:02.000Z
|
2018-02-24T17:25:43.000Z
|
LC/249.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
LC/249.py
|
szhu3210/LeetCode_Solutions
|
64747eb172c2ecb3c889830246f3282669516e10
|
[
"MIT"
] | null | null | null |
import collections
class Solution(object):
def groupStrings(self, strings):
"""
:type strings: List[str]
:rtype: List[List[str]]
"""
d=collections.defaultdict(list)
for s in strings:
d[tuple([((ord(s[i])-ord(s[0]))%26) for i in range(len(s))])].append(s)
return [d[key] for key in d]
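# Hypothetical example: "abc" and "bcd" both map to the key (0, 1, 2) because every letter is
# shifted by the same amount modulo 26, so Solution().groupStrings(["abc", "bcd", "az", "ba"])
# groups ["abc", "bcd"] together and ["az", "ba"] together.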
| 33.6
| 83
| 0.529762
| 47
| 336
| 3.787234
| 0.574468
| 0.078652
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012658
| 0.294643
| 336
| 10
| 84
| 33.6
| 0.738397
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
664b6b48aac6edc74751d03dafd588e100ea7322
| 2,460
|
py
|
Python
|
cfcc/cogs/stats.py
|
denuoweb/tipbot-v2
|
bd25205fd4a0440ffe11b7e94905881d4c799282
|
[
"Apache-2.0"
] | 2
|
2020-03-05T00:08:38.000Z
|
2020-12-21T04:34:31.000Z
|
cfcc/cogs/stats.py
|
denuoweb/tipbot-v2
|
bd25205fd4a0440ffe11b7e94905881d4c799282
|
[
"Apache-2.0"
] | null | null | null |
cfcc/cogs/stats.py
|
denuoweb/tipbot-v2
|
bd25205fd4a0440ffe11b7e94905881d4c799282
|
[
"Apache-2.0"
] | 2
|
2020-08-30T23:33:20.000Z
|
2022-01-28T22:52:58.000Z
|
import discord, os
from discord.ext import commands
from utils import checks, output
from aiohttp import ClientSession
import urllib.request
import json
class Stats:
def __init__(self, bot: discord.ext.commands.Bot):
self.bot = bot
@commands.command()
async def stats(self, amount=1):
"""
Show stats about HTMLCOIN
"""
headers={"user-agent" : "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.78 Safari/537.36"}
try:
async with ClientSession() as session:
async with session.get("https://api.coingecko.com/api/v3/coins/htmlcoin", headers=headers) as response:
responseRaw = await response.read()
priceData = json.loads(responseRaw)
for item in priceData:
embed = discord.Embed(color=0x00FF00)
embed.set_author(name='HTMLCOIN Coin Information', icon_url="i.ibb.co/GkBSpV3/logo-icon-no-txt-32x32.png")
embed.add_field(name="current_price", value="${}".format(item['usd']))
#embed.add_field(name="Price (BTC)", value="{} BTC".format(item['btc']))
#embed.add_field(name='\Altmarkets',value='\altmarkets')
#embed.add_field(name="Volume (USD)", value="${}".format(item['24h_volume_usd']))
#embed.add_field(name="Market Cap", value="${}".format(item['market_cap_usd']))
#embed.add_field(name='\u200b',value='\u200b')
#embed.add_field(name="% 1h", value="{}%".format(item['percent_change_1h']))
#embed.add_field(name="% 24h", value="{}%".format(item['percent_change_24h']))
#embed.add_field(name="% 7d", value="{}%".format(item['percent_change_7d']))
#embed.add_field(name="Circulating Supply", value="{} HTMLCOIN".format(item['available_supply']))
#embed.add_field(name="Total Supply", value="{} HTMLCOIN".format(item['total_supply']))
embed.set_footer(text="https://www.coingecko.com/en/coins/htmlcoin", icon_url="i.ibb.co/GkBSpV3/logo-icon-no-txt-32x32.png")
await self.bot.say(embed=embed)
except Exception:
await self.bot.say(":warning: Error fetching prices!")
def setup(bot):
bot.add_cog(Stats(bot))
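# Loading sketch (assumes a legacy discord.py bot compatible with the `bot.say` API used above):
# bot = commands.Bot(command_prefix='!')
# setup(bot)
# bot.run('TOKEN')  # hypothetical token placeholder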
| 54.666667
| 148
| 0.579268
| 291
| 2,460
| 4.780069
| 0.415808
| 0.063264
| 0.102804
| 0.134436
| 0.20417
| 0.05895
| 0.05895
| 0.05895
| 0.05895
| 0.05895
| 0
| 0.033296
| 0.26748
| 2,460
| 44
| 149
| 55.909091
| 0.738624
| 0.3
| 0
| 0
| 0
| 0.037037
| 0.220084
| 0.051714
| 0
| 0
| 0.004811
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
6651c8c1061de1521ef9b16fc164b0b76ba7d323
| 5,563
|
py
|
Python
|
src/reinigung.py
|
infiniteloop-youth/reinigung
|
1a16603ab97989b4180d51813c203ee81d9d7f5e
|
[
"MIT"
] | null | null | null |
src/reinigung.py
|
infiniteloop-youth/reinigung
|
1a16603ab97989b4180d51813c203ee81d9d7f5e
|
[
"MIT"
] | null | null | null |
src/reinigung.py
|
infiniteloop-youth/reinigung
|
1a16603ab97989b4180d51813c203ee81d9d7f5e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- Coding: utf-8 -*-
from argparse import ArgumentParser
from os import environ, makedirs
from datetime import datetime
from os.path import abspath, join, dirname, exists, splitext
from time import time
from dotenv import load_dotenv
import slack
DIR = dirname(dirname(abspath(__file__)))
load_dotenv(join(DIR, ".env"))
ADMIN_SLACK_TOKEN = environ.get("ADMIN_SLACK_TOKEN")
POST_SLACK_TOKEN = environ.get("POST_SLACK_TOKEN")
TARGET_CHANNEL = environ.get("TARGET_CHANNEL")
TARGET_AGO = int(environ.get("TARGET_AGO"))
DOWNLOAD_PATH = environ.get("DOWNLOAD_PATH")
REPORT_CHANNEL = environ.get("REPORT_CHANNEL")
def normalization(text, char):
"""
Replace symbols which cannot be used in a filename
"""
symbols = list(range(0, 33)) + [34, 39] + list(range(42, 48)) + list(range(58, 64)) + list(range(91, 95)) + [96] + list(range(123, 128))
for symbol in symbols:
text = text.replace(chr(symbol), char)
return text
def main(is_dry, is_all):
"""
Knock knock
"""
started_at = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
delete_to = int(time() - TARGET_AGO)
print("Reinigung")
print("started_at:"+started_at)
if is_dry:
print("Dry run")
# Read settings
is_all = is_all or TARGET_CHANNEL == ""  # honour the --all flag in addition to an empty TARGET_CHANNEL
print("All channel" if is_all else "")
all_channels = slack.get_channels(ADMIN_SLACK_TOKEN, exclude_archived=True, exclude_members=True)["channels"]
users = slack.get_users(ADMIN_SLACK_TOKEN)["members"]
# set find range
if is_all:
channels = [channel for channel in all_channels if channel["is_channel"] and not channel["is_private"]]
else:
channels = [channel for channel in all_channels if channel["name"] == TARGET_CHANNEL]
report_log = ""
total_count = 0
# in channel
for channel in channels:
channel_count = 0
report_log += "#{}({}) - {}\n\n".format(
channel["name"],
channel["id"],
channel["purpose"]["value"]
)
folder_path = abspath(join(DIR, DOWNLOAD_PATH, channel["name"]))
print("in #{}".format(channel["name"]))
# make folder
if not exists(folder_path) and not is_dry:
makedirs(folder_path)
files = slack.get_files(
ADMIN_SLACK_TOKEN,
channel=channel["id"],
ts_to=delete_to
)["files"]
# in file
for file in files:
# make file name
file_name = "{}-{}-{}-{}{}-{}{}".format(
datetime.fromtimestamp(int(file["timestamp"])).strftime("%Y%m%d%H%M%S"),
file["id"],
[user["name"] for user in users if user["id"] == file["user"]][0],
normalization(file["title"], "_")[:10],
"-"+normalization(file["initial_comment"]["comment"], "_")[:30] if "initial_comment" in file else "",
normalization(splitext(file["name"])[0], "_"),
splitext(file["name"])[1]
)
file_path = abspath(join(folder_path, file_name))
if not is_dry:
# download
file_content = slack.get_file(ADMIN_SLACK_TOKEN, file["id"])
with open(file_path, "wb") as save_file:
save_file.write(file_content)
# delete
deleted = slack.delete_file(ADMIN_SLACK_TOKEN, file["id"])
# increment channel counter
channel_count += 1
# add log
report_log += "- {} @{} {} - {} {}\n - {}\n\n".format(
datetime.fromtimestamp(int(file["timestamp"])).strftime("%Y/%m/%d %H:%M:%S"),
[user["name"] for user in users if user["id"] == file["user"]][0],
file["title"],
file["initial_comment"]["comment"].replace("\n","") if "initial_comment" in file else "",
file["name"],
file_name
)
print("- {}".format(file_path))
# increment total counter
total_count += channel_count
report_log += "Total : {} files\n\n".format(channel_count)
finished_at = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
# make great report
report = """
Reinigung - Auto clean up slack files
===== Settings report =====
All delete? : {}
Dry run? : {}
Target channel : {}
Delete before : {}
Started at : {}
Finished at : {}
===== Running report =====
Total delete : {} files
===== Running log =====
{}
===== End of report ======
""".format(
"Yes" if is_all else "No",
"Yes" if is_dry else "No",
TARGET_CHANNEL,
datetime.fromtimestamp(delete_to).strftime("%Y/%m/%d %H:%M:%S"),
started_at,
finished_at,
total_count,
report_log
)
slack.post_file(
POST_SLACK_TOKEN,
report,
channels=slack.get_channel_id(ADMIN_SLACK_TOKEN, REPORT_CHANNEL),
filename="reinigung-report-{}.txt".format(datetime.now().strftime("%Y-%m-%d-%H-%M-%S")),
filetype="text",
title="Reinigung report"
)
print("finished_at:"+finished_at)
print("done")
if __name__ == "__main__":
# Parse arguments
parser = ArgumentParser(description="Auto clean up slack files")
parser.add_argument("-d", "--dry", help="Testing mode", action="store_true")
parser.add_argument("-a", "--all", help="Remove in all channel", action="store_true")
args = parser.parse_args()
# Call main function
main(is_dry=args.dry, is_all=args.all)
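# Example invocations (hypothetical; the flags match the ArgumentParser above):
#   python src/reinigung.py --dry   # report what would be downloaded and deleted, without touching Slack
#   python src/reinigung.py --all   # clean every public channel instead of only TARGET_CHANNEL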
| 30.398907
| 140
| 0.580262
| 680
| 5,563
| 4.560294
| 0.241176
| 0.035472
| 0.038697
| 0.021283
| 0.168655
| 0.155111
| 0.122219
| 0.117704
| 0.117704
| 0.079329
| 0
| 0.009508
| 0.262628
| 5,563
| 182
| 141
| 30.565934
| 0.746465
| 0.053748
| 0
| 0.016393
| 0
| 0
| 0.193561
| 0.004408
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016393
| false
| 0
| 0.057377
| 0
| 0.081967
| 0.065574
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0728c4bb800f77d5ab35c76ddb2e7148e9e9c1b
| 6,856
|
py
|
Python
|
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | 1
|
2020-04-03T09:07:35.000Z
|
2020-04-03T09:07:35.000Z
|
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | null | null | null |
main.py
|
KunalKatiyar/ChatBox-App
|
2a53c7ed155aeb56a5304acf546762bf1c62569a
|
[
"MIT"
] | null | null | null |
import kivy
from kivy.app import App
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.button import Button
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.scrollview import ScrollView
import os
import socket_client
import sys
kivy.require("1.11.1")
class ScrollableLabel(ScrollView):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.layout = GridLayout(cols=1, size_hint_y=None)
self.add_widget(self.layout)
self.chat_history = Label(size_hint_y=None, markup =True)
self.scroll_to_point = Label()
self.layout.add_widget(self.chat_history)
self.layout.add_widget(self.scroll_to_point)
def update_chat_history(self, message):
self.chat_history.text += '\n' + message
self.layout.height = self.chat_history.texture_size[1] + 15
self.chat_history.height = self.chat_history.texture_size[1]
self.chat_history.text_size =(self.chat_history.width*0.98,None)
self.scroll_to(self.scroll_to_point)
def update_chat_history_layout(self, _=None):
self.layout.height = self.chat_history.texture_size[1] + 15
self.chat_history.height = self.chat_history.texture_size[1]
self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class ConnectPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 2
if os.path.isfile("prev_details.txt"):
with open("prev_details.txt", "r") as f:
d= f.read().split(",")
prev_ip = d[0]
prev_port = d[1]
prev_username = d[2]
else:
prev_ip = ""
prev_port = ""
prev_username = ""
self.add_widget(Label(text = "IP:"))
self.ip = TextInput(text = prev_ip,multiline= False)
self.add_widget(self.ip)
self.add_widget(Label(text = "Port:"))
self.port = TextInput(text = prev_port,multiline= False)
self.add_widget(self.port)
self.add_widget(Label(text = "Username:"))
self.username = TextInput(text = prev_username,multiline= False)
self.add_widget(self.username)
self.join = Button(text="Join")
self.join.bind(on_press=self.join_button)
self.add_widget(Label())
self.add_widget(self.join)
def join_button(self, instance):
port =self.port.text
ip = self.ip.text
username = self.username.text
with open("prev_details.txt","w") as f:
f.write(f"{ip},{port},{username}")
info = f"Attempting to join {ip}:{port} as {username}"
chat_app.info_page.update_info(info)
chat_app.screen_manager.current = "Info"
Clock.schedule_once(self.connect, 1)
def connect(self, _):
port = int(self.port.text)
ip = self.ip.text
username = self.username.text
if not socket_client.connect(ip , port, username, show_error):
return
chat_app.create_chat_page()
chat_app.screen_manager.current = "Chat"
class InfoPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.message = Label(halign="center", valign="middle", font_size=30)
self.message.bind(width=self.update_text_width)
self.add_widget(self.message)
def update_info(self,message):
self.message.text = message
def update_text_width(self,*_):
self.message.text_size = (self.message.width*0.9, None)
class ChatPage(GridLayout):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.cols = 1
self.rows =2
self.history = ScrollableLabel(height=Window.size[1]*0.9, size_hint_y=None)
self.add_widget(self.history)
self.new_message = TextInput(width=Window.size[0]*0.8, size_hint_x=None, multiline = False)
self.send = Button(text="Send")
self.send.bind(on_press = self.send_message)
bottom_line = GridLayout(cols=2)
bottom_line.add_widget(self.new_message)
bottom_line.add_widget(self.send)
self.add_widget(bottom_line)
Window.bind(on_key_down=self.on_key_down)
Clock.schedule_once(self.focus_text_input, 1)
socket_client.start_listening(self.incoming_message, show_error)
self.bind(size=self.adjust_fields)
def adjust_fields(self, *_):
if Window.size[1] * 0.1 < 50:
new_height = Window.size[1] - 50
else:
new_height = Window.size[1] * 0.9
self.history.height = new_height
if Window.size[0] * 0.2 < 160:
new_width = Window.size[0] - 160
else:
new_width = Window.size[0] * 0.8
self.new_message.width = new_width
Clock.schedule_once(self.history.update_chat_history_layout, 0.01)
def on_key_down(self, instance, keyboard, keycode , text , modifiers):
if keycode == 40:
self.send_message(None)
def send_message(self, _):
message = self.new_message.text
self.new_message.text = ""
if message:
self.history.update_chat_history(f"[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}")
socket_client.send(message)
Clock.schedule_once(self.focus_text_input, 0.1)
def focus_text_input(self, _):
self.new_message.focus = True  # give keyboard focus back to the input field
def incoming_message(self, username, message):
self.history.update_chat_history(f"[color=20dd20]{username}[/color] > {message}")
class EpicApp(App):
def build(self):
self.screen_manager = ScreenManager()
self.connect_page = ConnectPage()
screen = Screen(name="Connect")
screen.add_widget(self.connect_page)
self.screen_manager.add_widget(screen)
self.info_page = InfoPage()
screen = Screen(name="Info")
screen.add_widget(self.info_page)
self.screen_manager.add_widget(screen)
return self.screen_manager
def create_chat_page(self):
self.chat_page = ChatPage()
screen = Screen(name = "Chat")
screen.add_widget(self.chat_page)
self.screen_manager.add_widget(screen)
def show_error(message):
chat_app.info_page.update_info(message)
chat_app.screen_manager.current = "Info"
Clock.schedule_once(sys.exit, 10)
if __name__ == "__main__":
chat_app = EpicApp()
chat_app.run()
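# Note: prev_details.txt stores the last-used connection as a single "ip,port,username" line,
# e.g. "127.0.0.1,1234,alice" (hypothetical values), which pre-fills the connect form on the next start.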
| 35.895288
| 121
| 0.625292
| 883
| 6,856
| 4.608154
| 0.157418
| 0.048661
| 0.044728
| 0.029246
| 0.389039
| 0.315802
| 0.262472
| 0.218727
| 0.156795
| 0.133202
| 0
| 0.015804
| 0.261669
| 6,856
| 191
| 122
| 35.895288
| 0.788028
| 0
| 0
| 0.178344
| 0
| 0
| 0.046798
| 0.016949
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11465
| false
| 0
| 0.082803
| 0
| 0.242038
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0747e6f6d9760d681c73f7d16222536711564c9
| 4,607
|
py
|
Python
|
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | 3
|
2020-12-04T07:29:18.000Z
|
2021-04-08T06:23:20.000Z
|
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | null | null | null |
parlai/agents/recosa/recosa.py
|
ying-A/RED
|
27971a5a6d6768e8a7052d8fba06e5056da7619e
|
[
"MIT"
] | 1
|
2020-12-04T07:29:04.000Z
|
2020-12-04T07:29:04.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from parlai.core.agents import Agent
from parlai.core.utils import warn_once
from parlai.core.utils import padded_3d
from parlai.core.torch_generator_agent import TorchGeneratorAgent
from .modules import RecosaGeneratorModel
import torch
warn_once(
"Public release transformer models are currently in beta. The name of "
"command line options may change or disappear before a stable release. We "
"welcome your feedback. Please file feedback as issues at "
"https://github.com/facebookresearch/ParlAI/issues/new"
)
def add_common_cmdline_args(argparser):
argparser.add_argument('-esz', '--embedding-size', type=int, default=300,
help='Size of all embedding layers')
argparser.add_argument('-nl', '--n-layers', type=int, default=2)
argparser.add_argument('-hid', '--ffn-size', type=int, default=300,
help='Hidden size of the FFN layers')
argparser.add_argument('--attention-dropout', type=float, default=0.0)
argparser.add_argument('--relu-dropout', type=float, default=0.0)
argparser.add_argument('--n-heads', type=int, default=2,
help='Number of multihead attention heads')
argparser.add_argument('--learn-positional-embeddings', type='bool', default=False)
argparser.add_argument('--embeddings-scale', type='bool', default=True)
argparser.add_argument('--n-positions', type=int, default=None, hidden=True,
help='Number of positional embeddings to learn. Defaults '
'to truncate or 1024 if not provided.')
class Recosa(Agent):
"""
Placeholder class, which just throws an error telling the user to specify
whether they want the ranker or the generator.
"""
def __init__(self, opt, shared=None):
raise RuntimeError(
"`--model recosa` is not a valid choice. Please select "
" `--model recosa/generator' "
)
class RecosaGeneratorAgent(TorchGeneratorAgent):
@classmethod
def add_cmdline_args(cls, argparser):
"""Add command-line arguments specifically for this agent."""
agent = argparser.add_argument_group('Transformer Arguments')
agent.add_argument('-ord', '--order', default='no',
choices=['no', '1_order', '2_order', '3_order','full'],
help='Choices: no_order, 1_order, 2_order, 3_order,full_order.')
agent.add_argument('-dli_in_dim','--dli_input_dim',default=300,type=int, help='size of the dli input dim')
agent.add_argument('-dli_rnn_hid','--dli_rnn_hiddensize',default=64,type=int, help='size of the dli rnn hidden dim')
agent.add_argument('-dli_ffn_dim','--dli_ffn_dimension',default=128,type=int, help='size of the dli ffn dim')
agent.add_argument('-rnn_hid','--rnn_hiddensize',default=300,type=int, help='size of the rnn input embedding')
agent.add_argument('-rnn_esz','--rnn_embeddingsize',default=300,type=int, help='size of the rnn hidden layers')
agent.add_argument('-rnn_nlayers','--rnn_numlayers',default=2,type=int, help='the number of rnn hidden layers')
agent.add_argument('-rnn_cls','--rnn_class',default='gru',choices=['lstm','gru','rnn'], help='rnn class for utterance encoder')
agent.add_argument('-rnn_bi','--rnn_bidirectional',default=False,type=bool, help='whether use bi-dir rnn')
agent.add_argument('--rnn_dropout',default=0.0,type=float, help='dropout for rnn hidden layers')
agent.add_argument('--input_dropout',default=0.0,type=float, help='input dropout for inputs')
agent.add_argument('--max_turns',default=30,type=int, help='the max number of history turns')
agent.add_argument('--max_single_seq_len',default=50,type=int, help='the max length of single history utterance')
add_common_cmdline_args(agent)
cls.dictionary_class().add_cmdline_args(argparser)
super(RecosaGeneratorAgent, cls).add_cmdline_args(argparser)
return agent
def build_model(self, states=None):
self.model = RecosaGeneratorModel(self.opt, self.dict)
if self.opt['embedding_type'] != 'random':
self._copy_embeddings(
self.model.encoder.embeddings.weight, self.opt['embedding_type']
)
if self.use_cuda:
self.model.cuda()
return self.model
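# Example training invocation (hypothetical task and script path; the flags match add_cmdline_args above):
#   python examples/train_model.py -m recosa/generator -t dailydialog -esz 300 --n-heads 2 -ord full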
| 51.188889
| 135
| 0.674191
| 608
| 4,607
| 4.963816
| 0.320724
| 0.08383
| 0.06892
| 0.037773
| 0.202452
| 0.16004
| 0.133201
| 0.06163
| 0.05169
| 0
| 0
| 0.012507
| 0.20165
| 4,607
| 89
| 136
| 51.764045
| 0.808048
| 0.075103
| 0
| 0
| 0
| 0
| 0.343174
| 0.006849
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.092308
| 0
| 0.215385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b075806cabe0b1312272705ea0a85e81c75f2115
| 20,972
|
py
|
Python
|
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
dataloading/nvidia.py
|
UT-ADL/lidar-as-camera
|
daccb2ae21b4899ecfd8611b7a27f91681617383
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
import torchvision
from torchvision import transforms
import torchvision.transforms.functional as F
from skimage.util import random_noise
from dataloading.camera import Camera
class NvidiaResizeAndCrop(object):
def __call__(self, data):
xmin = 186
ymin = 600
scale = 6.0
width = 258
height = 66
scaled_width = int(width * scale)
scaled_height = int(height * scale)
cropped = transforms.functional.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class NvidiaCropWide(object):
def __init__(self, x_delta=0):
self.x_delta = x_delta
def __call__(self, data):
xmin = 300
xmax = 1620
ymin = 520
ymax = 864
scale = 0.2
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin + self.x_delta, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class CropViT(object):
def __call__(self, data):
xmin = 540
xmax = 1260
ymin = 244
ymax = 964
scale = 0.312
height = ymax - ymin
width = xmax - xmin
cropped = F.resized_crop(data["image"], ymin, xmin, height, width,
(int(scale * height), int(scale * width)))
data["image"] = cropped
return data
class NvidiaSideCameraZoom(object):
def __init__(self, zoom_ratio):
self.zoom_ratio = zoom_ratio
def __call__(self, data):
width = 1920
height = 1208
xmin = int(self.zoom_ratio * width)
ymin = int(self.zoom_ratio * height)
scaled_width = width - (2 * xmin)
scaled_height = height - (2 * ymin)
cropped = F.resized_crop(data["image"], ymin, xmin, scaled_height, scaled_width,
(height, width))
data["image"] = cropped
return data
class AugmentationConfig:
def __init__(self, color_prob=0.0, noise_prob=0.0, blur_prob=0.0):
self.color_prob = color_prob
self.noise_prob = noise_prob
self.blur_prob = blur_prob
class AugmentImage:
def __init__(self, augment_config):
print(f"augmentation: color_prob={augment_config.color_prob}, "
f"noise_prob={augment_config.noise_prob}, "
f"blur_prob={augment_config.blur_prob}")
self.augment_config = augment_config
def __call__(self, data):
if np.random.random() <= self.augment_config.color_prob:
jitter = transforms.ColorJitter(contrast=0.5, saturation=0.5, brightness=0.5)
data["image"] = jitter(data["image"])
if np.random.random() <= self.augment_config.noise_prob:
if np.random.random() > 0.5:
data["image"] = torch.tensor(random_noise(data["image"], mode='gaussian', mean=0, var=0.005, clip=True),
dtype=torch.float)
else:
data["image"] = torch.tensor(random_noise(data["image"], mode='salt', amount=0.005),
dtype=torch.float)
if np.random.random() <= self.augment_config.blur_prob:
blurrer = transforms.GaussianBlur(kernel_size=(3, 3), sigma=(0.3, 1))
data["image"] = blurrer(data['image'])
return data
class Normalize(object):
def __call__(self, data, transform=None):
# normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
image = data["image"]
image = image / 255
# data["image"] = normalize(image)
data["image"] = image
return data
class NvidiaDataset(Dataset):
#CAP_WAYPOINTS = 30
def __init__(self, dataset_paths, transform=None, camera="front_wide", name="Nvidia dataset",
filter_turns=False, output_modality="steering_angle", n_branches=1, n_waypoints=6,
metadata_file="nvidia_frames.csv", color_space="rgb", side_cameras_weight=0.33):
self.name = name
self.metadata_file = metadata_file
self.color_space = color_space
self.dataset_paths = dataset_paths
if transform:
self.transform = transform
else:
self.transform = transforms.Compose([Normalize()])
self.camera_name = camera
self.output_modality = output_modality
self.n_waypoints = n_waypoints
self.side_cameras_weight = side_cameras_weight
if self.output_modality == "waypoints":
self.target_size = 2 * self.n_waypoints
elif self.output_modality == "steering_angle":
self.target_size = 1
else:
print(f"Unknown output modality {self.output_modality}")
sys.exit()
self.n_branches = n_branches
if camera == 'all':
datasets = [self.read_dataset(dataset_path, "left") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "right") for dataset_path in dataset_paths] + \
[self.read_dataset(dataset_path, "front_wide") for dataset_path in dataset_paths]
else:
datasets = [self.read_dataset(dataset_path, camera) for dataset_path in dataset_paths]
self.frames = pd.concat(datasets)
if filter_turns:
print("Filtering turns with blinker signal")
self.frames = self.frames[self.frames.turn_signal == 1]
def __getitem__(self, idx):
frame = self.frames.iloc[idx]
if self.color_space == "rgb":
image = torchvision.io.read_image(frame["image_path"])
elif self.color_space == "bgr":
image = cv2.imread(frame["image_path"])
image = torch.tensor(image, dtype=torch.uint8).permute(2, 0, 1)
else:
print(f"Unknown color space: ", self.color_space)
sys.exit()
# TODO replace if-else with map
if self.camera_name == Camera.LEFT.value:
steering_angle = np.array(frame["steering_angle_left"])
elif self.camera_name == Camera.RIGHT.value:
steering_angle = np.array(frame["steering_angle_right"])
else:
steering_angle = np.array(frame["steering_angle"])
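        # The TODO above could be handled with a lookup table; a possible sketch
        # (not from the original source, behaviour unchanged):
        #   column_by_camera = {
        #       Camera.LEFT.value: "steering_angle_left",
        #       Camera.RIGHT.value: "steering_angle_right",
        #   }
        #   steering_angle = np.array(
        #       frame[column_by_camera.get(self.camera_name, "steering_angle")])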
data = {
'image': image,
'steering_angle': steering_angle,
'vehicle_speed': np.array(frame["vehicle_speed"]),
'autonomous': np.array(frame["autonomous"]),
'position_x': np.array(frame["position_x"]),
'position_y': np.array(frame["position_y"]),
'yaw': np.array(frame["yaw"]),
'turn_signal': np.array(frame["turn_signal"]),
'row_id': np.array(frame["row_id"]),
}
turn_signal = int(frame["turn_signal"])
if self.output_modality == "waypoints":
waypoints = []
for i in np.arange(1, self.n_waypoints + 1):
waypoints.append(frame[f"wp{i}_{self.camera_name}_x"])
waypoints.append(frame[f"wp{i}_{self.camera_name}_y"])
data['waypoints'] = np.array(waypoints)
target_values = waypoints
else:
target_values = frame["steering_angle"]
if self.transform:
data = self.transform(data)
if self.n_branches > 1:
target = np.zeros((self.n_branches, self.target_size))
target[turn_signal, :] = target_values
conditional_mask = np.zeros((self.n_branches, self.target_size))
conditional_mask[turn_signal, :] = 1
else:
target = np.zeros((self.n_branches, self.target_size))
target[0, :] = target_values
conditional_mask = np.ones((self.n_branches, self.target_size))
return data, target.reshape(-1), conditional_mask.reshape(-1)
def __len__(self):
return len(self.frames.index)
def get_waypoints(self):
wp_x_cols = [f"wp{i}_{self.camera_name}_x" for i in np.arange(1, self.n_waypoints + 1)]
wp_y_cols = [f"wp{i}_{self.camera_name}_y" for i in np.arange(1, self.n_waypoints + 1)]
waypoint_cols = np.column_stack((wp_x_cols, wp_y_cols)).reshape(-1)
return self.frames[waypoint_cols].to_numpy()
def read_dataset(self, dataset_path, camera):
if type(dataset_path) is dict:
frames_df = pd.read_csv(dataset_path['path'] / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df = frames_df.iloc[dataset_path['start']:dataset_path['end']]
dataset_path = dataset_path['path']
else:
frames_df = pd.read_csv(dataset_path / self.metadata_file)
len_before_filtering = len(frames_df)
frames_df["row_id"] = frames_df.index
# temp hack
if "autonomous" not in frames_df.columns:
frames_df["autonomous"] = False
# frames_df["autonomous"] = False
frames_df = frames_df[frames_df['steering_angle'].notna()] # TODO: one steering angle is NaN, why?
if camera != Camera.FRONT_WIDE.value:
frames_df = frames_df[frames_df['steering_angle_left'].notna()]
frames_df = frames_df[frames_df['steering_angle_right'].notna()]
frames_df = frames_df[frames_df['vehicle_speed'].notna()]
frames_df = frames_df[frames_df[f'{camera}_filename'].notna()]
frames_df["turn_signal"].fillna(1, inplace=True)
frames_df["turn_signal"] = frames_df["turn_signal"].astype(int)
        # Remove frames marked as skipped
frames_df = frames_df[frames_df["turn_signal"] != -1] # TODO: remove magic values.
if self.output_modality == "waypoints":
frames_df = frames_df[frames_df[f"position_x"].notna()]
frames_df = frames_df[frames_df[f"position_y"].notna()]
for i in np.arange(1, self.n_waypoints + 1):
frames_df = frames_df[frames_df[f"wp{i}_{camera}_x"].notna()]
frames_df = frames_df[frames_df[f"wp{i}_{camera}_y"].notna()]
frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# if self.calculate_waypoints:
#
# vehicle_x = frames_df["position_x"]
# vehicle_y = frames_df["position_y"]
#
# for i in np.arange(1, self.N_WAYPOINTS + 1):
# wp_global_x = frames_df["position_x"].shift(-i * self.CAP_WAYPOINTS)
# wp_global_y = frames_df["position_y"].shift(-i * self.CAP_WAYPOINTS)
# frames_df[f"x_{i}"] = wp_global_x
# frames_df[f"y_{i}"] = wp_global_y
# yaw = frames_df["yaw"]
# #frames_df["yaw"] = yaw
#
# wp_local_x = (wp_global_x - vehicle_x) * np.cos(yaw) + (wp_global_y - vehicle_y) * np.sin(yaw)
# wp_local_y = -(wp_global_x - vehicle_x) * np.sin(yaw) + (wp_global_y - vehicle_y) * np.cos(yaw)
# frames_df[f"x_{i}_offset"] = wp_local_x
# frames_df[f"y_{i}_offset"] = wp_local_y
#
# # Remove rows without trajectory offsets, should be last N_WAYPOINTS rows
# frames_df = frames_df[frames_df[f"x_{i}_offset"].notna()]
#
# # frames_df["yaw_delta"] = np.abs(frames_df["yaw"]) - np.abs(frames_df["yaw"]).shift(-1)
# # frames_df = frames_df[np.abs(frames_df["yaw_delta"]) < 0.1]
# #
# # frames_df["x_1_delta"] = frames_df["x_1_offset"] - frames_df["x_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["x_1_delta"]) < 0.1]
# #
# # frames_df["y_1_delta"] = frames_df["y_1_offset"] - frames_df["y_1_offset"].shift(-1)
# # frames_df = frames_df[np.abs(frames_df["y_1_delta"]) < 0.1]
#
# # frames_df = frames_df[np.abs(frames_df["steering_angle"]) < 2.0]
len_after_filtering = len(frames_df)
camera_images = frames_df[f"{camera}_filename"].to_numpy()
frames_df["image_path"] = [str(dataset_path / image_path) for image_path in camera_images]
if self.output_modality == "waypoints":
for i in np.arange(1, self.n_waypoints + 1):
frames_df[f"wp{i}_all_x"] = frames_df[f"wp{i}_{camera}_x"]
frames_df[f"wp{i}_all_y"] = frames_df[f"wp{i}_{camera}_y"]
frames_df["camera_type"] = camera
print(f"{dataset_path}: lenght={len(frames_df)}, filtered={len_before_filtering-len_after_filtering}")
frames_df.reset_index(inplace=True)
return frames_df
def steering_angles_degrees(self):
return self.frames.steering_angle.to_numpy() / np.pi * 180
class NvidiaTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6,
camera="front_wide", augment_conf=AugmentationConfig(), metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-20-12-36-10_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-43-17_e2e_sulaoja_20_30",
root_path / "2021-05-20-12-51-29_e2e_sulaoja_20_30",
root_path / "2021-05-20-13-44-06_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-51-21_e2e_sulaoja_10_10",
root_path / "2021-05-20-13-59-00_e2e_sulaoja_10_10",
root_path / "2021-05-28-15-07-56_e2e_sulaoja_20_30",
root_path / "2021-05-28-15-17-19_e2e_sulaoja_20_30",
{'path': root_path / "2021-06-09-13-14-51_e2e_rec_ss2", 'start': 125, 'end': 49725},
{'path': root_path / "2021-06-09-13-55-03_e2e_rec_ss2_backwards", 'start': 150, 'end': 53625},
{'path': root_path / "2021-06-09-14-58-11_e2e_rec_ss3", 'start': 175, 'end': 43775},
{'path': root_path / "2021-06-09-15-42-05_e2e_rec_ss3_backwards", 'start': 100, 'end': 40625},
root_path / "2021-06-09-16-24-59_e2e_rec_ss13",
root_path / "2021-06-09-16-50-22_e2e_rec_ss13_backwards",
root_path / "2021-06-10-12-59-59_e2e_ss4",
root_path / "2021-06-10-13-19-22_e2e_ss4_backwards",
root_path / "2021-06-10-13-51-34_e2e_ss12",
root_path / "2021-06-10-14-02-24_e2e_ss12_backwards",
root_path / "2021-06-10-14-44-24_e2e_ss3_backwards",
root_path / "2021-06-10-15-03-16_e2e_ss3_backwards",
root_path / "2021-06-14-11-08-19_e2e_rec_ss14",
root_path / "2021-06-14-11-22-05_e2e_rec_ss14",
root_path / "2021-06-14-11-43-48_e2e_rec_ss14_backwards",
{'path': root_path / "2021-09-24-11-19-25_e2e_rec_ss10", 'start': 400, 'end': 34550},
{'path': root_path / "2021-09-24-11-40-24_e2e_rec_ss10_2", 'start': 150, 'end': 16000},
{'path': root_path / "2021-09-24-12-02-32_e2e_rec_ss10_3", 'start': 350, 'end': 8050},
root_path / "2021-09-24-12-21-20_e2e_rec_ss10_backwards",
root_path / "2021-09-24-13-39-38_e2e_rec_ss11",
{'path': root_path / "2021-09-30-13-57-00_e2e_rec_ss14", 'start': 100, 'end': 3200},
root_path / "2021-09-30-15-03-37_e2e_ss14_from_half_way",
root_path / "2021-09-30-15-20-14_e2e_ss14_backwards",
{'path': root_path / "2021-09-30-15-56-59_e2e_ss14_attempt_2", 'start': 80, 'end': 54600},
root_path / "2021-10-07-11-05-13_e2e_rec_ss3",
root_path / "2021-10-07-11-44-52_e2e_rec_ss3_backwards",
root_path / "2021-10-07-12-54-17_e2e_rec_ss4",
root_path / "2021-10-07-13-22-35_e2e_rec_ss4_backwards",
root_path / "2021-10-11-16-06-44_e2e_rec_ss2",
root_path / "2021-10-11-17-10-23_e2e_rec_last_part",
root_path / "2021-10-11-17-14-40_e2e_rec_backwards",
root_path / "2021-10-11-17-20-12_e2e_rec_backwards",
root_path / "2021-10-20-14-55-47_e2e_rec_vastse_ss13_17",
root_path / "2021-10-20-13-57-51_e2e_rec_neeruti_ss19_22",
root_path / "2021-10-20-14-15-07_e2e_rec_neeruti_ss19_22_back",
root_path / "2021-10-25-17-31-48_e2e_rec_ss2_arula",
root_path / "2021-10-25-17-06-34_e2e_rec_ss2_arula_back"
# '2021-11-08-11-24-44_e2e_rec_ss12_raanitsa.bag' \
# '2021-11-08-12-08-40_e2e_rec_ss12_raanitsa_backward.bag' \
]
tr = transforms.Compose([AugmentImage(augment_config=augment_conf), Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaValidationDataset(NvidiaDataset):
# todo: remove default parameters
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6, camera="front_wide",
metadata_file="nvidia_frames.csv"):
self.dataset_paths = [
root_path / "2021-05-28-15-19-48_e2e_sulaoja_20_30",
root_path / "2021-06-07-14-20-07_e2e_rec_ss6",
root_path / "2021-06-07-14-06-31_e2e_rec_ss6",
root_path / "2021-06-07-14-09-18_e2e_rec_ss6",
root_path / "2021-06-07-14-36-16_e2e_rec_ss6",
root_path / "2021-09-24-14-03-45_e2e_rec_ss11_backwards",
root_path / "2021-10-26-10-49-06_e2e_rec_ss20_elva",
root_path / "2021-10-26-11-08-59_e2e_rec_ss20_elva_back",
root_path / "2021-10-20-15-11-29_e2e_rec_vastse_ss13_17_back",
{'path': root_path / "2021-10-11-14-50-59_e2e_rec_vahi", 'start': 100, 'end': 15000},
{'path': root_path / "2021-10-14-13-08-51_e2e_rec_vahi_backwards", 'start': 80, 'end': 13420}
]
tr = transforms.Compose([Normalize()])
super().__init__(self.dataset_paths, tr, camera=camera, output_modality=output_modality, n_branches=n_branches,
n_waypoints=n_waypoints, metadata_file=metadata_file)
class NvidiaWinterTrainDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle",
n_branches=3, n_waypoints=6, augment_conf=AugmentationConfig()):
train_paths = [
root_path / "2022-01-28-10-21-14_e2e_rec_peipsiaare_forward",
root_path / "2022-01-28-12-46-59_e2e_rec_peipsiaare_backward",
root_path / "2022-01-14-10-05-16_e2e_rec_raanitsa_forward",
root_path / "2022-01-14-10-50-05_e2e_rec_raanitsa_backward",
root_path / "2022-01-14-11-54-33_e2e_rec_kambja_forward2",
root_path / "2022-01-14-12-21-40_e2e_rec_kambja_forward2_continue",
root_path / "2022-01-14-13-09-05_e2e_rec_kambja_backward",
root_path / "2022-01-14-13-18-36_e2e_rec_kambja_backward_continue",
root_path / "2022-01-14-12-35-13_e2e_rec_neeruti_forward",
root_path / "2022-01-14-12-45-51_e2e_rec_neeruti_backward",
root_path / "2022-01-18-13-03-03_e2e_rec_arula_backward",
root_path / "2022-01-18-13-43-33_e2e_rec_otepaa_forward",
root_path / "2022-01-18-13-52-35_e2e_rec_otepaa_forward",
root_path / "2022-01-18-13-56-22_e2e_rec_otepaa_forward",
root_path / "2022-01-18-14-12-14_e2e_rec_otepaa_backward",
root_path / "2022-01-18-15-20-35_e2e_rec_kanepi_forward",
root_path / "2022-01-18-15-49-26_e2e_rec_kanepi_backwards",
]
tr = transforms.Compose([AugmentImage(augment_config=augment_conf), Normalize()])
super().__init__(train_paths, tr, output_modality=output_modality, n_branches=n_branches, n_waypoints=n_waypoints)
class NvidiaWinterValidationDataset(NvidiaDataset):
def __init__(self, root_path, output_modality="steering_angle", n_branches=3, n_waypoints=6):
valid_paths = [
root_path / "2022-01-18-12-37-01_e2e_rec_arula_forward",
root_path / "2022-01-18-12-47-32_e2e_rec_arula_forward_continue",
root_path / "2022-01-28-14-47-23_e2e_rec_elva_forward",
root_path / "2022-01-28-15-09-01_e2e_rec_elva_backward",
root_path / "2022-01-25-15-25-15_e2e_rec_vahi_forward",
root_path / "2022-01-25-15-34-01_e2e_rec_vahi_backwards",
]
tr = transforms.Compose([Normalize()])
super().__init__(valid_paths, tr, output_modality=output_modality, n_branches=n_branches, n_waypoints=n_waypoints)
| 44.716418
| 122
| 0.613961
| 2,907
| 20,972
| 4.099071
| 0.128311
| 0.06378
| 0.056395
| 0.040282
| 0.525848
| 0.454935
| 0.341725
| 0.27996
| 0.249245
| 0.212907
| 0
| 0.102562
| 0.257534
| 20,972
| 469
| 123
| 44.716418
| 0.662706
| 0.090359
| 0
| 0.168142
| 0
| 0
| 0.2395
| 0.177364
| 0
| 0
| 0
| 0.002132
| 0
| 1
| 0.058997
| false
| 0
| 0.035398
| 0.0059
| 0.162242
| 0.014749
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b076c6afbdbadd2bb1cb6f1a1f6c0c794d322733
| 7,206
|
py
|
Python
|
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
further/deployment.py
|
openfurther/further-open-fabric-deployment
|
d571d603dad43996c66d716c69a247547f6f0ba1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (C) [2013] [The FURTHeR Project]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.api import local, lcd, prompt
from os import walk
from os.path import join
from ConfigParser import ConfigParser
import fileinput
import sys
import string
import random
import re
def deployFurtherCore(environment):
"""Deploy further-core to a given environment where environment is represented as a folder with configuration. This command is meant to be run locally"""
version = prompt("FURTHeR version to deploy?")
config = _load_configuration(environment, 'further-core.cfg')
config['version'] = version
_replace_tokens('further-core/' + environment, config)
_deploy_further_configuration(environment)
def deployFurtherI2b2(environment):
"""Deploy further-core to a given environment where environment is represented as a folder with configuration. This command is meant to be run locally"""
config = _load_configuration(environment, 'further-i2b2.cfg')
_replace_tokens('further-i2b2/' + environment, config)
_deploy_i2b2_configuration(environment)
_deploy_further_i2b2_hook(environment)
_deploy_jboss_configuration(environment)
def _deploy_i2b2_configuration(environment):
"""Deploys the i2b2 configuration to the i2b2 server environment. This function is meant to be run locally and relies on $JBOSS_HOME, $TOMCAT_HOME, and SRC_HOME being configured"""
with lcd('further-i2b2'):
with lcd(environment):
with lcd('edu.harvard.i2b2.crc'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
local('cp CRCApplicationContext.xml $JBOSS_HOME/server/default/conf/crcapp')
with lcd('edu.harvard.i2b2.crc.loader'):
local('cp CRCLoaderApplicationContext.xml $JBOSS_HOME/server/default/conf/crcloaderapp')
with lcd('edu.harvard.i2b2.ontology'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
with lcd('edu.harvard.i2b2.pm'):
with lcd('database'):
local('cp hibernate.properties $TOMCAT_HOME/webapps/gridsphere/WEB-INF/CustomPortal/database')
with lcd('persistence'):
local('cp hibernate.properties $TOMCAT_HOME/webapps/default/WEB-INF/persistence')
local('cp secret.properties $TOMCAT_HOME/webapps/axis2/WEB-INF/classes/')
with lcd('edu.harvard.i2b2.workplace'):
local('cp *-ds.xml $JBOSS_HOME/server/default/deploy')
with lcd('i2b2-webclient'):
local('rm -rf /var/www/html/i2b2')
local('cp -R $SRC_HOME/i2b2-webclient/src/main/webapp/i2b2 /var/www/html')
local('cp i2b2config.ini.php /var/www/html/i2b2/includes')
def _deploy_further_i2b2_hook(environment):
"""Deploys the further-i2b2-hook that is responsible for sending i2b2 queries to be processed by FURTHeR. Relies on $JBOSS_HOME being configured"""
with lcd('further-i2b2'):
with lcd(environment):
with lcd('i2b2-hook'):
local('cp further.properties $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/classes')
# Remove old jars
with lcd('$JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF'):
local('rm -rf lib/core-*')
local('rm -rf lib/i2b2-hook-further*')
local('rm -rf lib/slf4j-*')
local('rm -rf lib/fqe-ds-api*')
with lcd('$SRC_HOME/i2b2-hook/i2b2-hook-further/target'):
tmp_dir = 'hook-tmp'
local('rm -rf ' + tmp_dir)
local('mkdir ' + tmp_dir)
local('cp i2b2-hook-further-bin.zip ' + tmp_dir);
with lcd(tmp_dir):
local('unzip i2b2-hook-further-bin.zip')
with lcd('i2b2-hook-further'):
local('mv *.jar $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/lib')
local('mv web.xml.further $JBOSS_HOME/server/default/deploy/i2b2.war/WEB-INF/web.xml')
def _deploy_jboss_configuration(environment):
with lcd('further-i2b2'):
with lcd(environment):
with lcd('jboss'):
with lcd('jmx-console'):
local('cp *.xml $JBOSS_HOME/server/default/deploy/jmx-console.war/WEB-INF')
with lcd('props'):
local('cp *.properties $JBOSS_HOME/server/default/conf/props')
with lcd('web-console'):
local('cp *.xml $JBOSS_HOME/server/default/deploy/management/console-mgr.sar/web-console.war/WEB-INF')
local('cp *.properties $JBOSS_HOME/server/default/deploy/management/console-mgr.sar/web-console.war/WEB-INF/classes')
def _deploy_further_configuration(environment):
"""Deploy the further-core configuration. Relies on $ESB_HOME being configured"""
with lcd('further-core'):
with lcd(environment):
local('cp *.cfg $ESB_HOME/etc')
local('cp *.properties $ESB_HOME/etc')
def _load_configuration(environment, path):
"""Loads a given configuration file specified by path and environment header (ini file).
returns a key value representing the configuration. Values enclosed in {} are automatically
decrypted using the $FURTHER_PASSWORD variable. Values that equal [RND] will be replaced with
a random string."""
# Read configuration file
parser = ConfigParser()
parser.read(path)
config = {}
for option in parser.options(environment):
value = parser.get(environment, option)
# Handle encrypted configuration
if (re.match(r'^\{.*\}$', value)):
encrypted_value = re.match(r'^\{(.*)\}$', value).group(1)
value = (local('decrypt.sh input="' + encrypted_value + '" password=$FURTHER_PASSWORD algorithm="PBEWithSHA1AndDESede" verbose="false"', capture=True))
# Handle random values
if (re.match(r'\[RND\]', value)):
value = _random_string()
        config[option] = value
return config
def _replace_tokens(path, config):
"""Recursively walks the given path and replaces any tokens (@value@) with
given values within the configuration"""
replace_tokens = config.keys()
for dirname, dirnames, filenames in walk(path):
for filename in filenames:
for line in fileinput.input(join(dirname, filename), inplace=True):
newline = line
for token in replace_tokens:
replace = '@' + token.upper() + '@'
if replace in line:
newline = line.replace(replace, config.get(token))
break
sys.stdout.write(newline)
def _random_string(characters=string.ascii_uppercase + string.ascii_lowercase + string.digits, size=32):
"""Generates a random string from all upper, lower, and digits"""
return ''.join(random.choice(characters) for x in range(size))
| 45.0375
| 181
| 0.684707
| 936
| 7,206
| 5.180556
| 0.272436
| 0.03609
| 0.040214
| 0.058981
| 0.306867
| 0.255723
| 0.222108
| 0.192823
| 0.192823
| 0.129099
| 0
| 0.014874
| 0.197613
| 7,206
| 159
| 182
| 45.320755
| 0.823763
| 0.250486
| 0
| 0.093458
| 0
| 0.065421
| 0.369352
| 0.226468
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084112
| false
| 0.009346
| 0.093458
| 0
| 0.196262
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b079078e2de1f33efb9b000ddc8a7958596a0333
| 1,461
|
py
|
Python
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 41
|
2017-09-26T09:36:32.000Z
|
2022-03-19T18:05:25.000Z
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 67
|
2017-09-11T05:06:12.000Z
|
2022-02-14T04:44:04.000Z
|
Ene-Jun-2021/aguilar-cedillo-jonathan-ivan/Examen Especial/app/api.py
|
jarmarj/DAS_Sistemas
|
36c876673e7abae503cc137c3f66585a0e45ed79
|
[
"MIT"
] | 210
|
2017-09-01T00:10:08.000Z
|
2022-03-19T18:05:12.000Z
|
from flask import Flask, render_template, jsonify
from mongo import mongo
import json
# from persona import persona
import time
app = Flask(__name__, static_url_path='/static')
cliente=mongo()
peoples=cliente['mydb']['people'].find()
def parsePerson(person):
return {
'id':person['id'],
'first_name':person['first_name'],
'last_name':person['last_name'],
'company':person['company'],
'email':person['email'],
'ip_address':person['ip_address'],
'phone_number':person['phone_number']
}
def getPeoplesFromDB(peoples):
x = []
for p in peoples:
x.append(
parsePerson(p)
)
return x
def getPersonFromDB(id):
person=cliente['mydb']['people'].find_one({"id": str(id)})
return parsePerson(person)
x=getPeoplesFromDB(peoples)
@app.route('/')
def index():
    return render_template('index.html', personas=x)
@app.route('/people')
def people():
return jsonify(x)
@app.route('/people/<string:idPerson>')
def heroById(idPerson):
if(idPerson.isdigit()):
if(0<int(idPerson)<1001):
try:
persona=getPersonFromDB(idPerson)
return persona
except:
return jsonify({'status':500})
return jsonify({'status':500})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=7777)
| 25.631579
| 63
| 0.576318
| 160
| 1,461
| 5.10625
| 0.39375
| 0.029376
| 0.041616
| 0.051408
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017958
| 0.275838
| 1,461
| 57
| 64
| 25.631579
| 0.754253
| 0.01848
| 0
| 0.043478
| 0
| 0
| 0.151779
| 0.018155
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0.065217
| 0.391304
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b07eaf9c823c7f4577dd939bb64e4ed246d82116
| 1,775
|
py
|
Python
|
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
openalto/alto
|
294f4e1d45d5e3f0cc476a2f0cbb85164c7d32ae
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2021 OpenALTO Community
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# - Jensen Zhang <jingxuan.n.zhang@gmail.com>
import pytest
from alto.main import fib, main
__author__ = "OpenALTO"
__copyright__ = "OpenALTO"
__license__ = "MIT"
def test_fib():
"""API Tests"""
assert fib(1) == 1
assert fib(2) == 1
assert fib(7) == 13
with pytest.raises(AssertionError):
fib(-10)
def test_main(capsys):
"""CLI Tests"""
    # capsys is a pytest fixture that allows asserts against stdout/stderr
# https://docs.pytest.org/en/stable/capture.html
main(["7"])
captured = capsys.readouterr()
assert "The 7-th Fibonacci number is 13" in captured.out
| 34.134615
| 80
| 0.729014
| 261
| 1,775
| 4.904215
| 0.555556
| 0.06875
| 0.020313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012448
| 0.185352
| 1,775
| 51
| 81
| 34.803922
| 0.872752
| 0.727324
| 0
| 0
| 0
| 0
| 0.114094
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.133333
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0808b108456f6e159e093505f69f5f9b21e00dc
| 2,501
|
py
|
Python
|
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | null | null | null |
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | null | null | null |
tradedate.py
|
Anton-Mu/finance_sentiment_analysis
|
e319073646f8b11a3f6b5140137a7f0205918c19
|
[
"MIT"
] | 1
|
2022-02-08T06:11:51.000Z
|
2022-02-08T06:11:51.000Z
|
import re
import requests
import json
# Fetch the data from Baidu's PHP calendar API
def catch_url_from_baidu(calculation_year, month):
header = {
"Content-Type": "application/json;charset=UTF-8"
}
param = {
"query": str(calcultaion_year) + "年" + month + "月",
"resource_id": "39043",
"t": "1604395059555",
"ie": "utf8",
"oe": "gbk",
"format": "json",
"tn": "wisetpl",
"cb": ""
}
    # Source: the calendar widget shown when searching for a calendar in the Baidu search box; this is its backing API, and the results can be cross-checked against that page
r = requests.get(url="https://sp0.baidu.com/8aQDcjqpAAV3otqbppnN2DJv/api.php",
headers=header, params=param).text
month_data = json.loads(r)["data"][0]["almanac"]
work_day = []
for one in month_data:
if (one["cnDay"] != '日' and one["cnDay"] != '六'
and ('status' not in one)):
work_day.append(one)
work_days = output_info(work_day)
return work_days
# Output format; this could be changed to emit INSERT statements instead
def output_info(work_day):
work_days = []
for one in work_day:
date = one["year"] + '-' + one["month"] + '-' + one["day"]
work_days.append(date)
return work_days
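# A possible variant emitting INSERT statements instead, as mentioned above
# (sketch only; table and column names are hypothetical):
#   def output_insert(work_day):
#       return ["INSERT INTO trade_calendar (trade_date) VALUES ('{}-{}-{}');".format(
#           one["year"], one["month"], one["day"]) for one in work_day]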
# Fetch the trading calendar for the whole year first, then extract the required date range
def trade_date(start_year, start_month, start_day, end_year, end_month, end_day):
    # This only works up to the current year: the State Council publishes the next year's holiday plan each December, so this API gives wrong results for the following year.
    # e.g. on 2020-11-04 the 2021 holiday plan had not yet been published, so querying New Year's Day 2021 did not show it as a holiday.
tradedates = []
for year in range(start_year, end_year + 1):
calculation_year = year
        # The API returns the month before, the queried month, and the month after, so querying 2, 5, 8 and 11 covers the whole year. Querying May, for example, returns data for April, May and June.
calculation_month = ["2", "5", "8", "11"]
for one_month in calculation_month:
work_days = catch_url_from_baidu(calculation_year, one_month)
for work_day in work_days:
tradedates.append(work_day)
start_date = str(start_year) + "-" + str(start_month) + "-" + str(start_day)
end_date = str(end_year) + "-" + str(end_month) + "-" + str(end_day)
for i in range(len(tradedates)):
if start_date == tradedates[i]:
start_num = i
elif end_date == tradedates[i]:
end_num = i
try:
date_get = tradedates[start_num:end_num]
return date_get
except:
print("输入的数字不合规或不在工作日范围内。")
print("起始日期应在如下日期中:", tradedates)
# Enter the start and end working-day dates as year, month, day; single-digit values do not need a leading zero
if __name__ == '__main__':
date_get = trade_date(2021, 11, 1, 2022, 1, 28)
print(date_get)
| 30.13253
| 84
| 0.587765
| 294
| 2,501
| 4.765306
| 0.411565
| 0.034975
| 0.017131
| 0.024268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.038932
| 0.281088
| 2,501
| 82
| 85
| 30.5
| 0.740267
| 0.120352
| 0
| 0.034483
| 0
| 0
| 0.121313
| 0.014272
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0
| 0.051724
| 0
| 0.155172
| 0.051724
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b080d8609691c7b1384b852df88e37fc68699acb
| 1,377
|
py
|
Python
|
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | null | null | null |
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | 2
|
2019-10-20T00:42:40.000Z
|
2019-10-30T18:06:11.000Z
|
config/trainer_configs/model_config/mnist_conv_auto.py
|
jwspaeth/FAA-Project
|
afa9d3bec10deead48c4b17dff69df2e02691e41
|
[
"MIT"
] | null | null | null |
from yacs.config import CfgNode as CN
from lib.classes.dataset_classes.SubjectDataset import SubjectDataset
from config.model_helper import get_size_input
_C = CN()
_C.framework = "Keras"
_C.model_type = "ConvAutoencoder"
_C.build_type = "subclass"
# Define encoder parameters
_C.Encoder = CN()
_C.Encoder.n_filters_list = [32, 32, 16, 16, 8, 8]
_C.Encoder.kernel_size_list = [(3, 3), (2, 2), (3, 3), (2, 2), (3, 3), (2, 2)]
_C.Encoder.activation_type_list = ["elu", "elu", "elu", "elu", "elu", "sigmoid"]
_C.Encoder.n_strides_list = [(2, 2), (1, 1), (2, 2), (1, 1), (2, 2), (1, 1)]
_C.Encoder.padding_list = ["same", "same", "same", "same", "same", "same"]
# Define decoder parameters
_C.Decoder = CN()
_C.Decoder.n_filters_list = [8, 16, 32, 1]
_C.Decoder.kernel_size_list = [(2, 2), (3, 3), (3, 3), (2, 2)]
_C.Decoder.activation_type_list = ["elu", "elu", "elu", "elu", "elu", "sigmoid"]
_C.Decoder.n_strides_list = [(2, 2), (2, 2), (2, 2), (1,1)]
_C.Decoder.padding_list = ["same", "same", "same", "same", "same", "same"]
_C.Decoder.output_padding = [(1, 1), None, None, None]
# Define noise parameters
_C.Noise = CN()
_C.Noise.dummy_val = 0
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
| 35.307692
| 80
| 0.659405
| 227
| 1,377
| 3.784141
| 0.330396
| 0.030268
| 0.111758
| 0.111758
| 0.251455
| 0.204889
| 0.204889
| 0.204889
| 0.09546
| 0.09546
| 0
| 0.05017
| 0.14597
| 1,377
| 38
| 81
| 36.236842
| 0.680272
| 0.173566
| 0
| 0
| 0
| 0
| 0.106477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b080e30a075631abb8736fd8c977482b5bcb3a76
| 577
|
py
|
Python
|
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | 2
|
2017-01-16T12:08:34.000Z
|
2017-01-16T13:00:12.000Z
|
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | null | null | null |
src/graphviz/graphviz_5.py
|
lrodrin/TFG
|
b4199b5970deb7b7394ecac1c950acaa3cfde695
|
[
"MIT"
] | null | null | null |
import pydot
callgraph = pydot.Dot(graph_type='digraph', fontname="Verdana", compound='true')
cluster_foo = pydot.Cluster('foo', label='foo')
callgraph.add_subgraph(cluster_foo)
node_foo = pydot.Node('foo_method_1', label='method_1')
cluster_foo.add_node(node_foo)
cluster_bar = pydot.Cluster('bar', label='Component1')
callgraph.add_subgraph(cluster_bar)
node_bar = pydot.Node('bar_method_a')
cluster_bar.add_node(node_bar)
callgraph.add_edge(pydot.Edge(node_foo, node_bar, ltail=cluster_foo.get_name(), lhead=cluster_bar.get_name()))
callgraph.write('graphviz_5.dot')
| 28.85
| 110
| 0.785095
| 89
| 577
| 4.775281
| 0.337079
| 0.117647
| 0.094118
| 0.127059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007421
| 0.065858
| 577
| 19
| 111
| 30.368421
| 0.781076
| 0
| 0
| 0
| 0
| 0
| 0.143847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0833cf5f5755370727e1adbc3f27b3a23bd86c3
| 12,544
|
py
|
Python
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | 2
|
2021-08-12T06:57:59.000Z
|
2021-09-09T19:28:04.000Z
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | 1
|
2021-11-19T16:32:54.000Z
|
2021-11-19T16:32:54.000Z
|
lfa.py
|
jwbensley/pyFRR
|
7f5792586b528c9f7c0d84f263eaae5f1c718661
|
[
"MIT"
] | null | null | null |
import networkx as nx
import os
from diagram import Diagram
from spf import spf
class lfa:
"""This class provides RFC5286 lfa calculations"""
def __init__(self, debug=0):
"""
Init the lfa class.
:param int debug: debug level, 0 is disabled.
:return None: __init__ shouldn't return anything
:rtype: None
"""
self.debug = debug
self.diagram = Diagram(debug=self.debug)
self.path_types = ["lfas_dstream", "lfas_link", "lfas_node"]
self.spf = spf(debug=self.debug)
def draw(self, graph, outdir, topology):
"""
Loop over the generated topologies and render them as diagram files.
:param networkx.Graph graph: NetworkX graph object
:param str outdir: string of the root output directory path
:param dict topology: topology paths dict
:return bool True: True if all diagrams rendered otherwise False
:rtype: bool
"""
self.diagram.gen_sub_dirs(graph, outdir, self.path_types, topology)
for src, dst in [
(s, d) for d in graph.nodes for s in graph.nodes if s != d
]:
for path_type in self.path_types:
if path_type not in topology[src][dst]:
continue
if len(topology[src][dst][path_type]) > 0:
frr_graph = graph.copy()
# Highlight the failed first-hop link(s) as red
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_link(
"red",
frr_graph,
path,
)
# Highlight the failed first-hop node(s) as red
if path_type == "lfas_dstream":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
elif path_type == "lfas_node":
for path in topology[src][dst]["spf_metric"]:
frr_graph = self.diagram.highlight_fh_node(
"red",
frr_graph,
path,
)
for path in topology[src][dst][path_type]:
frr_graph = self.diagram.highlight_links(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_nodes(
"green",
frr_graph,
path,
)
frr_graph = self.diagram.highlight_src_dst(
"lightblue", dst, frr_graph, src
)
# Add labels to links showing their cost
frr_graph = self.diagram.label_link_weights(frr_graph)
self.diagram.gen_diagram(
(src + "_" + dst + "_" + path_type),
frr_graph,
os.path.join(outdir, src, path_type),
)
def gen_metric_paths(self, dst, graph, src):
"""
Return all lfa paths between the src and dst nodes in graph, based on
link metric (not hop count), which provide link, downstream, or node
protection, and return all alternate paths in a dict of lists keyed by
lfa path protection type.
:param str dst: Destination node name in graph
:param networkx.Graph graph: NetworkX graph object
:param str src: Source node name in graph
:return lfas: dict of lists keyed by lfa type
:rtype: dict
"""
lfas = {"lfas_link": [], "lfas_dstream": [], "lfas_node": []}
if self.debug > 0:
print(f"Calculating for lfa paths from {src} to {dst}")
s_d_paths = self.spf.gen_metric_paths(dst=dst, graph=graph, src=src)
# There are no paths between this src,dst pair
if not s_d_paths:
return lfas
# Loop over each neighbour to check if each one is an lfa candidate
for nei in graph.neighbors(src):
            # If dst is directly connected
if nei == dst:
continue
if self.debug > 1:
print(f"Checking for lfa paths via {nei}")
# This nei is the next-hop for the current best path(s)
if nei in [path[1] for path in s_d_paths]:
if self.debug > 1:
print(
f"Rejected lfas via next-hop {nei}, it is a next-hop "
f"in the current best path(s): {s_d_paths}"
)
continue
"""
ECMP may be used meaning src has multiple equal cost best paths to
dst. And/or, nei may have multiple equal cost best paths to dst.
Regardless, of the number of paths, they are the same cost, so only
check the cost of the first best path of src against the first best
path of nei.
"""
nh = s_d_paths[0][1]
try:
n_d_cost = nx.dijkstra_path_length(graph, source=nei, target=dst)
n_s_cost = nx.dijkstra_path_length(graph, source=nei, target=src)
s_d_cost = nx.dijkstra_path_length(graph, source=src, target=dst)
n_nh_cost = nx.dijkstra_path_length(graph, source=nei, target=nh)
nh_d_cost = nx.dijkstra_path_length(graph, source=nh, target=dst)
except nx.exception.NetworkXNoPath:
# There isn't connectivity between the nodes; src, dst, nh, nei
continue
if self.debug > 1:
print(
f"{nei} -> {dst}: {n_d_cost}\n"
f"{nei} -> {src}: {n_s_cost}\n"
f"{src} -> {dst}: {s_d_cost}\n"
f"{nei} -> {nh}: {n_nh_cost}\n"
f"{nh} -> {dst}: {nh_d_cost}"
)
link_prot = False
down_prot = False
node_prot = False
"""
RFC5286:
Inequality 1: Loop-Free Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
toward destination D, that is link protecting, iff:
Distance_opt(N, D) < Distance_opt(N, S) + Distance_opt(S, D)
In this scenario, N's cost to D is lower than N's cost to S + S's
cost to D, so N must have an alternative path to D not via S, but
S and N might be sharing the same next-hop router, and N simply
has another link to that shared next-hop router, so it is link
protecting only, for S's link to it's next-hop.
"""
if n_d_cost < (n_s_cost + s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {src} + {src} to {dst}), "
f"{n_d_cost} < {n_s_cost+s_d_cost}"
)
# nei protects src against link failure to next-hop toward dst
link_prot = True
"""
RFC5286:
Inequality 2: Downstream Path Criterion
A neighbor N of source S can provide a loop-free alternate (lfa)
to downstream paths of D, which could be link or node protecting,
iff:
Distance_opt(N, D) < Distance_opt(S, D)
In this scenario, N's cost to D is lower than S's so N won't route
            back to S. This guarantees basic loop avoidance but it doesn't
restrict the lfa path to be link protecting or node protecting.
This scenario is usually used to provide protection for a specific
downstream prefix of node D rather than S's next-hop node or link
toward D.
"""
if n_d_cost < (s_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < {src} to {dst}: "
f"{n_d_cost} < {n_s_cost}"
)
# nei protects src against failure of link or node toward dst
down_prot = True
"""
RFC5286:
Inequality 3: Criteria for a Node-Protecting Loop-Free Alternate
For an alternate next-hop N to protect against node failure of a
primary neighbor E for destination D, N must be loop-free with
respect to both E and D.
Distance_opt(N, D) < Distance_opt(N, E) + Distance_opt(E, D)
In this scenario, neighbour N of source router S, uses a different
next-hop router toward destination D, than router E which is S's
next-hop router toward D. This provides node protection against S's
next-hop router E.
"""
if n_d_cost < (n_nh_cost + nh_d_cost):
if self.debug > 1:
print(
f"{nei} to {dst} < ({nei} to {nh} + {nh} to {dst}), "
f"{n_d_cost} < {n_nh_cost+nh_d_cost}"
)
# nei protects src against next-hop node failure toward dst
node_prot = True
# nei might have multiple equal-cost best paths to dst
n_d_paths = self.spf.gen_metric_paths(
dst=dst, graph=graph, src=nei
)
for n_d_path in n_d_paths:
if link_prot:
                    # Prepend src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_link"].append(n_d_path)
if self.debug > 1:
print(
f"New link protecting lfa from {src} to "
f"{dst} via {nei}, protects against link "
f"{src}-{nh}: {n_d_path}"
)
if down_prot:
                    # Prepend src to n_d_path because it starts from nei
if n_d_path[0] != src:
n_d_path.insert(0, src)
lfas["lfas_dstream"].append(n_d_path)
if self.debug > 1:
print(f"New downstream protecting lfa: {n_d_path}")
if node_prot:
"""
In order to protect pre-failure ECMP best-paths, check that
this node protecting path doesn't overlap with any of the
ECMP next-hop nodes
"""
s_d_fhs = [path[1] for path in s_d_paths]
overlap = [
fh
for fh in s_d_fhs
for n_d_path in n_d_paths
if fh in n_d_path
]
if overlap:
if self.debug > 1:
print(
f"lfa path {n_d_path} is not node protecting "
f"against {overlap} from {src} to {dst}"
)
continue
lfas["lfas_node"].append(n_d_path)
if self.debug > 1:
print(
f"New node protecting path from {src} to {dst} "
f"via {nei}, protects against node {nh}: "
f"{n_d_path}"
)
return lfas
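    # Minimal usage sketch (assumed topology, weights and result; not from the
    # original source):
    #   g = nx.Graph()
    #   g.add_edge("S", "E", weight=1); g.add_edge("E", "D", weight=1)
    #   g.add_edge("S", "N", weight=1); g.add_edge("N", "D", weight=2)
    #   alternates = lfa().gen_metric_paths(dst="D", graph=g, src="S")
    #   # alternates might look like {"lfas_link": [["S", "N", "D"]], ...}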
def init_topo(self, graph, topo):
"""
Create empty dict keys for all possible paths this class can generate
:return None:
:rtype: None
"""
for src in graph.nodes:
for dst in graph.nodes:
if src == dst:
continue
for path_type in self.path_types:
if path_type not in topo[src][dst]:
topo[src][dst][path_type] = []
| 39.949045
| 81
| 0.479034
| 1,519
| 12,544
| 3.817643
| 0.156682
| 0.010347
| 0.016555
| 0.020693
| 0.383342
| 0.341438
| 0.310226
| 0.289878
| 0.247112
| 0.176237
| 0
| 0.005913
| 0.447226
| 12,544
| 313
| 82
| 40.076677
| 0.830401
| 0.137755
| 0
| 0.309942
| 0
| 0.011696
| 0.127963
| 0.002648
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023392
| false
| 0
| 0.023392
| 0
| 0.064327
| 0.064327
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0836df3831634d69eba10f383e0ec13d3b01887
| 4,523
|
py
|
Python
|
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 4
|
2021-08-05T09:20:34.000Z
|
2021-08-08T19:37:29.000Z
|
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | null | null | null |
acapy_client/models/presentation_definition.py
|
dbluhm/acapy-client
|
d92ef607ba2ff1152ec15429f2edb20976991424
|
[
"Apache-2.0"
] | 2
|
2021-08-12T18:18:45.000Z
|
2021-08-14T13:22:28.000Z
|
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.claim_format import ClaimFormat
from ..models.input_descriptors import InputDescriptors
from ..models.submission_requirements import SubmissionRequirements
from ..types import UNSET, Unset
T = TypeVar("T", bound="PresentationDefinition")
@attr.s(auto_attribs=True)
class PresentationDefinition:
""" """
format_: Union[Unset, ClaimFormat] = UNSET
id: Union[Unset, str] = UNSET
input_descriptors: Union[Unset, List[InputDescriptors]] = UNSET
name: Union[Unset, str] = UNSET
purpose: Union[Unset, str] = UNSET
submission_requirements: Union[Unset, List[SubmissionRequirements]] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
format_: Union[Unset, Dict[str, Any]] = UNSET
if not isinstance(self.format_, Unset):
format_ = self.format_.to_dict()
id = self.id
input_descriptors: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.input_descriptors, Unset):
input_descriptors = []
for input_descriptors_item_data in self.input_descriptors:
input_descriptors_item = input_descriptors_item_data.to_dict()
input_descriptors.append(input_descriptors_item)
name = self.name
purpose = self.purpose
submission_requirements: Union[Unset, List[Dict[str, Any]]] = UNSET
if not isinstance(self.submission_requirements, Unset):
submission_requirements = []
for submission_requirements_item_data in self.submission_requirements:
submission_requirements_item = submission_requirements_item_data.to_dict()
submission_requirements.append(submission_requirements_item)
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update({})
if format_ is not UNSET:
field_dict["format"] = format_
if id is not UNSET:
field_dict["id"] = id
if input_descriptors is not UNSET:
field_dict["input_descriptors"] = input_descriptors
if name is not UNSET:
field_dict["name"] = name
if purpose is not UNSET:
field_dict["purpose"] = purpose
if submission_requirements is not UNSET:
field_dict["submission_requirements"] = submission_requirements
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
_format_ = d.pop("format", UNSET)
format_: Union[Unset, ClaimFormat]
if isinstance(_format_, Unset):
format_ = UNSET
else:
format_ = ClaimFormat.from_dict(_format_)
id = d.pop("id", UNSET)
input_descriptors = []
_input_descriptors = d.pop("input_descriptors", UNSET)
for input_descriptors_item_data in _input_descriptors or []:
input_descriptors_item = InputDescriptors.from_dict(input_descriptors_item_data)
input_descriptors.append(input_descriptors_item)
name = d.pop("name", UNSET)
purpose = d.pop("purpose", UNSET)
submission_requirements = []
_submission_requirements = d.pop("submission_requirements", UNSET)
for submission_requirements_item_data in _submission_requirements or []:
submission_requirements_item = SubmissionRequirements.from_dict(submission_requirements_item_data)
submission_requirements.append(submission_requirements_item)
presentation_definition = cls(
format_=format_,
id=id,
input_descriptors=input_descriptors,
name=name,
purpose=purpose,
submission_requirements=submission_requirements,
)
presentation_definition.additional_properties = d
return presentation_definition
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
| 36.475806
| 110
| 0.668804
| 494
| 4,523
| 5.827935
| 0.153846
| 0.138937
| 0.055575
| 0.031261
| 0.255644
| 0.159083
| 0.07711
| 0.045155
| 0.033345
| 0.033345
| 0
| 0
| 0.244307
| 4,523
| 123
| 111
| 36.772358
| 0.842305
| 0
| 0
| 0.085106
| 0
| 0
| 0.031222
| 0.015058
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074468
| false
| 0
| 0.06383
| 0.031915
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b085477d012b84ae2bc4a7c010866eb15378b909
| 12,344
|
py
|
Python
|
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | 1
|
2021-11-03T13:06:21.000Z
|
2021-11-03T13:06:21.000Z
|
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | null | null | null |
aiodata/server.py
|
Exahilosys/aiodata
|
b4e475eac055f5f391219ae8b893a3e508b51fd8
|
[
"MIT"
] | null | null | null |
"""
Launch a proxy for transforming `/paths/like/these` to PostgREST filters.
Usage:
aiodata -h | --help
aiodata <file> [--host=<str>] [--port=<int>] [--query=<str>] [--state=<str>]
aiodata [--db-uri=<uri>] [--pr-uri=<uri>] [--host=<str>] [--port=<int>] [--schema=<str>] [--secret=<str>] [--query=<str>] [--state=<str>]
Options:
-h --help Show this screen.
file Path to the `.conf` file for PostgREST.
--db-uri=<uri> Uri to the PostgreSQL database. [default: postgres://admin@localhost/postgres]
--pr-uri=<uri> Uri to the PostgREST server. [default: http://localhost:3000]
--host=<str> Host to launch the proxy at. [default: localhost]
--port=<int> Port to launch the proxy at. [default: 4000]
--schema=<str> The exposed schema to describe. [default: api]
--secret=<str> Authenticates websocket tokens (claims dont matter).
--query=<str> Routing path to expose queries at. [default: /query]
--state=<str> Routing path to expose websockets at if applicable. [default: /state]
Queries are proxy-adjusted requests whose paths get transformed to filters.
For example, `/table/val1/val2` turns into `/table?clm1=eq.val1&clm2=eq.val2`.
- There is no way to specify the returned columns.
- All responses use the `Prefer: return=representation` header.
- Binary values are not supported. Convert to base64 in the database.
Websocket connections are initiated through `/state`.
Authorization is only enforced if `secret` is present. Claims are irrelevant.
Json data is sent upon any successful POST, PATCH or DELETE.
The payload itself is a 4-item array:
1: Name of the request method.
2: Name of the affected table.
3: Query used for this operation, eg {"clm1": "val1", "clm2": "val2"}.
4: The entries returned from the PostgREST response.
Send a `SIGUSR1` signal to reload the schema upon changes.
"""
import asyncio
import asyncpg
import aiohttp
import aiohttp.web
import yarl
import os
import aiofiles
import collections
import itertools
import jwt
import signal
import json
import warnings
import sys
import docopt
import configparser
import io
__all__ = ()
def connect(uri):
return asyncpg.create_pool(
host = uri.host,
port = uri.port,
user = uri.user,
password = uri.password,
database = uri.parts[1]
)
_NOTIFY = {'POST', 'PATCH', 'DELETE'}
_HDRS_PASS = {'Authorization', 'Range', 'Content-Type'}
_HDRS_SKIP = {'Content-Type'}
_anon = object()
class Server:
"""
Main means of launching the server proxy.
:param asyncpg.pool.Pool pool:
The connection pool.
:param str origin:
The PostgreSQL database uri.
:param str target:
The address to connect to.
:param str schema:
The schema exposed by PostgREST.
"""
__slots__ = ('_pool', '_session', '_origin', '_schema', '_script',
'_details', '_primaries', '_secret', '_websockets', '_ready')
path = '/{steps:.+}'
def __init__(self, pool, origin, schema, secret = None):
self._pool = pool
self._session = None
self._origin = origin
self._schema = schema
self._script = None
self._details = None
self._primaries = None
self._secret = secret
self._websockets = collections.defaultdict(list)
self._ready = asyncio.Event()
@property
def details(self):
return self._details
@property
def ready(self):
return self._ready
def _resolve_path(self, path):
"""
Get query and tables.
"""
(table, *values) = path.split('/')
names = self._primaries.get(table, ())
query = tuple(zip(names, values))
return (table, query)
def _resolve_query(self, query):
"""
Get PostgREST filter.
"""
return {name: f'eq.{value}' for (name, value) in query}
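    # For example (hypothetical table and key): a request path "users/42", where
    # "id" is the recorded primary key of "users", resolves to
    # ("users", (("id", "42"),)) and then to the PostgREST filter {"id": "eq.42"}.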
def _auth(self, headers):
token = headers.get('Authorization')
if self._secret and token:
token = token.split(' ')[-1] # - Bearer
claims = jwt.decode(token, self._secret)
return claims['role']
return _anon
async def query(self, request):
"""
Handle requests to querying the database.
"""
await self._ready.wait()
method = request.method
headers = request.headers.copy()
for key in tuple(headers.keys()):
if key in _HDRS_PASS:
continue
del headers[key]
headers['Prefer'] = 'return=representation'
path = request.match_info['steps']
(table, query) = self._resolve_path(path)
params = self._resolve_query(query)
uri = self._origin.with_path(table)
data = request.content
response = await self._session.request(
method,
uri,
params = params,
headers = headers,
data = data
)
if 200 <= response.status <= 201 and method in _NOTIFY:
entries = await response.json()
try:
(names, values) = zip(*query)
except ValueError:
values = ()
payload = json.dumps((method, table, values, entries))
apply = lambda websocket: websocket.send_str(payload)
try:
role = self._auth(headers)
except jwt.InvalidSignatureError:
warnings.warn('Secret could not validate accepted token.')
else:
websockets = self._websockets[role]
await asyncio.gather(*map(apply, websockets))
data = json.dumps(entries).encode()
else:
data = response.content
response = aiohttp.web.Response(
body = data,
headers = response.headers,
status = response.status,
)
response.enable_compression()
response.enable_chunked_encoding()
return response
async def state(self, request, id = None):
"""
Handle requests for connecting to the database.
"""
try:
role = self._auth(request.headers)
except jwt.InvalidSignatureError:
raise aiohttp.web.HTTPUnauthorized(reason = 'Invalid token.')
websockets = self._websockets[role]
websocket = aiohttp.web.WebSocketResponse(heartbeat = 30)
await websocket.prepare(request)
websockets.append(websocket)
try:
async for message in websocket:
pass # receiving does nothing
finally:
websockets.remove(websocket)
return websocket
async def describe(self):
"""
Create the schema description.
"""
self._ready.clear()
entries = await self._pool.fetch(self._script)
details = collections.defaultdict(dict)
primaries = collections.defaultdict(list)
for entry in map(dict, entries):
table = entry.pop('table')
field = entry.pop('field')
details[table][field] = entry
if entry['main']:
primaries[table].append(field)
self._details = dict(details)
self._primaries = dict(primaries)
self._ready.set()
async def _load(self, name = 'schema.psql'):
"""
Get the description script.
"""
path = os.path.realpath(__file__)
directory = os.path.dirname(path)
path = os.path.join(directory, name)
async with aiofiles.open(path) as file:
template = await file.read()
self._script = template.format(self._schema)
async def _setup(self):
self._session = aiohttp.ClientSession(skip_auto_headers = _HDRS_SKIP)
async def start(self):
"""
Start the client.
"""
await self._load()
await self._setup()
await self.describe()
async def stop(self):
"""
Stop the client.
"""
await self._session.close()
apply = lambda websocket: websocket.close()
websockets = itertools.chain.from_iterable(self._websockets.values())
await asyncio.gather(*map(apply, websockets))
self._websockets.clear()
async def make(pool,
uri ,
schema = 'api',
secret = None,
query = '/query',
state = '/state'):
routes = aiohttp.web.RouteTableDef()
server = Server(pool, uri, schema, secret = secret)
path = query + server.path
for verb in ('GET', 'POST', 'PATCH', 'DELETE'):
routes.route(verb, path)(server.query)
async def handle(request):
await server.ready.wait()
return aiohttp.web.json_response(server.details)
routes.route('GET', '/')(handle)
routes.route('GET', state)(server.state)
return (routes, server)
async def main(app, db_uri, pr_uri, host, port, **options):
"""
Start the proxy.
:param str db_uri:
URL for the PostgreSQL database.
:param str pr_uri:
URL for the PostgREST server.
:param str host:
Host to launch the proxy at.
:param int port:
Port to launch the proxy at.
:param str schema:
The exposed schema.
:param str secret:
Used for authenticating websocket tokens and use their ``role`` claim.
:param str query:
The path to expose queries at.
:param str state:
The path to expose websockets at if applicable.
"""
loop = asyncio.get_event_loop()
db_uri = yarl.URL(db_uri)
pool = await connect(db_uri)
pr_uri = yarl.URL(pr_uri)
(routes, server) = await make(pool, pr_uri, **options)
app.router.add_routes(routes)
reload = lambda: asyncio.ensure_future(server.describe())
loop.add_signal_handler(signal.SIGUSR1, reload)
await server.start()
runner = aiohttp.web.AppRunner(app)
await runner.setup()
site = aiohttp.web.TCPSite(runner, host, port)
await site.start()
try:
await loop.create_future()
except asyncio.CancelledError:
pass
await server.stop()
await site.stop()
await runner.cleanup()
def serve(env_prefix = 'AIODT_'):
"""
Console functionality.
"""
args = docopt.docopt(__doc__, argv = sys.argv[1:])
def geta(key):
try:
conkey = key.lstrip('-').replace('-', '_').upper()
return os.environ[env_prefix + conkey]
except KeyError:
pass
return args[key]
pr_uri = yarl.URL(geta('--pr-uri'))
path = args['<file>']
if path:
config = configparser.ConfigParser()
with open(path) as file:
data = file.read()
head = '_'
data = f'[{head}]\n{data}'
config.read_string(data)
config = config[head]
def getf(key, default = None):
try:
value = config[key]
except KeyError:
return default
return value.strip('"')
db_uri = getf('db-uri')
schema = getf('db-schema')
secret = getf('jwt-secret')
host = getf('server-host', None)
if host:
pr_uri = pr_uri.with_host(host)
port = getf('server-port', None)
if port:
pr_uri = pr_uri.with_port(int(port))
else:
db_uri = geta('--db-uri')
schema = geta('--schema')
secret = geta('--secret')
host = geta('--host')
port = geta('--port')
port = int(port)
query = geta('--query')
state = geta('--state')
loop = asyncio.get_event_loop()
app = aiohttp.web.Application()
task = loop.create_task(
main(
app, db_uri, pr_uri, host, port,
schema = schema, secret = secret,
query = query, state = state
)
)
try:
loop.run_until_complete(task)
except KeyboardInterrupt:
pass
task.cancel()
try:
loop.run_until_complete(task)
except asyncio.CancelledError:
pass
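# Programmatic start-up sketch (URLs, credentials and ports below are illustrative,
# not part of this module):
#
#     app = aiohttp.web.Application()
#     asyncio.get_event_loop().run_until_complete(
#         main(app, 'postgres://user:pass@localhost/db', 'http://localhost:3000',
#              '127.0.0.1', 8080, schema='api', secret=None,
#              query='/query', state='/state')
#     )
#
# While running, sending SIGUSR1 to the process re-runs Server.describe() and refreshes
# the cached schema description.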
| 26.660907
| 141
| 0.577041
| 1,383
| 12,344
| 5.048445
| 0.248735
| 0.010026
| 0.006875
| 0.009166
| 0.076769
| 0.05328
| 0.026926
| 0.007161
| 0
| 0
| 0
| 0.004462
| 0.310029
| 12,344
| 462
| 142
| 26.718615
| 0.815311
| 0.196209
| 0
| 0.135135
| 0
| 0
| 0.051929
| 0.002355
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03861
| false
| 0.030888
| 0.065637
| 0.011583
| 0.173745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0889dcb4a9fc459f520ba21dc747165ea106830
| 838
|
py
|
Python
|
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | 1
|
2019-07-09T09:59:39.000Z
|
2019-07-09T09:59:39.000Z
|
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
urldownload/DataOutput.py
|
MisterZhouZhou/pythonLearn
|
8933c7a6d444d3d86a173984e6cf4c08dbf84039
|
[
"Apache-2.0"
] | null | null | null |
# coding:utf-8
import codecs
class DataOutput(object):
def __init__(self):
self.datas = []
def store_data(self, data):
if data is None:
return
self.datas.append(data)
def output_html(self, path, data):
fout = codecs.open(path, 'w+', encoding='utf-8')
fout.write('<html>')
fout.write('<body>')
for t_data in data:
fout.write(str(t_data))
fout.write('</body>')
fout.write('</html>')
fout.close()
# def output_html(self, **kwargs):
# fout = codecs.open('baike.html', 'w', encoding='utf-8')
# fout.writer('<html>')
# fout.writer('<body>')
# for data in self.datas:
# fout.writer(data)
# fout.writer('</body>')
# fout.writer('</html>')
# fout.close()
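if __name__ == '__main__':
    # Minimal usage sketch; the rows and the output path are illustrative only.
    out = DataOutput()
    out.store_data({'title': 'example', 'url': 'http://example.com'})
    out.store_data(None)  # ignored by store_data
    out.output_html('baike.html', out.datas)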
| 26.1875
| 65
| 0.516706
| 101
| 838
| 4.19802
| 0.356436
| 0.106132
| 0.061321
| 0.080189
| 0.080189
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005172
| 0.307876
| 838
| 32
| 66
| 26.1875
| 0.725862
| 0.336516
| 0
| 0
| 0
| 0
| 0.06044
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.058824
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b089aca6a94743358087e5f0bbece897c4de4d3c
| 2,600
|
py
|
Python
|
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | 1
|
2020-01-22T17:11:59.000Z
|
2020-01-22T17:11:59.000Z
|
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | null | null | null |
ga4gh/htsget/compliance/schema_validator.py
|
jmtcsngr/htsget-compliance
|
35521f0b1361307f887b22c559823f1ba9dd8052
|
[
"Apache-2.0"
] | 2
|
2020-02-06T10:29:16.000Z
|
2020-02-10T09:59:54.000Z
|
# -*- coding: utf-8 -*-
"""Validates htsget response matches JSON schema"""
import inspect
import json
import os
from jsonschema import validate
from jsonschema import RefResolver
from jsonschema.exceptions import ValidationError
from ga4gh.htsget.compliance.config import constants as c
class SchemaValidator(object):
"""Validates htsget response matches JSON schema
Attributes:
SUCCESS (int): constant. indicates successful validation
FAILURE (int): constant. indicates unsuccessful validation
schema_file (str): filename containing JSON schema
schema_dir (str): path to local dir containing htsget JSON schemas
schema_path (str): full path to htsget response JSON schema file
resolver (RefResolver): resolves external references to the schema dir
schema_json (dict): loaded htsget response JSON schema
"""
SUCCESS = 1
FAILURE = -1
def __init__(self):
"""Instantiates a SchemaValidator object"""
self.schema_file = c.SCHEMA_HTSGET_RESPONSE
self.schema_dir = os.path.join(
os.path.dirname(
os.path.dirname(inspect.getmodule(self).__file__)
),
"schemas"
)
self.schema_path = os.path.join(self.schema_dir, self.schema_file)
self.resolver = RefResolver('file://{}/'.format(self.schema_dir), None)
        with open(self.schema_path, 'r') as schema_file:
            self.schema_json = json.load(schema_file)
def validate_instance(self, instance_json):
"""Validate a JSON object/response against the htsget response schema
Args:
instance_json (dict): loaded JSON object to validate
Returns:
dict: contains success/failure of validation, and message
"""
# setup validation object
# test status initialized as passing
validation_result = {
"status": SchemaValidator.SUCCESS,
"exception_class": "",
"message": ""
}
try:
# api method to compare json instance to the schema
validate(instance=instance_json, schema=self.schema_json,
resolver=self.resolver)
except ValidationError as e:
# if the api method raises an error, the result dictionary set
# to include failure status and error message
validation_result["status"] = SchemaValidator.FAILURE
validation_result["exception_class"] = str(e.__class__.__name__)
validation_result["message"] = e.message
return validation_result
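if __name__ == "__main__":
    # Usage sketch: validate an in-memory object against the bundled htsget response
    # schema. The payload below is illustrative and may or may not pass validation.
    validator = SchemaValidator()
    response = {"htsget": {"format": "BAM", "urls": [{"url": "https://example.org/reads"}]}}
    result = validator.validate_instance(response)
    print(result["status"], result["exception_class"], result["message"])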
| 35.616438
| 79
| 0.65
| 286
| 2,600
| 5.762238
| 0.342657
| 0.054612
| 0.023665
| 0.036408
| 0.048544
| 0.048544
| 0
| 0
| 0
| 0
| 0
| 0.002112
| 0.271538
| 2,600
| 72
| 80
| 36.111111
| 0.868004
| 0.398077
| 0
| 0
| 0
| 0
| 0.050929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.2
| 0
| 0.371429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b08ef2d8e3033532d604bc5d0f9f77203768c894
| 16,116
|
py
|
Python
|
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | null | null | null |
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | null | null | null |
monitor_ed/ed_monitor.py
|
Andywang201605/astro_tools
|
e1ceaeaa17d1391cb2d7f1eb15f9bdeced42b534
|
[
"MIT"
] | 1
|
2021-02-15T06:00:47.000Z
|
2021-02-15T06:00:47.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 25 18:17:13 2021
@author: AndyWang
"""
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from datetime import datetime
from time import sleep
import pandas as pd
import numpy as np
import os
import subprocess
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
def _start_webdriver(browser, driverpath):
'''
Function to start a webdriver
Parameters
----------
browser : str
Type of the browser you want to open
driverpath : str
Path of the driver.
Returns
-------
selenium.webdriver
Webdriver object for further usage
'''
if browser.lower() == 'edge':
return webdriver.Edge(executable_path=driverpath)
elif browser.lower() == 'chrome':
return webdriver.Chrome(executable_path=driverpath)
else:
raise NotImplementedError(f'Code for {browser} is not implemented')
def _open_browser_cmd(port, cache_dir):
'''
Open chrome in debugging mode
'''
chrome_cmd = f'chrome.exe --remote-debugging-port={port} --user-data-dir="{cache_dir}"'
subprocess.Popen(chrome_cmd)
def _connect_selenium(driverpath, port, cache_dir):
'''
connect your browser to python
Returns
-------
driver: Selenium.webdriver object that is connected to your browser
'''
chrome_options = Options()
chrome_options.add_experimental_option("debuggerAddress", f"127.0.0.1:{port}")
driver = webdriver.Chrome(driverpath, options=chrome_options)
return driver
def _find_inputbox(driver, timeout=30):
'''
Find inputbox element in Ed analytics page
Parameters
----------
driver : selenium.webdriver
timeout : float/int, optional
        Timeout limit for finding the element. The default is 30.
Raises
------
TimeoutError
Returns
-------
inputbox : selenium.webdriver.remote.webelement.WebElement
Input box for searching.
'''
tstart = datetime.now()
while True:
# break out the loop if
# 1) find the element successfully
# 2) reach the time limit
try:
inputbox = driver.find_element_by_tag_name('input')
return inputbox
        except Exception:
tnow = datetime.now()
if (tnow - tstart).total_seconds() > timeout:
raise TimeoutError('Check out your connection!')
sleep(0.3)
def _search_tut(inputbox, tutcode):
'''
Searching tut in Ed analytics page
Parameters
----------
inputbox : selenium.webdriver.remote.webelement.WebElement
Webelement for input box
tutcode : str
tutorial for searching.
Returns
-------
None.
'''
inputbox.clear()
inputbox.send_keys(tutcode)
def _get_header_use(thtag):
'''
Get header attribute from usetag
Parameters
----------
thtag : bs4.element.Tag
Table header tag.
Returns
-------
str
header attribute.
'''
usetag = thtag.findAll('use')
if len(usetag) == 0:
return '#'
return usetag[0].attrs['xlink:href']
def _get_tdstatus(tdtag):
'''
Get table cell content or status (for questions)
Parameters
----------
tdtag : bs4.element.Tag
table cell tag.
Returns
-------
str
table cell content or status.
'''
text = tdtag.text
if text:
if text != '\u200b':
return text
if 'class' in tdtag.attrs:
cellclass = tdtag.attrs['class']
if len(cellclass) > 1:
return cellclass[1].split('-')[-1]
return ''
def _get_tdlink(tdtag):
atags = tdtag.findAll('a')
if len(atags) > 0:
return 'https://edstem.org{}'.format(atags[0].attrs['href'])
return 'N/A'
def _get_analytics_table(driver):
'''
Get analytics table from driver
Parameters
----------
driver : selenium.webdriver
Driver that opens Ed analytics page.
Returns
-------
analytics_df : pandas.DataFrame
DataFrame for analytics table.
colattrs : list
A list of column's attribute.
'''
soup = BeautifulSoup(driver.page_source, 'lxml')
table = soup.findAll('table', attrs={'class':"lesson-analytics-table"})[0]
### get header and body tag
thead = table.findAll('thead')[0]
tbody = table.findAll('tbody')[0]
### extract info from html to list
### (Note: pandas.read_html doesn't work for this case)
# header
header = []
colattrs = []
for thtag in thead.findAll('th'):
header.append(thtag.text.strip())
colattrs.append(_get_header_use(thtag))
# body
tablecells = []
tablehtmls = []
trtags = tbody.findAll('tr')
for trtag in trtags:
rowcells = []
rowhtmls = []
tdtags = trtag.findAll('td')
for tdtag in tdtags:
rowcells.append(_get_tdstatus(tdtag))
rowhtmls.append(_get_tdlink(tdtag))
tablecells.append(rowcells)
tablehtmls.append(rowhtmls)
analytics_df = pd.DataFrame(tablecells, columns=header)
analytics_html = pd.DataFrame(tablehtmls, columns=header)
return analytics_df, analytics_html, colattrs
def _check_search_loaded(driver, tutcode):
df, _, _ = _get_analytics_table(driver)
tutcol = df['Tutorial'].apply(lambda x:x.lower())
if (tutcol != tutcode.lower()).sum() > 0:
return False
return True
def _get_online_students(analytics_df):
'''
Get students that are online
'''
opened_count = (analytics_df.iloc[:, 3:] != 'unopened').sum(axis=1)
return opened_count > 0
def _get_code_cols(colattrs):
'''
Get columns for code only
'''
code_check = []
for attr in colattrs:
if attr == '#lesson-slide-code' or attr == '#lesson-slide-postgres':
code_check.append(True)
else:
code_check.append(False)
return code_check
def _prepare_code_plotting(analytics_df, colattrs):
good_stu = _get_online_students(analytics_df)
code_check = _get_code_cols(colattrs)
cleaned_df = analytics_df.loc[good_stu, code_check]
### preparing statistics
### We use .iloc here to avoid same question in one week
stats = {'completed':[],
'attempted':[],
'opened':[],
'unopened':[],
}
for colidx in range(cleaned_df.shape[1]):
colseries = cleaned_df.iloc[:,colidx]
for status in stats:
stats[status].append((colseries == status).sum())
colnames = cleaned_df.columns.tolist()
### return values
return stats, colnames
def _plot_code_status(stats, colnames):
fig = plt.figure(figsize=(12, len(colnames)/2))
ax = fig.add_subplot(111)
ypos = range(len(colnames),0,-1)
left = np.zeros(len(colnames))
statuses = ['completed', 'attempted', 'opened', 'unopened']
barcolor = {'completed':'green',
'attempted':'orange',
'opened':'yellow',
'unopened':'white'
}
for status in statuses:
ax.barh(ypos, stats[status], left=left,
color=barcolor[status],
label=status,
edgecolor='black'
)
left = np.add(left, stats[status])
ax.set_yticks(ypos)
ax.set_yticklabels(colnames, fontsize=15)
ax.set_ylim(0.5, len(colnames)+0.5)
xlim_max = 5 * ((int(left[0]) // 5) + 1)
ax.set_xticks(range(0, xlim_max+1, 5))
ax.set_xlim(0, xlim_max)
ax.grid(axis='x', linestyle='--')
fig.savefig('Class_status.png', bbox_inches='tight', dpi=100)
plt.close()
### for printing
def _get_value_rowcol(df, value):
rowcols = []
for i in range(df.shape[0]):
for j in range(df.shape[1]):
if df.iloc[i, j] == value:
rowcols.append((i, j))
return rowcols
def _print_new_attempted(analytics_df, analytics_html, rowcols):
print('NEW ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} attempted {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _print_gone_attempted(analytics_df, rowcols):
print('THESE ATTEMPTS ARE SOLVED'.center(70, '*'))
for row, col in rowcols:
print('{} finished {}!'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col]))
print('*'*70)
def _print_old_attempted(analytics_df, analytics_html, rowcols):
print('OLD ATTEMPTS'.center(70, '*'))
for row, col in rowcols:
print('{} is still trying {}!\n{}\n'.format(analytics_df.iloc[row, 0],
analytics_df.columns[col],
analytics_html.iloc[row, col]
))
print('*'*70)
def _compare_analytics_dfs(analytics_df, analytics_html, oldpath='./old_analytics_df.pickle'):
if not os.path.exists(oldpath):
rowcols = _get_value_rowcol(analytics_df, 'attempted')
_print_gone_attempted(analytics_df, [])
_print_old_attempted(analytics_df, analytics_html, [])
_print_new_attempted(analytics_df, analytics_html, rowcols)
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### attempts gone
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
_print_gone_attempted(analytics_df, rowcols)
### old attempts
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
_print_old_attempted(analytics_df, analytics_html, rowcols)
### new attempts
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
_print_new_attempted(analytics_df, analytics_html, rowcols)
analytics_df.to_pickle(oldpath)
def _get_html_table(analytics_df, analytics_html, rowcols):
html_table = []
for row, col in rowcols:
name = analytics_df.iloc[row, 0]
question_name = analytics_df.columns[col]
url = analytics_html.iloc[row, col]
url = f'<a href="{url}" target="_blank">{url}</a>'
html_table.append([name, question_name, url])
return pd.DataFrame(html_table, columns=['NAME', 'QUESTION', 'WORKSPACE'])
def _make_html(analytics_df, analytics_html, oldpath):
html_content = ''
time_update = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
### The basic information for the course
tut_info = f'''<h2>TUTCODE {TUTCODE} UPDATED @ {time_update}</h2><hr>\n'''
html_content += tut_info
# if there is no old pickle
if not os.path.exists(oldpath):
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
rowcols = _get_value_rowcol(analytics_df, 'attempted')
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
html_content += '<p> no old attempts solved</p>\n'
else:
old_analytics_df = pd.read_pickle(oldpath)
oldatttab = old_analytics_df == 'attempted'
changetab = analytics_df != old_analytics_df
newatttab = analytics_df == 'attempted'
### new attempts
html_content += '<h3>NEW ATTEMPTS</h3>\n'
newatt_ = (newatttab & changetab)
rowcols = _get_value_rowcol(newatt_, True)
if len(rowcols) != 0:
newatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += newatt_.to_html(escape=False)
else:
html_content += '<p> no new attempts</p>\n'
###
html_content += '<hr><h3>OLD ATTEMPTS</h3>\n'
oldatt_ = (oldatttab & newatttab)
rowcols = _get_value_rowcol(oldatt_, True)
if len(rowcols) != 0:
oldatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += oldatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts</p>\n'
### attempts are gone
html_content += '<hr><h3>ATTEMPTS SOLVED</h3>\n'
goneatt_ = (oldatttab & changetab)
rowcols = _get_value_rowcol(goneatt_, True)
if len(rowcols) != 0:
goneatt_ = _get_html_table(analytics_df, analytics_html, rowcols)
html_content += goneatt_.to_html(escape=False)
else:
html_content += '<p> no old attempts solved</p>\n'
html_content += '<hr>\n'
html_content += '<h3>CLASS MONITORING</h3>\n'
    html_content += '<a href="./Class_status.png"><img src="Class_status.png" width="1000"></a>'
with open('monitor.html', 'w', encoding='utf-8') as fp:
fp.write(html_content)
def _check_login(driver):
if 'Log in to continue' in driver.page_source:
return True
return False
def _manually_check():
### read settings
with open('./setup.py') as fp:
code = fp.read()
exec(code, globals())
if os.path.exists(OLDPICKLEPATH):
os.remove(OLDPICKLEPATH)
### start!
if not OPEN_WITH_CACHE:
driver = _start_webdriver(BROWSER, DRIVERPATH)
elif BROWSER.lower() == 'chrome':
_open_browser_cmd(PORT, CACHE_DIR)
driver = _connect_selenium(DRIVERPATH, PORT, CACHE_DIR)
else:
raise NotImplementedError('NOT IMPLEMENTED')
driver.get(EDURL)
wait = input('Please wait till the webpage responds!')
while _check_login(driver):
status_code = input('Please Log in Ed first!!!'.center(70, '+'))
print(f'The Tutorial Code is {TUTCODE}')
# tutnew = input("Input the new TUTCODE if it is not correct, or press enter")
# if tutnew:
# TUTCODE = tutnew
### starting the loop!
break_sign = ''
while break_sign != 'q':
driver.refresh()
inputbox = _find_inputbox(driver)
_search_tut(inputbox, TUTCODE)
### get analytics dataframe
while not _check_search_loaded(driver, TUTCODE):
sleep(0.3)
analytics_df, analytics_html, colattrs = _get_analytics_table(driver)
stats, colnames = _prepare_code_plotting(analytics_df, colattrs)
_plot_code_status(stats, colnames)
_make_html(analytics_df, analytics_html, OLDPICKLEPATH)
_compare_analytics_dfs(analytics_df, analytics_html, OLDPICKLEPATH)
print("Please check './monitor.html' for a webpage version!")
break_sign = input('Type "q" to quit! Press Enter to continue! ')
print('\n\n')
driver.quit()
if CLEAN:
os.remove(OLDPICKLEPATH)
os.remove('./Class_status.png')
print('Thanks for using!'.center(70, '-'))
if __name__ == '__main__':
# pass
_manually_check()
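# The monitor reads its configuration by exec()-ing ./setup.py, which must define the
# globals used above. A minimal sketch with illustrative values (paths, port and course
# URL are assumptions, not part of this file):
#
#     BROWSER = 'chrome'
#     DRIVERPATH = './chromedriver.exe'
#     OPEN_WITH_CACHE = True
#     PORT = 9222
#     CACHE_DIR = 'C:/selenium_cache'
#     EDURL = 'https://edstem.org/courses/.../analytics'
#     TUTCODE = 'T01A'
#     OLDPICKLEPATH = './old_analytics_df.pickle'
#     CLEAN = True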
| 32.491935
| 97
| 0.584636
| 1,828
| 16,116
| 4.954048
| 0.219365
| 0.060733
| 0.037544
| 0.045053
| 0.358989
| 0.295163
| 0.246577
| 0.215769
| 0.185844
| 0.122239
| 0
| 0.010756
| 0.296227
| 16,116
| 496
| 98
| 32.491935
| 0.787692
| 0.139737
| 0
| 0.254181
| 0
| 0.006689
| 0.12183
| 0.017759
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.036789
| 0
| 0.183946
| 0.073579
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b09026b586af08d54afda4ec0f2c75827cc35592
| 419
|
py
|
Python
|
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
Random-Programs/optimization/simonSays/simonSays.py
|
naumoff0/Archive
|
d4ad2da89abb1576dd5a7c72ded6bf9b45c3f610
|
[
"MIT"
] | null | null | null |
import random
import time
import os


def play():
    # lives, per-digit display delay (seconds) and current number of digits
    lives, delay, digits = 3, 0.5, 1
    input("Opp\n0)Yes\n1)No\n:")  # original prompt kept; the answer is not used
    while lives > 0:
        number = random_digits(digits)
        print("Lives:" + str(lives) + "\nSimon Says:" + str(number))
        time.sleep(delay * digits)
        os.system("clear")
        if int(input("RPT\n>> ")) != number:
            lives -= 1
        else:
            digits += 1
    print("PTS:" + str(digits - 3))


def random_digits(count, text=""):
    # Build a random integer with the requested number of digits (leading zeros drop).
    for _ in range(count):
        text = text + str(random.randint(0, 9))
    return int(text)


play()
| 26.1875
| 57
| 0.46778
| 78
| 419
| 2.5
| 0.551282
| 0.020513
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.28401
| 419
| 16
| 58
| 26.1875
| 0.61
| 0
| 0
| 0
| 0
| 0
| 0.130952
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b090822888b5d41d4c5adb4d5a180180c92743de
| 1,572
|
py
|
Python
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 14
|
2016-10-16T13:26:05.000Z
|
2021-11-09T11:40:52.000Z
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 61
|
2016-09-19T10:45:56.000Z
|
2021-11-10T13:53:06.000Z
|
quantarhei/symbolic/lang.py
|
slamavl/quantarhei
|
d822bc2db86152c418e330a9152e7866869776f7
|
[
"MIT"
] | 21
|
2016-08-30T09:09:28.000Z
|
2022-03-30T03:16:35.000Z
|
# -*- coding: utf-8 -*-
def rename_function(ss, oldname, newname):
"""Replaces all occurences of a name by a new name
"""
#return ss.replace("conjugate","numpy.conj")
return ss.replace(oldname, newname)
def fce2array(sr, pat):
"""Converts functions into arrays
"""
se = "".join(sr)
so = ""
ln = len(se)
while ln > 0:
# find pattern
pos = se.find(pat)
if pos < 0:
break
# position just behind the pattern
pos += len(pat)
sl = list(se)
# exchange ( for [
if sl[pos] == "(":
sl[pos] = "["
se = "".join(sl)
# save everything in front of the pattern
so += se[0:pos]
se = se[pos:ln]
        # find closing bracket
        pos2 = se.find(")")
        # exchange ) for ]
sl = list(se)
if sl[pos2] == ")":
sl[pos2] = "]"
se = "".join(sl)
ln = len(se)
so += se
return so
def python_code(ss, arrays=None):
"""Generate Python code with numpy functions
"""
sr = rename_function(ss,"conjugate","numpy.conj")
sr = rename_function(sr,"exp","numpy.exp")
if arrays is not None:
for ar in arrays:
sr = fce2array(sr,ar)
return sr
def fortran_code(ss, arrays=None):
"""Generate Fortran code with numpy functions
"""
sr = rename_function(ss,"conjugate","conjg")
#sr = rename_function(sr,"exp","numpy.exp")
#for ar in arrays:
# sr = fce2array(sr,ar)
return sr
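if __name__ == "__main__":
    # Small usage sketch; the symbolic expression below is illustrative only.
    expr = "conjugate(rho(1))*exp(x)"
    print(python_code(expr, arrays=["rho"]))   # -> numpy.conj(rho[1])*numpy.exp(x)
    print(fortran_code(expr))                  # -> conjg(rho(1))*exp(x)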
| 24.952381
| 57
| 0.512087
| 196
| 1,572
| 4.071429
| 0.346939
| 0.087719
| 0.080201
| 0.0401
| 0.345865
| 0.285714
| 0.285714
| 0.213033
| 0.213033
| 0.090226
| 0
| 0.009756
| 0.347964
| 1,572
| 62
| 58
| 25.354839
| 0.76878
| 0.316794
| 0
| 0.228571
| 0
| 0
| 0.047939
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0
| 0
| 0.228571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b093e8d1dc7ce6bd823606fd89573b9ddd010409
| 5,246
|
py
|
Python
|
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
fashion_segmentation/code/main.py
|
TayaPenskaya/Diploma
|
dee4e13eccdd0d0ddc4f667d2eb94260a7ed3847
|
[
"MIT"
] | null | null | null |
import argparse
import os
import numpy as np
import matplotlib.pyplot as plt
import torch
import yaml
import cv2
from PIL import Image, ImageFilter
from base64 import b64decode, b64encode
import io
from predictors.predictor import Predictor
from src.models.modnet import MODNet
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
class Segmentation:
def __init__(self):
config_path = './configs/config.yml'
with open(config_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
config['network']['use_cuda'] = config['network']['use_cuda'] and torch.cuda.is_available()
self.predictor = Predictor(config, checkpoint_path='./experiments/checkpoint_last.pth.tar')
self.modnet = MODNet(backbone_pretrained=False)
self.modnet = nn.DataParallel(self.modnet)
self.modnet.load_state_dict(torch.load('./pretrained/modnet_photographic_portrait_matting.ckpt', map_location=torch.device('cpu')))
self.modnet.eval()
def get_matte(self, im):
ref_size = 512
im_transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
]
)
im = np.asarray(im)
if len(im.shape) == 2:
im = im[:, :, None]
if im.shape[2] == 1:
im = np.repeat(im, 3, axis=2)
elif im.shape[2] == 4:
im = im[:, :, 0:3]
# convert image to PyTorch tensor
im = Image.fromarray(im)
im = im_transform(im)
# add mini-batch dim
im = im[None, :, :, :]
# resize image for input
im_b, im_c, im_h, im_w = im.shape
if max(im_h, im_w) < ref_size or min(im_h, im_w) > ref_size:
if im_w >= im_h:
im_rh = ref_size
im_rw = int(im_w / im_h * ref_size)
elif im_w < im_h:
im_rw = ref_size
im_rh = int(im_h / im_w * ref_size)
else:
im_rh = im_h
im_rw = im_w
im_rw = im_rw - im_rw % 32
im_rh = im_rh - im_rh % 32
im = F.interpolate(im, size=(im_rh, im_rw), mode='area')
# inference
_, _, matte = self.modnet(im, True)
# resize and save matte
matte = F.interpolate(matte, size=(im_h, im_w), mode='area')
matte = matte[0][0].data.cpu().numpy()
Image.fromarray(((matte * 255).astype('uint8')), mode='L').save('./results/matte.jpg')
return matte * 255
def get_image(self, image, matte):
image = np.asarray(image)
if len(image.shape) == 2:
image = image[:, :, None]
if image.shape[2] == 1:
image = np.repeat(image, 3, axis=2)
elif image.shape[2] == 4:
image = image[:, :, 0:3]
matte = np.repeat(np.asarray(matte)[:, :, None], 3, axis=2)
mt = Image.fromarray(np.uint8(matte)).convert("RGBA")
mt = mt.filter(ImageFilter.ModeFilter(size=30))
matte_blur = np.array(mt.getdata()) / 255
matte_blur = matte_blur[:, :3]
matte = matte / 255
foreground = image * matte + np.full(image.shape, 255) * (1 - matte)
img = Image.fromarray(np.uint8(foreground)).convert("RGBA")
datas = img.getdata()
newData = []
width, height, _ = foreground.shape
for x in range(width):
for y in range(height):
newData.append(
(255, 255, 255, 0) if np.all(matte_blur[x * height + y] < 0.1) else datas[x * height + y])
if img.mode in ("RGBA", "P"):
img = img.convert("RGB")
img.putdata(newData)
img.save('./results/segm.jpg')
return img
def get_segmentation(self, img):
imgdata = b64decode(str(img))
img = Image.open(io.BytesIO(imgdata))
#img = Image.open(img)
image, prediction = self.predictor.segment_image(img)
my_cm = plt.get_cmap('nipy_spectral')
plt.imsave('./results/tmp.jpg', prediction, cmap=my_cm)
prediction = cv2.imread('./results/tmp.jpg')
added_image = cv2.addWeighted(image.astype(int),0.5,prediction.astype(int),0.3,0)
added_image = cv2.cvtColor(np.uint8(added_image), cv2.COLOR_BGR2RGB)
cv2.imwrite('./results/res.jpg', added_image)
matte = self.get_matte(Image.open('./results/res.jpg'))
segm = self.get_image(Image.open('./results/res.jpg'), matte)
is_success, buffer = cv2.imencode(".jpg", cv2.imread('./results/segm.jpg'))
io_buf = io.BytesIO(buffer)
#return "ku"
return b64encode(io_buf.getvalue()).decode("utf-8")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='This is a program that gets image detailed segmentation.')
parser.add_argument('-i', '--image', help='image in base64')
args = parser.parse_args()
if args.image is None:
raise Exception('missing --image IMAGE')
else:
s = Segmentation()
print(s.get_segmentation(args.image))
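# Invocation sketch (paths are illustrative): the script expects the image passed as a
# base64 string, e.g.
#
#     import base64
#     with open('photo.jpg', 'rb') as fh:
#         encoded = base64.b64encode(fh.read()).decode('ascii')
#     # python main.py --image <encoded>
#
# Checkpoints are expected under ./experiments and ./pretrained as hard-coded above.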
| 32.993711
| 139
| 0.573199
| 694
| 5,246
| 4.193084
| 0.298271
| 0.009278
| 0.013746
| 0.006873
| 0.038144
| 0.017526
| 0.004124
| 0.004124
| 0.004124
| 0.004124
| 0
| 0.027755
| 0.292604
| 5,246
| 158
| 140
| 33.202532
| 0.7564
| 0.026115
| 0
| 0.034483
| 0
| 0
| 0.087238
| 0.01784
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.206897
| 0.008621
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0975e691b95560eeb7040b6b852833062f50974
| 2,245
|
py
|
Python
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 5
|
2022-01-26T13:03:12.000Z
|
2022-01-27T03:59:09.000Z
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 1
|
2022-03-31T08:33:12.000Z
|
2022-03-31T08:35:55.000Z
|
mmdet3d/ops/dgcnn_modules/dgcnn_fa_module.py
|
maskjp/mmdetection3d
|
98f332372b1a4c82bc2d57588a5d764f4176c869
|
[
"Apache-2.0"
] | 1
|
2022-03-30T04:08:39.000Z
|
2022-03-30T04:08:39.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32
from torch import nn as nn
class DGCNNFAModule(BaseModule):
"""Point feature aggregation module used in DGCNN.
Aggregate all the features of points.
Args:
mlp_channels (list[int]): List of mlp channels.
norm_cfg (dict, optional): Type of normalization method.
Defaults to dict(type='BN1d').
act_cfg (dict, optional): Type of activation method.
Defaults to dict(type='ReLU').
init_cfg (dict, optional): Initialization config. Defaults to None.
"""
def __init__(self,
mlp_channels,
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.fp16_enabled = False
self.mlps = nn.Sequential()
for i in range(len(mlp_channels) - 1):
self.mlps.add_module(
f'layer{i}',
ConvModule(
mlp_channels[i],
mlp_channels[i + 1],
kernel_size=(1, ),
stride=(1, ),
conv_cfg=dict(type='Conv1d'),
norm_cfg=norm_cfg,
act_cfg=act_cfg))
@force_fp32()
def forward(self, points):
"""forward.
Args:
points (List[Tensor]): tensor of the features to be aggregated.
Returns:
Tensor: (B, N, M) M = mlp[-1], tensor of the output points.
"""
if len(points) > 1:
new_points = torch.cat(points[1:], dim=-1)
new_points = new_points.transpose(1, 2).contiguous() # (B, C, N)
new_points_copy = new_points
new_points = self.mlps(new_points)
new_fa_points = new_points.max(dim=-1, keepdim=True)[0]
new_fa_points = new_fa_points.repeat(1, 1, new_points.shape[-1])
new_points = torch.cat([new_fa_points, new_points_copy], dim=1)
new_points = new_points.transpose(1, 2).contiguous()
else:
new_points = points
return new_points
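# Usage sketch (requires torch and mmcv; shapes are illustrative):
#
#     feats = [torch.rand(2, 128, 64), torch.rand(2, 128, 64)]   # list of (B, N, C) tensors
#     fa = DGCNNFAModule(mlp_channels=[64, 1024])
#     out = fa(feats)   # (B, N, 1024 + 64): max-pooled global feature concatenated per point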
| 32.536232
| 77
| 0.559911
| 275
| 2,245
| 4.367273
| 0.367273
| 0.112406
| 0.041632
| 0.044963
| 0.283097
| 0.108243
| 0.071607
| 0.071607
| 0.071607
| 0.071607
| 0
| 0.018097
| 0.335412
| 2,245
| 68
| 78
| 33.014706
| 0.786863
| 0.279733
| 0
| 0.052632
| 0
| 0
| 0.014407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b098cf5dfcf4280fa0a7565a9bca4ad07ebf52e7
| 1,653
|
py
|
Python
|
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | 1
|
2016-08-11T18:49:44.000Z
|
2016-08-11T18:49:44.000Z
|
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | null | null | null |
dir_monch/dir_monch.py
|
shivaghose/nifty_tools
|
587ed6403222ee959f8e0789e66e61f6f187f80b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
'''
dir_monch: a utility to help shorten paths.
'''
from __future__ import print_function
from os.path import expanduser
from os import sep as os_sep
from sys import exit
import sys
def dir_monch(path):
# Substitute ~ for the home dir path when possible
HOME = expanduser("~")
if path.startswith(HOME):
path = path.replace(HOME, '~', 1)
MAX_PATH_LENGTH = 20
if len(path) < MAX_PATH_LENGTH:
return path
split_path = path.split(os_sep)
shortened_path = []
for directory in split_path:
end_idx = 1
short_name = directory[0:end_idx]
while (short_name in shortened_path) and end_idx < len(directory):
end_idx += 1
short_name = directory[0:end_idx]
shortened_path.append(short_name)
final_path = ''
for short_dir in shortened_path[0:-1]:
final_path += short_dir + os_sep
final_path += split_path[-1]
return final_path
def run_tests():
test_paths = ['/Users/shiva', '/Users/shiva/git', '/Users/shiva/anaconda2/bin/Assistant.app', \
'/etc/apache2/extra', '/bin', '/', '/A/A/A/A/A/A/A/A', 'aaa/aaa/aaa/aaa/aaa/', \
'/Users/shiva/this\ folder\ has\ spaces/folder']
expected_outputs = ['~', '~/git', '~/a/b/Assistant.app', '/etc/apache2/extra', '/bin', \
'/', '/A/A/A/A/A/A/A/A', 'a/aa/aaa/aaa/aaa/', '~/t/folder']
for input_str, expected_str in zip (test_paths, expected_outputs):
output = dir_monch(input_str)
assert output == expected_str
if __name__ == '__main__':
print(dir_monch(sys.argv[1]))
exit(0)
| 30.054545
| 99
| 0.61585
| 238
| 1,653
| 4.046218
| 0.344538
| 0.031153
| 0.040498
| 0.045691
| 0.146417
| 0.146417
| 0.146417
| 0.146417
| 0.145379
| 0.07892
| 0
| 0.011924
| 0.238959
| 1,653
| 54
| 100
| 30.611111
| 0.753577
| 0.068361
| 0
| 0.052632
| 0
| 0
| 0.178315
| 0.026127
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.052632
| false
| 0
| 0.131579
| 0
| 0.236842
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b099d05e9015241447612382401295c1e700fde0
| 8,519
|
py
|
Python
|
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | 1
|
2017-07-22T11:36:02.000Z
|
2017-07-22T11:36:02.000Z
|
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | null | null | null |
ball_catching/easy_catch/planner.py
|
shoefer/ball_catching
|
46b2e95894659347b563123c1c23742437755993
|
[
"MIT"
] | null | null | null |
import casadi as ca
import casadi.tools as cat
__author__ = 'belousov'
class Planner:
# ========================================================================
# Simple planning
# ========================================================================
@classmethod
def create_plan(cls, model, warm_start=False,
x0=0, lam_x0=0, lam_g0=0):
# Degrees of freedom for the optimizer
V = cat.struct_symSX([
(
cat.entry('X', repeat=model.n+1, struct=model.x),
cat.entry('U', repeat=model.n, struct=model.u)
)
])
# Box constraints
[lbx, ubx] = cls._create_box_constraints(model, V)
# Force the catcher to always look forward
# lbx['U', :, 'theta'] = ubx['U', :, 'theta'] = 0
# Non-linear constraints
[g, lbg, ubg] = cls._create_nonlinear_constraints(model, V)
# Objective function
J = cls._create_objective_function(model, V, warm_start)
# Formulate non-linear problem
nlp = ca.SXFunction('nlp', ca.nlpIn(x=V), ca.nlpOut(f=J, g=g))
op = {# Linear solver
#'linear_solver': 'ma57',
# Acceptable termination
'acceptable_iter': 5}
if warm_start:
op['warm_start_init_point'] = 'yes'
op['fixed_variable_treatment'] = 'make_constraint'
# Initialize solver
solver = ca.NlpSolver('solver', 'ipopt', nlp, op)
# Solve
if warm_start:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg,
lam_x0=lam_x0, lam_g0=lam_g0)
else:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
return V(sol['x']), sol['lam_x'], sol['lam_g']
# sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
# return V(sol['x']), sol['lam_x'], sol['lam_g']
@staticmethod
def _create_nonlinear_constraints(model, V):
g, lbg, ubg = [], [], []
for k in range(model.n):
# Multiple shooting
[xk_next] = model.F([V['X', k], V['U', k]])
g.append(xk_next - V['X', k+1])
lbg.append(ca.DMatrix.zeros(model.nx))
ubg.append(ca.DMatrix.zeros(model.nx))
# Control constraints
constraint_k = model._set_constraint(V, k)
g.append(constraint_k)
lbg.append(-ca.inf)
ubg.append(0)
g = ca.veccat(g)
lbg = ca.veccat(lbg)
ubg = ca.veccat(ubg)
return [g, lbg, ubg]
@staticmethod
def _create_objective_function(model, V, warm_start):
[final_cost] = model.cl([V['X', model.n]])
running_cost = 0
for k in range(model.n):
[stage_cost] = model.c([V['X', k], V['U', k]])
# Encourage looking at the ball
d = ca.veccat([ca.cos(V['X', k, 'psi'])*ca.cos(V['X', k, 'phi']),
ca.cos(V['X', k, 'psi'])*ca.sin(V['X', k, 'phi']),
ca.sin(V['X', k, 'psi'])])
r = ca.veccat([V['X', k, 'x_b'] - V['X', k, 'x_c'],
V['X', k, 'y_b'] - V['X', k, 'y_c'],
V['X', k, 'z_b']])
r_cos_omega = ca.mul(d.T, r)
if warm_start:
cos_omega = r_cos_omega / (ca.norm_2(r) + 1e-6)
stage_cost += 1e-1 * (1 - cos_omega)
else:
stage_cost -= 1e-1 * r_cos_omega * model.dt
running_cost += stage_cost
return final_cost + running_cost
# ========================================================================
# Common functions
# ========================================================================
@staticmethod
def _create_box_constraints(model, V):
lbx = V(-ca.inf)
ubx = V(ca.inf)
# Control limits
model._set_control_limits(lbx, ubx)
# State limits
model._set_state_limits(lbx, ubx)
# Initial state
lbx['X', 0] = ubx['X', 0] = model.m0
return [lbx, ubx]
# ========================================================================
# Belief space planning
# ========================================================================
@classmethod
def create_belief_plan(cls, model, warm_start=False,
x0=0, lam_x0=0, lam_g0=0):
# Degrees of freedom for the optimizer
V = cat.struct_symSX([
(
cat.entry('X', repeat=model.n+1, struct=model.x),
cat.entry('U', repeat=model.n, struct=model.u)
)
])
# Box constraints
[lbx, ubx] = cls._create_box_constraints(model, V)
# Non-linear constraints
[g, lbg, ubg] = cls._create_belief_nonlinear_constraints(model, V)
# Objective function
J = cls._create_belief_objective_function(model, V)
# Formulate non-linear problem
nlp = ca.SXFunction('nlp', ca.nlpIn(x=V), ca.nlpOut(f=J, g=g))
op = {# Linear solver
#'linear_solver': 'ma57',
# Warm start
# 'warm_start_init_point': 'yes',
# Termination
'max_iter': 1500,
'tol': 1e-6,
'constr_viol_tol': 1e-5,
'compl_inf_tol': 1e-4,
# Acceptable termination
'acceptable_tol': 1e-3,
'acceptable_iter': 5,
'acceptable_obj_change_tol': 1e-2,
# NLP
# 'fixed_variable_treatment': 'make_constraint',
# Quasi-Newton
'hessian_approximation': 'limited-memory',
'limited_memory_max_history': 5,
'limited_memory_max_skipping': 1}
if warm_start:
op['warm_start_init_point'] = 'yes'
op['fixed_variable_treatment'] = 'make_constraint'
# Initialize solver
solver = ca.NlpSolver('solver', 'ipopt', nlp, op)
# Solve
if warm_start:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg,
lam_x0=lam_x0, lam_g0=lam_g0)
else:
sol = solver(x0=x0, lbx=lbx, ubx=ubx, lbg=lbg, ubg=ubg)
return V(sol['x']), sol['lam_x'], sol['lam_g']
@staticmethod
def _create_belief_nonlinear_constraints(model, V):
"""Non-linear constraints for planning"""
bk = cat.struct_SX(model.b)
bk['S'] = model.b0['S']
g, lbg, ubg = [], [], []
for k in range(model.n):
# Belief propagation
bk['m'] = V['X', k]
[bk_next] = model.BF([bk, V['U', k]])
bk_next = model.b(bk_next)
# Multiple shooting
g.append(bk_next['m'] - V['X', k+1])
lbg.append(ca.DMatrix.zeros(model.nx))
ubg.append(ca.DMatrix.zeros(model.nx))
# Control constraints
constraint_k = model._set_constraint(V, k)
g.append(constraint_k)
lbg.append(-ca.inf)
ubg.append(0)
# Advance time
bk = bk_next
g = ca.veccat(g)
lbg = ca.veccat(lbg)
ubg = ca.veccat(ubg)
return [g, lbg, ubg]
@staticmethod
def _create_belief_objective_function(model, V):
# Simple cost
running_cost = 0
for k in range(model.n):
[stage_cost] = model.c([V['X', k], V['U', k]])
running_cost += stage_cost
[final_cost] = model.cl([V['X', model.n]])
# Uncertainty cost
running_uncertainty_cost = 0
bk = cat.struct_SX(model.b)
bk['S'] = model.b0['S']
for k in range(model.n):
# Belief propagation
bk['m'] = V['X', k]
[bk_next] = model.BF([bk, V['U', k]])
bk_next = model.b(bk_next)
# Accumulate cost
[stage_uncertainty_cost] = model.cS([bk_next])
running_uncertainty_cost += stage_uncertainty_cost
# Advance time
bk = bk_next
[final_uncertainty_cost] = model.cSl([bk_next])
return running_cost + final_cost +\
running_uncertainty_cost + final_uncertainty_cost
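# Note: Planner never builds the model itself. The methods above assume a model object
# exposing n, x, u, nx, dt, m0, b, b0, the CasADi functions F, BF, c, cl, cS and cSl,
# plus the helpers _set_constraint, _set_control_limits and _set_state_limits
# (all inferred from the calls in this class).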
| 32.515267
| 78
| 0.471182
| 994
| 8,519
| 3.853119
| 0.173038
| 0.009922
| 0.013316
| 0.016971
| 0.68564
| 0.64047
| 0.608616
| 0.585117
| 0.553786
| 0.518016
| 0
| 0.013565
| 0.35098
| 8,519
| 261
| 79
| 32.639847
| 0.679146
| 0.190985
| 0
| 0.621622
| 0
| 0
| 0.06561
| 0.027741
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047297
| false
| 0
| 0.013514
| 0
| 0.114865
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b09a552321fe21418a22b65a3d6582f20e8c9eea
| 4,526
|
py
|
Python
|
commtrack/links/gerrit.py
|
bregman-arie/commtrack
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 3
|
2020-01-14T10:15:40.000Z
|
2020-12-01T14:32:01.000Z
|
commtrack/links/gerrit.py
|
bregman-arie/commit-tracker
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 2
|
2018-12-24T12:16:58.000Z
|
2019-02-18T07:16:42.000Z
|
commtrack/links/gerrit.py
|
bregman-arie/commit-tracker
|
5359958dd07d1b9e868ec5276da6dde22f982d07
|
[
"Apache-2.0"
] | 1
|
2019-07-15T08:27:36.000Z
|
2019-07-15T08:27:36.000Z
|
# Copyright 2019 Arie Bregman
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import crayons
import json
import logging
import subprocess
import sys
from commtrack.gerrit import constants as const
from commtrack.gerrit import exceptions as exc
from commtrack.link import Link
LOG = logging.getLogger(__name__)
class Gerrit(Link):
"""Managing operations on Gerrit Code review system."""
def __init__(self, name, address, parameters):
super(Gerrit, self).__init__(name, address,
const.LINK_TYPE, parameters)
def get_basic_query_cmd(self, address):
"""Returns a very basic query command which extended based
on provided input from the user.
"""
return ['ssh', '-p', '29418',
address.strip('\"'),
'gerrit', 'query',
'limit:5',
'--format JSON']
def query(self):
"""Returns query result"""
query_cmd = self.get_basic_query_cmd(self.address)
if 'change_id' in self.params:
query_cmd.append('change:{}'.format(self.params['change_id']))
if 'project' in self.chain_params['global'] and self.chain_params[
'global']['project']:
query_cmd.append('project:{}'.format(self.chain_params['global']['project']))
if 'subject' in self.params:
query_cmd.append(self.params['subject'])
output = subprocess.check_output(query_cmd)
decoded_output = output.decode('utf-8')
query_result_li = decoded_output.split('\n')
# Handle multiple matches
if len(query_result_li) > 1 and self.chain_params['global']['commit']:
LOG.info(exc.multiple_matches())
sys.exit(2)
# return json.loads(query_result_li)
return query_result_li
def search(self, params=None, same_project=True):
"""Returns the result of searching the given change."""
self.verify_and_set_reqs(const.REQUIRED_PARAMS)
raw_result_li = self.query()
# Check if there is at least one result
if len(raw_result_li) < 3:
self.results.append("{} find such change.".format(crayons.red("Couldn't")))
else:
self.params['found'] = True
json_result_li = []
for res in raw_result_li:
if '"type":' not in res and res != '':
json_result_li.append(json.loads(res))
if len(json_result_li) > 1:
same_project = self.verify_same_project(json_result_li)
if same_project:
for result in json_result_li:
self.update_link_params(result)
self.results.append(self.process_result(result))
else:
LOG.error(exc.multiple_projects())
sys.exit(2)
return self.params
def verify_same_project(self, changes):
"""Returns true if all the changes belong to the same project."""
project = changes[0]['project']
for change in changes[1:]:
if change['project'] != project:
return False
return True
def update_link_params(self, data):
"""Update link parameters using data discovered during the query."""
for param in const.SINGLE_PROVIDED_PARAMS:
if param in data:
self.params[param] = data[param]
for param in const.MULTI_PROVIDED_PARAMS:
if param in data:
if param not in self.params:
self.params[param] = list()
self.params[param].append(data[param])
def process_result(self, data):
"""Returns adjusted result with only the relevant information."""
result_str = "Status in project {} branch {} is {}".format(
data['project'],
data['branch'],
self.colorize_result(data['status']))
return result_str
def colorize_result(self, status):
return const.COLORED_STATS[status]
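# Illustrative example (host and change id are assumptions): for an address of
# '"gerrit.example.org"' and change_id 'I0123abcd', query() shells out to roughly
#
#     ssh -p 29418 gerrit.example.org gerrit query limit:5 --format JSON change:I0123abcd
#
# and parses the newline-separated JSON records it returns.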
| 35.085271
| 89
| 0.617543
| 564
| 4,526
| 4.804965
| 0.338652
| 0.035424
| 0.02214
| 0.030996
| 0.089668
| 0.059041
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 0.280601
| 4,526
| 128
| 90
| 35.359375
| 0.825553
| 0.236191
| 0
| 0.075949
| 0
| 0
| 0.077014
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.101266
| false
| 0
| 0.101266
| 0.012658
| 0.303797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b09e1dcc4651bbea3e5a05f15eded482f3e5b822
| 2,097
|
py
|
Python
|
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
src/leetcode_106_construct_binary_tree_from_inorder_and_postorder_traversal.py
|
sungho-joo/leetcode2github
|
ce7730ef40f6051df23681dd3c0e1e657abba620
|
[
"MIT"
] | null | null | null |
# @l2g 106 python3
# [106] Construct Binary Tree from Inorder and Postorder Traversal
# Difficulty: Medium
# https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal
#
# Given two integer arrays inorder and postorder where inorder is the inorder traversal of a binary tree and postorder is the postorder traversal of the same tree,
# construct and return the binary tree.
#
# Example 1:
#
#
# Input: inorder = [9,3,15,20,7], postorder = [9,15,7,20,3]
# Output: [3,9,20,null,null,15,7]
#
# Example 2:
#
# Input: inorder = [-1], postorder = [-1]
# Output: [-1]
#
#
# Constraints:
#
# 1 <= inorder.length <= 3000
# postorder.length == inorder.length
# -3000 <= inorder[i], postorder[i] <= 3000
# inorder and postorder consist of unique values.
# Each value of postorder also appears in inorder.
# inorder is guaranteed to be the inorder traversal of the tree.
# postorder is guaranteed to be the postorder traversal of the tree.
#
#
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
from collections import defaultdict
from typing import List, Optional


# Concrete node definition so the annotations below resolve (mirrors the commented
# definition above).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def buildTree(self, inorder: List[int], postorder: List[int]) -> Optional[TreeNode]:
        # Map each inorder value to its index for O(1) lookups during recursion.
        inorder_map = defaultdict(list)
for i, val in enumerate(inorder):
inorder_map[val] = i
self.post_idx = len(postorder) - 1
def traverse(l, r):
# base case
if l > r:
return
cur_value = postorder[self.post_idx]
self.post_idx -= 1
cur_node = TreeNode(val=cur_value)
inorder_idx = inorder_map[cur_value]
# right subtree
cur_node.right = traverse(inorder_idx + 1, r)
# left subtree
cur_node.left = traverse(l, inorder_idx - 1)
return cur_node
return traverse(0, len(inorder) - 1)
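# Illustration using Example 1 from the problem statement above:
#     Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
# rebuilds the tree [3, 9, 20, null, null, 15, 7] (root 3, left child 9, right child 20
# with children 15 and 7).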
if __name__ == "__main__":
import os
import pytest
pytest.main([os.path.join("tests", "test_106.py")])
| 26.884615
| 163
| 0.637577
| 284
| 2,097
| 4.605634
| 0.341549
| 0.038226
| 0.058104
| 0.035168
| 0.144495
| 0.077982
| 0.077982
| 0.077982
| 0
| 0
| 0
| 0.036352
| 0.252265
| 2,097
| 77
| 164
| 27.233766
| 0.797832
| 0.532189
| 0
| 0
| 0
| 0
| 0.025532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0a07ddea1fef76e6d524883928e67b18be43ea3
| 2,695
|
py
|
Python
|
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
tornado/models/Session.py
|
maqg/wcrobot
|
7d026c1a34362c5434105c27c5bd25f08c6fabe2
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from conf.dbconfig import TB_SESSION
from core.err_code import DB_ERR, OCT_SUCCESS
from core.log import WARNING,DEBUG
from utils.commonUtil import getUuid, transToStr, transToObj
from utils.timeUtil import get_current_time, getStrTime
SESSION_EXPIRE_TIME = 86400 * 30 * 1000 # one month
class Session:
db = None
username = ""
myId = 0
cookie = { }
createTime = 0
expireTime = 0
dbObj = None
def __init__(self, db=None, myId=None, dbObj=None):
self.db = db
self.myId = myId
self.userId = ""
self.username = ""
self.role = 3
self.dbObj = dbObj
if (self.dbObj):
self.loadFromObj()
def init(self):
cond = "WHERE ID='%s' AND S_ExpireTime > %ld " % (self.myId, get_current_time())
dbObj = self.db.fetchone(TB_SESSION, cond)
if (not dbObj):
return -1
self.dbObj = dbObj
self.loadFromObj()
return 0
def add(self):
self.myId = getUuid()
self.createTime = get_current_time()
self.expireTime = get_current_time() + SESSION_EXPIRE_TIME
obj = {
"ID": self.myId,
"S_UserId": self.userId,
"S_UserName": self.username,
"S_UserType": self.role,
"S_Cookie": transToStr(self.cookie),
"S_CreateTime": self.createTime,
"S_ExpireTime": self.expireTime,
}
ret = self.db.insert(TB_SESSION, obj)
if ret == -1:
WARNING("add session %s error for db operation" % self.myId)
return DB_ERR
return OCT_SUCCESS
def delete(self):
cond = "WHERE ID='%s'" % self.myId
DEBUG("to delete session %s" % (self.myId))
ret = self.db.delete(TB_SESSION, cond=cond)
if ret == -1:
WARNING("delete session %s error for db operation" % self.myId)
return DB_ERR
return 0
def update(self):
obj = {
"S_ExpireTime": get_current_time() + SESSION_EXPIRE_TIME,
}
cond = "WHERE ID='%s'" % self.myId
ret = self.db.update(TB_SESSION, obj, cond=cond)
if ret == -1:
WARNING("update session %s error for db operation" % self.myId)
return DB_ERR
return 0
def loadFromObj(self):
self.myId = self.dbObj["ID"]
self.username = self.dbObj["S_UserName"]
self.role = self.dbObj["S_UserType"]
self.userId = self.dbObj["S_UserId"]
self.cookie = transToObj(self.dbObj["S_Cookie"])
self.createTime = self.dbObj["S_CreateTime"]
self.expireTime = self.dbObj["S_ExpireTime"]
return 0
def toObj(self):
return {
"id": self.myId,
"user": self.username,
"userId": self.userId,
"userRole":self.role,
"cookie": self.cookie,
"creatTime": getStrTime(self.createTime),
"expireTime": getStrTime(self.expireTime)
}
| 21.733871
| 83
| 0.640445
| 364
| 2,695
| 4.615385
| 0.217033
| 0.057143
| 0.035714
| 0.021429
| 0.223214
| 0.21131
| 0.146429
| 0.097619
| 0.097619
| 0.097619
| 0
| 0.011527
| 0.227458
| 2,695
| 123
| 84
| 21.910569
| 0.795389
| 0.017811
| 0
| 0.209302
| 0
| 0
| 0.151131
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081395
| false
| 0
| 0.05814
| 0.011628
| 0.348837
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0a17272de31b01f9feb7f36322ad30ad949bc47
| 1,379
|
py
|
Python
|
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
python/equalSubstring.py
|
l0latgithub/codediary
|
a0327d2ee1137a542886d0af85129692711cd68a
|
[
"MIT"
] | null | null | null |
class Solution:
def equalSubstring(self, s: str, t: str, maxCost: int) -> int:
"""
You are given two strings s and t of the same length.
You want to change s to t. Changing the i-th character
of s to i-th character of t costs |s[i] - t[i]| that is,
the absolute difference between the ASCII values of the characters.
You are also given an integer maxCost.
Return the maximum length of a substring of s that
can be changed to be the same as the corresponding
        substring of t with a cost less than or equal to maxCost.
If there is no substring from s that can be changed to
its corresponding substring from t, return 0.
"""
# runcost, lo, ans = 0, 0, 0
# for hi in range(len(s)):
# runcost += abs( ord(s[hi]) -ord(t[hi]) )
# while runcost>maxCost:
# runcost -= abs( ord(s[lo]) -ord(t[lo]) )
# lo+=1
# ans = max(ans, hi-lo+1)
# return ans
runcost, lo, ans = 0, 0, 0
for hi in range(len(s)):
runcost += abs( ord(s[hi]) -ord(t[hi]) )
if runcost>maxCost:
runcost -= abs( ord(s[lo]) -ord(t[lo]) )
lo+=1
return len(s)-lo
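# Example: Solution().equalSubstring('abcd', 'bcdf', 3) returns 3 -- the window
# 'abc' -> 'bcd' costs |a-b| + |b-c| + |c-d| = 3, and no longer window fits the budget.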
| 33.634146
| 75
| 0.503263
| 196
| 1,379
| 3.540816
| 0.372449
| 0.011527
| 0.074928
| 0.080692
| 0.319885
| 0.319885
| 0.26513
| 0.26513
| 0.26513
| 0.26513
| 0
| 0.012107
| 0.401015
| 1,379
| 41
| 76
| 33.634146
| 0.828087
| 0.602611
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0a5f7e6a00c493dc8a6dfec6c9d4ed52c54782b
| 1,165
|
py
|
Python
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 70
|
2018-02-24T07:50:59.000Z
|
2021-12-27T02:42:37.000Z
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 7
|
2018-05-31T00:50:19.000Z
|
2021-09-28T11:58:22.000Z
|
prog_vae/cfg_parser/parser_demo.py
|
Hanjun-Dai/sdvae
|
bd26ea949c496419634fd2cf4802fc8e19a9194c
|
[
"MIT"
] | 19
|
2019-01-11T10:56:00.000Z
|
2022-03-23T23:09:39.000Z
|
#!/usr/bin/env python2
import os
import nltk
import cfg_parser as parser
def main():
cfg_grammar_file = '../../dropbox/context_free_grammars/prog_leftskew.grammar'
grammar = parser.Grammar(cfg_grammar_file)
ts = parser.parse(
'v1=sin(v0);v2=v0*4;v3=v1/v2;v4=cos(v0);v5=v0*3;v6=sin(v1);v7=v3-v6;v8=v7+v5;v9=v8+v4;return:v9', grammar
)
t = ts[0]
print('(ugly) tree:')
print(t)
print()
print('for root:')
print(
        'symbol is %s, is it non-terminal = %s, its value is %s (of type %s)' %
(t.symbol, isinstance(t, parser.Nonterminal), t.symbol.symbol(), type(t.symbol.symbol()))
)
print(
'rule is %s, its left side is %s (of type %s), its right side is %s, a tuple '
'which each element can be either str (for terminal) or Nonterminal (for nonterminal)' % (
t.rule,
t.rule.lhs(),
type(t.rule.lhs()),
t.rule.rhs(),
)
)
import pdb, traceback, sys, code
if __name__ == '__main__':
try:
main()
except:
type, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
| 24.270833
| 113
| 0.572532
| 173
| 1,165
| 3.745665
| 0.491329
| 0.023148
| 0.04321
| 0.027778
| 0.030864
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031765
| 0.270386
| 1,165
| 47
| 114
| 24.787234
| 0.730588
| 0.018026
| 0
| 0.057143
| 0
| 0.057143
| 0.334208
| 0.132108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.114286
| 0
| 0.142857
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0a7fba1d79fe97862c63c08c78298201ee28e87
| 5,857
|
py
|
Python
|
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | 1
|
2018-12-11T14:51:43.000Z
|
2018-12-11T14:51:43.000Z
|
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | null | null | null |
cpf/python/training/auto_trade/action_suggester.py
|
nQuantums/tips
|
cdea2a0e778d2de14cd69882f6273c872c366f03
|
[
"MIT"
] | null | null | null |
import typing
import numpy as np
import trade_environment
def detect_turning_points(values: np.ndarray, gap: int) -> typing.Tuple[np.ndarray, np.ndarray]:
"""指定数列の折返しポイントの地点を検出する.
Args:
values: 数列.
gap: 折返し判定閾値、この値を超えて反転したら折返しと判断する.
Returns:
(折返しインデックス, 検出途中に生成した一定値以上距離を保って付いてくる値の数列) のタプル.
"""
indices = []
stalkers = np.empty((len(values),), dtype=np.int32)
last_value = int(values[0])
stalker = last_value
stalkers[0] = stalker
last_i = 0
for i in range(1, len(values)):
v = int(values[i])
up = last_value < stalker and stalker <= v
down = stalker < last_value and v <= stalker
if up or down:
delta_array = values[last_i:i + 1]
tpi = last_i + int(np.argmin(delta_array) if up else np.argmax(delta_array))
tpv = int(values[tpi])
indices.append(tpi)
last_i = i
stalker = tpv - gap if up else tpv + gap
# indices.append(i - 1)
# stalker = v - gap if up else v + gap
else:
d = v - stalker
if d < -gap:
stalker = v + gap
elif gap < d:
stalker = v - gap
stalkers[i] = stalker
last_value = v
return np.array(indices, dtype=np.int32), stalkers
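# Illustrative example (assumed input, not part of the original module):
#   detect_turning_points(np.array([0, 3, 1, 4]), gap=2) returns indices [1],
#   i.e. the local peak at value 3; the second array is the internal "stalker" trail.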
class TpActionSuggester:
"""予め折り返し点を探索し、それを用いて指定環境での状態からお勧めアクションを提示するクラス.
"""
def __init__(self, env: trade_environment.TradeEnvironment, spread_adj: int = 1) -> None:
        self.env = env  # trading environment
        self.threshould = int(np.rint(env.spread * spread_adj).item())  # entry threshold: do not enter if the gap between the current value and the turning value is at or below this value
        self.tp_indices = np.empty((0,), dtype=np.int32)  # indices of turning points within the episode
        self.tp_values = np.empty((0,), dtype=np.int32)  # values at each turning point
def start_episode(self) -> None:
"""トレード用環境のエピソード開始直後に呼び出す必要がある."""
values = self.env.episode_values
c = values[:, 3]
self.tp_indices, _ = detect_turning_points(c, self.threshould)
self.tp_values = c[self.tp_indices]
def get_next_turning_index(self) -> int:
"""次の折返しインデックスの取得."""
i1 = np.where(self.env.index_in_episode <= self.tp_indices)[0][:1]
return i1.item() if i1.size else -1
def get_suggested_action(self) -> int:
"""現状の状態でのお勧めアクションの取得."""
tp_indices = self.tp_indices # 折返し点インデックス列
tp_values = self.tp_values # 折り返し点値列
value = self.env.get_value() # 現在値
tp_idx = self.get_next_turning_index() # 未来の直近折り返し点インデックス
tp_delta = None # 現在値から次の折返し点の値への差
on_tp = False # 現在折り返し点上かどうか
if 0 <= tp_idx:
            # if the current step is exactly a turning point, the next turning point becomes the target
if tp_indices[tp_idx] == self.env.index_in_episode:
on_tp = True
tp_idx += 1
            # if there is still a turning point ahead, compute the difference from the current value
if tp_idx < len(tp_values):
tp_delta = tp_values[tp_idx] - value
threshould = self.threshould
        suggested_action = 0  # do nothing by default
if self.env.position_type == 0:
            # no position held: trade when the gap to the next turning value exceeds the threshold
if tp_delta is not None:
if threshould < tp_delta:
suggested_action = 1
elif tp_delta < -threshould:
suggested_action = 2
else:
            # handling while a position is already held
if on_tp:
                # currently on a turning point: prepare for the next turn
suggested_action = 3
if tp_delta is not None:
if threshould < tp_delta:
suggested_action = 1
elif tp_delta < -threshould:
suggested_action = 2
else:
                # between turning points: adjust the position as needed
suggested_action = 0
if tp_delta is not None:
if threshould < tp_delta and self.env.position_type != 1:
suggested_action = 1
elif tp_delta < -threshould and self.env.position_type != -1:
suggested_action = 2
# elif tp_delta * self.env.position_type < 0:
# suggested_action = 3
return suggested_action
class TpRewardAdjuster:
"""TpActionSuggester 用の指定アクションから報酬調整処理を行うクラス.
Args:
action_suggester: お勧めアクション提示オブジェクト.
adj_rate: 想定される損益から報酬調整量に換算する係数.
loss_cut_check: 適切に損切りできているかチェックを行うかどうか.
securing_profit_check: 適切に利確できているかチェックを行うかどうか.
"""
def __init__(self,
action_suggester: TpActionSuggester,
adj_rate: float = 0.01,
loss_cut_check: bool = False,
securing_profit_check: bool = False):
self.action_suggester = action_suggester
self.adj_rate = adj_rate
self.loss_cut_check = loss_cut_check
self.securing_profit_check = securing_profit_check
self.env = action_suggester.env
self.threshould = action_suggester.threshould
def adjust_reward(self, action: int) -> float:
"""現状の状態で指定のアクションを行った際の報酬調整料の取得."""
tp_indices = self.action_suggester.tp_indices # 折返し点インデックス列
tp_values = self.action_suggester.tp_values # 折り返し点値列
value = self.env.get_value() # 現在値
tp_idx = self.action_suggester.get_next_turning_index() # 未来の直近折り返し点インデックス
tp_delta = None # 現在値から次の折返し点の値への差
on_tp = False # 現在折り返し点上かどうか
if 0 <= tp_idx:
            # if the current step is exactly a turning point, the next turning point becomes the target
if tp_indices[tp_idx] == self.env.index_in_episode:
on_tp = True
tp_idx += 1
            # if there is still a turning point ahead, compute the difference from the current value
if tp_idx < len(tp_values):
tp_delta = tp_values[tp_idx] - value
reward = 0.0
        # discard actions that would be ignored given the current position
if self.env.is_action_ignored(action):
action = 0
if 1 <= action and action <= 3 and self.env.position_type != 0:
            # when closing a position, adjust the reward by the profit/loss still left on the table
if tp_delta is not None and not on_tp:
reward -= self.adj_rate * self.env.position_type * tp_delta
if action == 0:
if tp_delta is not None:
if self.env.position_type == 0:
                    # an opportunity exists but nothing is done: decay the reward
if self.threshould < abs(tp_delta):
reward -= self.adj_rate * abs(tp_delta)
else:
pr = self.env.calc_positional_reward()
miss_position = tp_delta * self.env.position_type < 0
                    # keep decaying the reward while holding a wrong position
if self.loss_cut_check and miss_position and pr < 0:
reward += pr * self.adj_rate
                    # for a correct position, decay the reward the moment a take-profit chance is missed
if self.securing_profit_check and on_tp and miss_position and 0 < pr:
reward -= pr * self.adj_rate
elif action == 1 or action == 2:
            # adjust the reward by the trade direction and the distance to the next turning point
if tp_delta is not None:
reward += self.adj_rate * (1.0 if action == 1 else -1.0) * tp_delta
return reward
| 30.035897
| 111
| 0.701383
| 785
| 5,857
| 5.008917
| 0.185987
| 0.039166
| 0.030519
| 0.038657
| 0.315107
| 0.294252
| 0.247965
| 0.228637
| 0.209308
| 0.200661
| 0
| 0.013444
| 0.199932
| 5,857
| 194
| 112
| 30.190722
| 0.825651
| 0.21803
| 0
| 0.320313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054688
| false
| 0
| 0.023438
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0a9f0bcb8836537939df03b6e758465264c1d2b
| 330
|
py
|
Python
|
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | 2
|
2019-08-02T10:00:46.000Z
|
2020-07-27T02:25:23.000Z
|
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | null | null | null |
mesyparams.py
|
jkrueger1/stm_ethernet
|
f9fc87222a6a12f64d033c79a3858e16405ea524
|
[
"Apache-2.0"
] | 1
|
2019-12-21T11:40:30.000Z
|
2019-12-21T11:40:30.000Z
|
import sys
import socket
from mesylib import send_cmd
try:
addr = sys.argv[1]
rate = int(sys.argv[2])
except (ValueError, IndexError):
print('usage: mesyparams.py ipaddr rate')
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
send_cmd(sock, addr, 0xF1F0, 'IH', rate, 0)
print('configure ok')
| 20.625
| 55
| 0.70303
| 51
| 330
| 4.470588
| 0.627451
| 0.061404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025362
| 0.163636
| 330
| 15
| 56
| 22
| 0.800725
| 0
| 0
| 0
| 0
| 0
| 0.139394
| 0
| 0
| 0
| 0.018182
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0aa64db3850f67c0060437e85aa0d6cf6487d84
| 11,751
|
py
|
Python
|
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
Data/Stocks/Stock.py
|
yangtx2009/finance
|
5ba428f0495a16de89ea852e04e71bde8a00f9ba
|
[
"MIT"
] | null | null | null |
# from bin.x64.iFinDPy import *
import urllib.request
import json
from abc import ABC
import matplotlib.pyplot as plt
import pandas as pd
import os
from bs4 import BeautifulSoup
import requests
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from datetime import datetime
import time
import itertools
import copy
from sklearn import preprocessing
import random
import tqdm
from Data.Stocks.Loader import LoadFinishCondition, LoadThread
from Data.SqlClient import DatabaseClient
pd.set_option('display.max_columns', 20)
class Stock(ABC):
# https://www.jianshu.com/p/2f45fcb44771
def __init__(self):
super(Stock, self).__init__()
self.localDir = os.path.dirname(os.path.realpath(__file__))
self.collection = {}
self.selected_data = None
self.client = DatabaseClient()
self.m_industryList = pd.DataFrame(columns=["industry", "link"])
self.loadIndustryListFromDB()
self.m_stockList = pd.DataFrame(columns=["industry", "name", "symbol"])
self.loadStockListFromDB()
self.loadStocks()
def loadIndustryListFromDB(self):
df = self.client.readTableNames()
if "industry" in df.values:
self.m_industryList = self.client.readTable("industry")
print("industries\n", self.m_industryList)
else:
self.readIndustryList()
self.client.createTable("industry", ["industry VARCHAR(255)", "link VARCHAR(255)"], "industry")
self.client.storeData("industry", self.m_industryList, "append")
self.client.showTable("industry")
def loadStockListFromDB(self):
df = self.client.readTableNames()
if "stock" in df.values:
print("Stock already exists in database")
self.m_stockList = self.client.readTable("stock")
print("stocks\n", self.m_stockList)
else:
print("Cannot find 'stock' in database. Creating it ...")
self.client.createTable("stock", ["industry VARCHAR(255)", "name VARCHAR(255)", "symbol VARCHAR(255)"],
"symbol")
self.readStockList()
self.m_stockList = self.m_stockList.drop_duplicates(subset=['symbol'])
self.client.storeData("stock", self.m_stockList, "append")
self.client.showTable("stock")
def readHSIndex(self, p_draw=False):
url = "http://img1.money.126.net/data/hs/kline/day/times/1399001.json"
with urllib.request.urlopen(url) as url_file:
l_jsonData = json.loads(url_file.read().decode())
self.m_hsIndexTotal = pd.DataFrame(data={"closes": l_jsonData["closes"], "times": l_jsonData["times"]})
print("hsIndex total", self.m_hsIndexTotal.head(5))
url = "http://img1.money.126.net/data/hs/time/today/1399001.json"
with urllib.request.urlopen(url) as url_file:
l_jsonData = json.loads(url_file.read().decode())
print(l_jsonData.keys())
# self.m_hsIndexToday = pd.DataFrame(data={"data": data["closes"], "times": data["times"]})
# print("hsIndex today", self.m_hsIndexToday.head(5))
if p_draw:
self.m_hsIndexTotal.plot(x="times", y="closes", title="HS index", figsize=(10, 4))
plt.title("HS index", fontproperties='SimHei', fontsize='large')
plt.show()
def readIndustryList(self):
print("Reading industry list ...")
url = "http://stock.eastmoney.com/hangye.html"
r = requests.get(url)
r.encoding = 'utf-8'
soup = BeautifulSoup(r.text, 'html.parser')
hangye_div = soup.find('div', {'class': 'hot-hy-list'})
children = hangye_div.findChildren("a", recursive=True)
for child in children:
original_link = child.get("href")
code = int(original_link.split(".")[0].split("hy")[1])
link = "http://quote.eastmoney.com/center/boardlist.html#boards-BK{:04d}1".format(code)
self.m_industryList = self.m_industryList.append({"industry": child.get("title"), "link": link},
ignore_index=True)
print("Created new industry list")
# self.m_industryList.to_csv("IndustryList.csv")
# print(self.m_industryList["industry"], "\n")
def xpath_soup(self, element):
"""
Generate xpath of soup element
:param element: bs4 text or node
:return: xpath as string
"""
components = []
child = element if element.name else element.parent
for parent in child.parents:
"""
@type parent: bs4.element.Tag
"""
previous = itertools.islice(parent.children, 0, parent.contents.index(child))
xpath_tag = child.name
xpath_index = sum(1 for i in previous if i.name == xpath_tag) + 1
components.append(xpath_tag if xpath_index == 1 else '%s[%d]' % (xpath_tag, xpath_index))
child = parent
components.reverse()
return '/%s' % '/'.join(components)
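    # For example, a <td> that is the third cell of the second row of a table typically
    # yields an XPath like '/html/body/table/tbody/tr[2]/td[3]'; an index is added only
    # when the element is not the first sibling with that tag name.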
def readStockList(self):
"""
use selenium to wait for javascript in webpage loading data
:return:
"""
print("Reading stock list ...")
startTime = time.time()
fireFoxOptions = webdriver.FirefoxOptions()
fireFoxOptions.add_argument("--headless")
fireFoxOptions.add_argument('--disable-gpu')
fireFoxOptions.add_argument('--no-sandbox')
browser = webdriver.Firefox(firefox_options=fireFoxOptions, executable_path=r"geckodriver.exe")
for index, row in tqdm.tqdm(self.m_industryList.iterrows()):
print("{}/{}: Getting {} information ({})".format(index, len(self.m_industryList), row["industry"],
row['link']))
industry_url = row['link']
browser.get(industry_url)
# time.sleep(5)
WebDriverWait(browser, timeout=10).until(LoadFinishCondition()) # , poll_frequency=5
html = browser.page_source
soup = BeautifulSoup(html, 'html.parser')
while True:
next_button_soup = None
self.findStocks(soup, row["industry"])
next_button_soup = soup.find("a", {"class", "next paginate_button"})
if next_button_soup:
xpath = self.xpath_soup(next_button_soup)
next_button = browser.find_element_by_xpath(xpath)
if next_button:
next_button.click()
print("To next page")
WebDriverWait(browser, timeout=10).until(LoadFinishCondition())
html = browser.page_source
soup = BeautifulSoup(html, 'html.parser')
else:
print("Cannot find button component!")
break
else:
print("Cannot find next page button!")
break
self.m_stockList.to_csv("StockList.csv")
browser.quit()
print("Created new industry list")
# self.m_stockList.to_csv("StockList.csv")
print(self.m_stockList.head(5))
timeElapsed = (time.time() - startTime)
print("The loading of stock list takes {} seconds".format(timeElapsed))
def findStocks(self, soup, key):
table = soup.find('table', {'id': 'table_wrapper-table'})
stocks = table.findChild("tbody", recursive=True).findChildren("tr", recursive=True)
for stock in stocks:
values = stock.findChildren("td", recursive=True)
temp = {"industry": key}
for idx, value in enumerate(values):
if idx == 1:
temp["symbol"] = value.string
elif idx == 2:
temp["name"] = value.string
# print("adding stock:", temp)
self.m_stockList = self.m_stockList.append(temp, ignore_index=True)
def correctTimes(self):
industries = self.m_stockList.groupby("industry")
for name, industry in industries:
filename = os.path.join("industries", "{}.csv".format(name))
data = pd.read_csv(filename)
data["times"] = ["{}.{}.{}".format(str(t)[:4], str(t)[4:6], str(t)[6:8]) for t in data["times"].tolist()]
data.to_csv(filename)
print(data.head(10))
def chunkIt(self, seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
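    # e.g. chunkIt([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4, 5]]: slices of roughly equal size.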
def loadStocks(self, threadNum=30):
industries = self.m_stockList.groupby("industry")
if not os.path.exists("industries"):
os.makedirs("industries")
industryNames = list(industries.groups.keys())
        temp = list(industryNames)  # copy so we can safely remove entries from industryNames while iterating
for industryName in temp:
path = os.path.join("industries", "{}.csv".format(industryName))
if os.path.exists(path):
industryNames.remove(industryName)
self.collection[industryName] = pd.read_csv(path)
if len(industryNames) > 0:
grouped = self.chunkIt(industryNames, threadNum)
threads = list()
for n in range(threadNum):
threads.append(LoadThread(n, self, grouped[n]))
threads[n].start()
print("Waiting for reading stocks ...")
for n in range(threadNum):
threads[n].join()
else:
print("Already read all stocks ...")
def calculateIndustryPerformance(self, showRows=100):
print("Calculating industry performance ...")
industries = self.m_stockList.groupby("industry")
if os.path.exists(os.path.join(self.localDir, "joined.csv")):
joined = pd.read_csv(os.path.join(self.localDir, "joined.csv"))
else:
joined = None
for idx, (name, data) in enumerate(self.collection.items()):
averaged_industry = pd.DataFrame(columns=["times", name])
averaged_industry["times"] = data["times"].tolist()
data = data.fillna(0)
temp = copy.deepcopy(data).drop("times", axis=1)
nonZeroNum = temp.gt(0).sum(axis=1)
if name == "珠宝首饰":
print("珠宝首饰", nonZeroNum)
temp = temp.sum(axis=1) / nonZeroNum
averaged_industry[name] = temp
if joined is None:
joined = averaged_industry
else:
joined = pd.merge(joined, averaged_industry, on="times", how='outer')
joined = joined.sort_values(by="times")
joined.to_csv(os.path.join(self.localDir, "joined.csv"), index=False)
self.selected_data = joined.tail(showRows)
def getRandomStock(self):
industries = self.m_stockList.groupby("industry")
industryNames = list(industries.groups.keys())
industryName = random.sample(industryNames, 1)[0]
filename = os.path.join(self.localDir, "industries", "{}.csv".format(industryName))
if os.path.exists(filename):
data = pd.read_csv(filename)
titles = list(data.columns)
titles.remove("times")
return data[["times", titles[0]]]
else:
print("Cannot find {} in industries directory".format(filename))
return None
if __name__ == '__main__':
stock = Stock()
| 40.6609
| 117
| 0.580631
| 1,285
| 11,751
| 5.214786
| 0.255253
| 0.022385
| 0.031339
| 0.011342
| 0.187882
| 0.167288
| 0.119684
| 0.07193
| 0.039994
| 0.024474
| 0
| 0.011903
| 0.29223
| 11,751
| 288
| 118
| 40.802083
| 0.793796
| 0.047996
| 0
| 0.150442
| 0
| 0.013274
| 0.133762
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057522
| false
| 0
| 0.084071
| 0
| 0.163717
| 0.097345
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0ad7ea0b53d5e610e64b17510e844460d79cd69
| 947
|
py
|
Python
|
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
daily_learn/basic_pdf.py
|
thc128/limudScripts
|
4d44f82b00b9cd7bca58b56cbc0fba08914c89a1
|
[
"MIT"
] | null | null | null |
from PyPDF2 import PdfFileWriter, PdfFileReader
import daily_learn
def read_book(path):
input1 = PdfFileReader(open(path, "rb"))
my_book=[]
    for i in range(input1.getNumPages()):  # range works on both Python 2 and 3 (xrange is Python 2 only)
my_book.append(input1.getPage(i))
return my_book
def write_files(output_file,folder,name):
j=0
output = []
for wok in output_file:
output.append(PdfFileWriter())
for day in wok:
for page in day:
output[j].addPage(page)
        outputStream = open(folder + "\\week_" + str(j) + "_" + name, "wb")  # open() instead of the Python 2-only file()
output[j].write(outputStream)
j = j + 1
book1=read_book("korban.pdf")
book2=read_book("har_habait.pdf")
book3=read_book("RIF.pdf")
my_book1=daily_learn.Mybook(book1,2,5,17)
my_book2=daily_learn.Mybook(book2,1,5,0,2)
my_book3=daily_learn.Mybook(book3,2,7,205)
my_file = daily_learn.books_into_weeks([my_book1,my_book2,my_book3],4)
write_files(my_file,'splitted','torah.pdf')
| 30.548387
| 75
| 0.674762
| 144
| 947
| 4.236111
| 0.416667
| 0.081967
| 0.078689
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041505
| 0.18585
| 947
| 30
| 76
| 31.566667
| 0.749676
| 0
| 0
| 0
| 0
| 0
| 0.063358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.074074
| 0
| 0.185185
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0ad9a13b69f0d5d58e42c1013c83345a35544ed
| 592
|
py
|
Python
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 14
|
2020-02-16T15:36:31.000Z
|
2022-03-27T02:24:40.000Z
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 1
|
2020-11-23T16:16:33.000Z
|
2020-11-23T16:16:33.000Z
|
firmware/polyfit.py
|
mfkiwl/OpenXcvr
|
9bea6efd03cd246f16982f0fadafed684ac5ce1c
|
[
"MIT"
] | 4
|
2021-03-29T16:55:03.000Z
|
2022-01-23T16:43:59.000Z
|
import numpy as np
from matplotlib import pyplot as plt
def function_to_approximate(x):
return 128*(np.sin((np.pi*x/256)-(0.5*np.pi))+1)
def calculate_poly(z, x):
return np.round(z[3]+x*(z[2]+x*(z[1]+z[0]*x)))
def quantize(z, fraction_bits):
q = 2.0**fraction_bits
z = z * q
z = np.round(z)
z /= q
return z
a = np.arange(256)
z = np.polyfit(a, function_to_approximate(a), 4)
p = np.poly1d(z)
z = quantize(z, 18)
q = np.poly1d(z)
print(z * 2**18)
plt.plot(function_to_approximate(a))
plt.plot(p(a))
plt.plot(q(a))
plt.plot(calculate_poly(z, a))
plt.show()
| 17.939394
| 52
| 0.633446
| 119
| 592
| 3.067227
| 0.344538
| 0.076712
| 0.172603
| 0.120548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.053061
| 0.172297
| 592
| 32
| 53
| 18.5
| 0.691837
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0
| 0.086957
| 0.086957
| 0.347826
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0af71c36645d20ea3374d900810326b1c95631d
| 11,718
|
py
|
Python
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 25
|
2017-12-10T00:48:31.000Z
|
2022-03-25T01:29:13.000Z
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 298
|
2017-12-05T05:53:32.000Z
|
2022-03-21T19:29:03.000Z
|
apps/integration_tests/selenium_cases.py
|
CMSgov/bluebutton-web-server
|
3e7bfb049a2b6bd64fdc4eeae7512b461ccbe682
|
[
"Apache-2.0"
] | 31
|
2017-12-04T16:01:12.000Z
|
2021-09-26T22:34:55.000Z
|
from django.conf import settings
from enum import Enum
from selenium.webdriver.common.by import By
class Action(Enum):
LOAD_PAGE = 1
FIND_CLICK = 2
FIND = 3
FIND_SEND_KEY = 4
CHECK = 5
BACK = 6
LOGIN = 7
CONTAIN_TEXT = 8
GET_SAMPLE_TOKEN_START = 9
SLEEP = 10
TESTCLIENT_BUNDLE_LABEL_FMT = "Response (Bundle of {}), API version: {}"
TESTCLIENT_RESOURCE_LABEL_FMT = "Response ({}), API version: {}"
MESSAGE_NO_PERMISSION = "You do not have permission to perform this action."
TESTCASE_BANNER_FMT = "** {} TEST: {}, API: {}, STEP: {}, {}"
'''
UI Widget text: texts on e.g. buttons, links, labels etc.
'''
LNK_TXT_TESTCLIENT = "Test Client"
LNK_TXT_GET_TOKEN_V1 = "Get a Sample Authorization Token"
LNK_TXT_GET_TOKEN_V2 = "Get a Sample Authorization Token for v2"
LNK_TXT_AUTH_AS_BENE = "Authorize as a Beneficiary"
LNK_TXT_RESTART_TESTCLIENT = "restart testclient"
# FHIR search result bundle pagination
LNK_TXT_NAV_FIRST = "first"
LNK_TXT_NAV_NEXT = "next"
LNK_TXT_NAV_PREV = "previous"
LNK_TXT_NAV_LAST = "last"
LNK_TXT_NAV_SELF = "self"
# FHIR resources query page
LNK_TXT_PATIENT = "Patient"
LNK_TXT_EOB = "ExplanationOfBenefit"
LNK_TXT_COVERAGE = "Coverage"
LNK_TXT_PROFILE = "Profile"
LNK_TXT_METADATA = "FHIR Metadata"
LNK_TXT_OIDC_DISCOVERY = "OIDC Discovery"
# FHIR result page label H2
LAB_FHIR_RESULTPAGE_H2 = "h2"
CONTENT_FHIR_RESULTPAGE_PRE = "pre"
# MSLSX login form
TXT_FLD_SUB_MSLSX = "username"
TXT_FLD_HICN_MSLSX = "hicn"
TXT_FLD_MBI_MSLSX = "mbi"
TXT_FLD_VAL_SUB_MSLSX = "fred"
MSLSX_TXT_FLD_HICN_VAL = "1000044680"
MSLSX_TXT_FLD_MBI_VAL = "2SW4N00AA00"
MSLSX_CSS_BUTTON = "button"
# SLSX login form
SLSX_TXT_FLD_USERNAME = "username-textbox"
SLSX_TXT_FLD_PASSWORD = "password-textbox"
SLSX_TXT_FLD_USERNAME_VAL = "BBUser00000"
SLSX_TXT_FLD_PASSWORD_VAL = "PW00000!"
SLSX_CSS_BUTTON = "login-button"
# Demographic info access grant form
BTN_ID_GRANT_DEMO_ACCESS = "approve"
BTN_ID_DENY_DEMO_ACCESS = "deny"
BTN_ID_RADIO_NOT_SHARE = "label:nth-child(5)"
# API versions
API_V2 = "v2"
API_V1 = "v1"
BROWSERBACK = {
"display": "Back to FHIR resource page",
"action": Action.BACK,
}
WAIT_SECONDS = {
"display": "Sleep seconds...",
"action": Action.SLEEP,
"params": [3],
}
CHECK_TESTCLIENT_START_PAGE = {
"display": "Check it's on 'Test Client' start page",
"action": Action.FIND,
"params": [30, By.LINK_TEXT, LNK_TXT_GET_TOKEN_V1]
}
CLICK_TESTCLIENT = {
"display": "Click link 'Test Client'",
"action": Action.FIND_CLICK,
"params": [30, By.LINK_TEXT, LNK_TXT_TESTCLIENT]
}
CLICK_RADIO_NOT_SHARE = {
"display": "Click 'Share healthcare data, but not your personal info' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.CSS_SELECTOR, BTN_ID_RADIO_NOT_SHARE]
}
CLICK_AGREE_ACCESS = {
"display": "Click 'Agree' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, BTN_ID_GRANT_DEMO_ACCESS]
}
CLICK_DENY_ACCESS = {
"display": "Click 'Deny' on DEMO info grant form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, BTN_ID_DENY_DEMO_ACCESS]
}
CALL_LOGIN = {
"display": "Start login ...",
"action": Action.LOGIN,
}
SEQ_LOGIN_MSLSX = [
{
"display": "Input SUB(username)",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_SUB_MSLSX, TXT_FLD_VAL_SUB_MSLSX]
},
{
"display": "Input hicn",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_HICN_MSLSX, MSLSX_TXT_FLD_HICN_VAL]
},
{
"display": "Input mbi",
"action": Action.FIND_SEND_KEY,
"params": [20, By.NAME, TXT_FLD_MBI_MSLSX, MSLSX_TXT_FLD_MBI_VAL]
},
{
"display": "Click 'submit' on MSLSX login form",
"action": Action.FIND_CLICK,
"params": [20, By.CSS_SELECTOR, MSLSX_CSS_BUTTON]
},
]
SEQ_LOGIN_SLSX = [
{
"display": "Medicare.gov login username",
"action": Action.FIND_SEND_KEY,
"params": [20, By.ID, SLSX_TXT_FLD_USERNAME, SLSX_TXT_FLD_USERNAME_VAL]
},
{
"display": "Medicare.gov login password",
"action": Action.FIND_SEND_KEY,
"params": [20, By.ID, SLSX_TXT_FLD_PASSWORD, SLSX_TXT_FLD_PASSWORD_VAL]
},
{
"display": "Click 'submit' on SLSX login form",
"action": Action.FIND_CLICK,
"params": [20, By.ID, SLSX_CSS_BUTTON]
},
]
SEQ_AUTHORIZE_START = [
{
"display": "Load BB2 Landing Page ...",
"action": Action.LOAD_PAGE,
"params": [settings.HOSTNAME_URL]
},
CLICK_TESTCLIENT,
{
"display": "Click link to get sample token v1/v2",
"action": Action.GET_SAMPLE_TOKEN_START,
},
{
"display": "Click link 'Authorize as a Beneficiary' - start authorization",
"action": Action.FIND_CLICK,
"params": [30, By.LINK_TEXT, LNK_TXT_AUTH_AS_BENE]
},
]
SEQ_QUERY_FHIR_RESOURCES = [
{
"display": "Click 'Patient' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PATIENT]
},
{
"display": "Check Patient result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_PATIENT]
},
BROWSERBACK,
{
"display": "Click 'Coverage' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_COVERAGE]
},
{
"display": "Check Coverage result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_COVERAGE]
},
{
"display": "Check and click Coverage result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
CLICK_TESTCLIENT,
{
"display": "Click 'ExplanationOfBenefit' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_EOB]
},
{
"display": "Check ExplanationOfBenefit result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_EOB]
},
{
"display": "Check and click ExplanationOfBenefit result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
WAIT_SECONDS,
CLICK_TESTCLIENT,
WAIT_SECONDS,
{
"display": "Click 'Profile' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PROFILE]
},
WAIT_SECONDS,
{
"display": "Check Profile result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT,
"{} (OIDC Userinfo)".format(LNK_TXT_PROFILE)]
},
BROWSERBACK,
{
"display": "Click 'FHIR Metadata' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_METADATA]
},
{
"display": "Check FHIR Metadata result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_METADATA]
},
BROWSERBACK,
{
"display": "Click 'OIDC Discovery' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_OIDC_DISCOVERY]
},
{
"display": "Check OIDC Discovery result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_OIDC_DISCOVERY]
},
BROWSERBACK,
]
SEQ_QUERY_FHIR_RESOURCES_NO_DEMO = [
{
"display": "Click 'Patient' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PATIENT]
},
{
"display": "Check Patient result page content (<pre>) expect no permission message",
"action": Action.CONTAIN_TEXT,
"params": [20, By.TAG_NAME, CONTENT_FHIR_RESULTPAGE_PRE, MESSAGE_NO_PERMISSION]
},
BROWSERBACK,
{
"display": "Click 'Coverage' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_COVERAGE]
},
{
"display": "Check Coverage result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_COVERAGE]
},
{
"display": "Check and click Coverage result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
CLICK_TESTCLIENT,
{
"display": "Click 'ExplanationOfBenefit' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_EOB]
},
{
"display": "Check ExplanationOfBenefit result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_BUNDLE_LABEL_FMT, LNK_TXT_EOB]
},
{
"display": "Check and click ExplanationOfBenefit result page navigation links 'last'",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_NAV_LAST]
},
WAIT_SECONDS,
CLICK_TESTCLIENT,
WAIT_SECONDS,
{
"display": "Click 'Profile' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_PROFILE]
},
WAIT_SECONDS,
{
"display": "Check Profile result page content (<pre>) expect no permission message",
"action": Action.CONTAIN_TEXT,
"params": [20, By.TAG_NAME, CONTENT_FHIR_RESULTPAGE_PRE, MESSAGE_NO_PERMISSION]
},
BROWSERBACK,
{
"display": "Click 'FHIR Metadata' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_METADATA]
},
{
"display": "Check FHIR Metadata result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_METADATA]
},
BROWSERBACK,
{
"display": "Click 'OIDC Discovery' on FHIR resources page",
"action": Action.FIND_CLICK,
"params": [20, By.LINK_TEXT, LNK_TXT_OIDC_DISCOVERY]
},
{
"display": "Check OIDC Discovery result page title",
"action": Action.CHECK,
"params": [20, By.TAG_NAME, LAB_FHIR_RESULTPAGE_H2, TESTCLIENT_RESOURCE_LABEL_FMT, LNK_TXT_OIDC_DISCOVERY]
},
BROWSERBACK,
]
TESTS = {
"auth_grant_fhir_calls": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_AGREE_ACCESS,
{"sequence": SEQ_QUERY_FHIR_RESOURCES}
],
"auth_deny_fhir_calls": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_DENY_ACCESS,
CHECK_TESTCLIENT_START_PAGE
],
"auth_grant_w_no_demo": [
{"sequence": SEQ_AUTHORIZE_START},
CALL_LOGIN,
CLICK_RADIO_NOT_SHARE,
CLICK_AGREE_ACCESS,
{"sequence": SEQ_QUERY_FHIR_RESOURCES_NO_DEMO}
]
}
| 32.55
| 115
| 0.628179
| 1,417
| 11,718
| 4.868031
| 0.12844
| 0.080023
| 0.055088
| 0.07002
| 0.695999
| 0.624529
| 0.613656
| 0.604523
| 0.592201
| 0.56915
| 0
| 0.016592
| 0.254224
| 11,718
| 359
| 116
| 32.640669
| 0.772743
| 0.014422
| 0
| 0.359517
| 0
| 0
| 0.299631
| 0.005847
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.012085
| 0.009063
| 0
| 0.042296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0b389c0a24e47982d3180a11aacaf35518b9f2f
| 2,202
|
py
|
Python
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 7
|
2021-03-17T04:26:27.000Z
|
2021-04-21T16:48:40.000Z
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 4
|
2021-03-17T06:23:44.000Z
|
2021-11-20T13:49:56.000Z
|
extraction/extract.py
|
ejuarezg/manga109-demos
|
667da247683d467047a4bd03c171cb885a27c858
|
[
"MIT"
] | 3
|
2021-03-17T05:50:40.000Z
|
2021-04-26T02:12:12.000Z
|
import manga109api
import argparse
import os
import glob
from PIL import Image
def args_parser():
"""
:return: This function returns the manual input of book, annotation_type, and page count.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--book', type=str, help='Name of book to annotate from.')
parser.add_argument('--annotation', type=str, help='Type of annotation: "body", "face", "frame", "text".')
parser.add_argument('--pages', type=int, default=1, help='Number of pages to annotate.')
parser.add_argument('--preprocess', action='store_true', help='Preprocess the extracted images to have a uniform size.')
parser.add_argument('--size', type=int, default=128, help='The uniform size if using preprocessing.')
args = parser.parse_args()
return args
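# Example invocation (hypothetical book name; any Manga109 title works):
#   python extract.py --book ARMS --annotation face --pages 3 --preprocess --size 128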
if __name__ == "__main__":
ap = args_parser()
manga109_root_dir = "manga109extracted"
if not os.path.exists(manga109_root_dir):
os.makedirs(manga109_root_dir)
book = ap.book
page_count = ap.pages
file_count = [glob.glob(os.path.join(manga109_root_dir, '**', '*.*'), recursive=True)]
count = len(file_count[0])
for page_index in range(page_count):
tracker = 0
p = manga109api.Parser(root_dir="Manga109s_data")
annotation = p.get_annotation(book=book)
img = Image.open(p.img_path(book=book, index=page_index))
for annotation_type in [ap.annotation]:
rois = annotation["page"][page_index][annotation_type]
for roi in rois:
cropped = img.crop((roi["@xmin"], roi["@ymin"], roi["@xmax"], roi["@ymax"]))
image_x_dim, image_y_dim = cropped.size
if ap.preprocess:
cropped = cropped.resize((ap.size, ap.size), Image.ANTIALIAS)
if image_x_dim >= (ap.size / 2) and image_y_dim >= (ap.size / 2):
cropped.save("manga109extracted/%s_%d.jpg" % (ap.book, count))
count += 1
tracker += 1
print("Extracted %d %s images from page %d of %s's book." % (tracker, ap.annotation, page_index + 1, ap.book))
| 44.938776
| 125
| 0.616712
| 286
| 2,202
| 4.576923
| 0.356643
| 0.034377
| 0.064935
| 0.015279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022975
| 0.248865
| 2,202
| 48
| 126
| 45.875
| 0.76844
| 0.040418
| 0
| 0
| 0
| 0
| 0.196193
| 0.013177
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.125
| 0
| 0.175
| 0.025
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0b507ab20f27a7bafa822e29f3aeb4ce2fdbbe0
| 5,565
|
py
|
Python
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 388
|
2019-05-07T15:53:21.000Z
|
2022-03-28T20:29:46.000Z
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 1,286
|
2019-05-07T23:38:19.000Z
|
2022-03-31T10:44:16.000Z
|
libraries/botframework-streaming/botframework/streaming/payloads/header_serializer.py
|
andreikop/botbuilder-python
|
5e073e0c68fcbdc558133bdbd59a02453e597abe
|
[
"MIT"
] | 168
|
2019-05-14T20:23:25.000Z
|
2022-03-16T06:49:14.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from uuid import UUID
from typing import List
from botframework.streaming.transport import TransportConstants
from .models import Header
_CHAR_TO_BINARY_INT = {val.decode(): list(val)[0] for val in [b".", b"\n", b"1", b"0"]}
# TODO: consider abstracting the binary int list logic into a class for easier handling
class HeaderSerializer:
DELIMITER = _CHAR_TO_BINARY_INT["."]
TERMINATOR = _CHAR_TO_BINARY_INT["\n"]
END = _CHAR_TO_BINARY_INT["1"]
NOT_END = _CHAR_TO_BINARY_INT["0"]
TYPE_OFFSET = 0
TYPE_DELIMITER_OFFSET = 1
LENGTH_OFFSET = 2
LENGTH_LENGTH = 6
LENGTH_DELIMETER_OFFSET = 8
ID_OFFSET = 9
ID_LENGTH = 36
ID_DELIMETER_OFFSET = 45
END_OFFSET = 46
TERMINATOR_OFFSET = 47
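    # The offsets above describe a fixed 48-byte header laid out as:
    #   <type>.<6-digit payload length>.<36-char UUID>.<end flag '1'/'0'>\n
    # (1 + 1 + 6 + 1 + 36 + 1 + 1 + 1 = 48 bytes, matching TransportConstants.MAX_HEADER_LENGTH).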
@staticmethod
def serialize(
header: Header,
buffer: List[int],
offset: int, # pylint: disable=unused-argument
) -> int:
# write type
buffer[HeaderSerializer.TYPE_OFFSET] = HeaderSerializer._char_to_binary_int(
header.type
)
buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] = HeaderSerializer.DELIMITER
# write length
length_binary_array: List[int] = list(
HeaderSerializer._int_to_formatted_encoded_str(
header.payload_length, "{:06d}"
)
)
HeaderSerializer._write_in_buffer(
length_binary_array, buffer, HeaderSerializer.LENGTH_OFFSET
)
buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER
# write id
id_binary_array: List[int] = list(
HeaderSerializer._uuid_to_numeric_encoded_str(header.id)
)
HeaderSerializer._write_in_buffer(
id_binary_array, buffer, HeaderSerializer.ID_OFFSET
)
buffer[HeaderSerializer.ID_DELIMETER_OFFSET] = HeaderSerializer.DELIMITER
# write terminator
buffer[HeaderSerializer.END_OFFSET] = (
HeaderSerializer.END if header.end else HeaderSerializer.NOT_END
)
buffer[HeaderSerializer.TERMINATOR_OFFSET] = HeaderSerializer.TERMINATOR
return TransportConstants.MAX_HEADER_LENGTH
@staticmethod
def deserialize(
buffer: List[int], offset: int, count: int # pylint: disable=unused-argument
) -> Header:
if count != TransportConstants.MAX_HEADER_LENGTH:
raise ValueError("Cannot deserialize header, incorrect length")
header = Header(
type=HeaderSerializer._binary_int_to_char(
buffer[HeaderSerializer.TYPE_OFFSET]
)
)
if buffer[HeaderSerializer.TYPE_DELIMITER_OFFSET] != HeaderSerializer.DELIMITER:
raise ValueError("Header type delimeter is malformed")
length_str = HeaderSerializer._binary_array_to_str(
buffer[
HeaderSerializer.LENGTH_OFFSET : HeaderSerializer.LENGTH_OFFSET
+ HeaderSerializer.LENGTH_LENGTH
]
)
try:
length = int(length_str)
except Exception:
raise ValueError("Header length is malformed")
header.payload_length = length
if (
buffer[HeaderSerializer.LENGTH_DELIMETER_OFFSET]
!= HeaderSerializer.DELIMITER
):
raise ValueError("Header length delimeter is malformed")
identifier_str = HeaderSerializer._binary_array_to_str(
buffer[
HeaderSerializer.ID_OFFSET : HeaderSerializer.ID_OFFSET
+ HeaderSerializer.ID_LENGTH
]
)
try:
identifier = UUID(identifier_str)
except Exception:
raise ValueError("Header id is malformed")
header.id = identifier
if buffer[HeaderSerializer.ID_DELIMETER_OFFSET] != HeaderSerializer.DELIMITER:
raise ValueError("Header id delimeter is malformed")
if buffer[HeaderSerializer.END_OFFSET] not in [
HeaderSerializer.END,
HeaderSerializer.NOT_END,
]:
raise ValueError("Header end is malformed")
header.end = buffer[HeaderSerializer.END_OFFSET] == HeaderSerializer.END
if buffer[HeaderSerializer.TERMINATOR_OFFSET] != HeaderSerializer.TERMINATOR:
raise ValueError("Header terminator is malformed")
return header
@staticmethod
def _char_to_binary_int(char: str) -> int:
if len(char) != 1:
raise ValueError("Char to cast should be a str of exactly length 1")
unicode_list = list(char.encode())
if len(unicode_list) != 1:
raise ValueError("Char to cast should be in the ASCII domain")
return unicode_list[0]
@staticmethod
def _int_to_formatted_encoded_str(value: int, str_format: str) -> bytes:
return str_format.format(value).encode("ascii")
@staticmethod
def _uuid_to_numeric_encoded_str(value: UUID) -> bytes:
return str(value).encode("ascii")
@staticmethod
def _binary_int_to_char(binary_int: int) -> str:
return bytes([binary_int]).decode("ascii")
@staticmethod
def _binary_array_to_str(binary_array: List[int]) -> str:
return bytes(binary_array).decode("ascii")
@staticmethod
def _write_in_buffer(data: List[int], buffer: List[int], insert_index: int):
for byte_int in data:
buffer[insert_index] = byte_int
insert_index += 1
| 32.54386
| 88
| 0.65319
| 592
| 5,565
| 5.883446
| 0.1875
| 0.107379
| 0.024117
| 0.030146
| 0.427218
| 0.294861
| 0.207867
| 0.052254
| 0
| 0
| 0
| 0.006393
| 0.269182
| 5,565
| 170
| 89
| 32.735294
| 0.850012
| 0.051932
| 0
| 0.125984
| 0
| 0
| 0.070642
| 0
| 0
| 0
| 0
| 0.005882
| 0
| 1
| 0.062992
| false
| 0
| 0.031496
| 0.031496
| 0.267717
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0b80838d88129627ee88f66e99dd63a8662a687
| 6,849
|
py
|
Python
|
modules/tools/vehicle_calibration/preprocess.py
|
jzjonah/apollo
|
bc534789dc0548bf2d27f8d72fe255d5c5e4f951
|
[
"Apache-2.0"
] | 22,688
|
2017-07-04T23:17:19.000Z
|
2022-03-31T18:56:48.000Z
|
modules/tools/vehicle_calibration/preprocess.py
|
WJY-Mark/apollo
|
463fb82f9e979d02dcb25044e60931293ab2dba0
|
[
"Apache-2.0"
] | 4,804
|
2017-07-04T22:30:12.000Z
|
2022-03-31T12:58:21.000Z
|
modules/tools/vehicle_calibration/preprocess.py
|
WJY-Mark/apollo
|
463fb82f9e979d02dcb25044e60931293ab2dba0
|
[
"Apache-2.0"
] | 9,985
|
2017-07-04T22:01:17.000Z
|
2022-03-31T14:18:16.000Z
|
#!/usr/bin/env python3
###############################################################################
# Copyright 2020 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This module provides the preprocessing function of vehicle calibration data
"""
import os
import re
import shutil
import time
from absl import app
from absl import flags
from absl import logging
from datetime import datetime
from cyber.python.cyber_py3 import cyber
from modules.dreamview.proto import preprocess_table_pb2
from modules.tools.vehicle_calibration.sanity_check import sanity_check
flags.DEFINE_string('vehicle_type', '', 'The vehicle type to be calibrated')
flags.DEFINE_string('data_path', '/apollo/output', 'Default output data path')
flags.DEFINE_string('calibration_data_path',
'/apollo/modules/calibration/data',
'Default vehicle configuration file directory')
flags.DEFINE_string('config_file_name', 'vehicle_param.pb.txt',
'Default vehicle configuration file name')
flags.DEFINE_string('record_root_path', '/apollo/data/bag',
'Default record root path')
flags.DEFINE_integer(
'record_num', 1, 'The number of record folders '
'required for this calibration task')
FLAGS = flags.FLAGS
def main(argv):
cyber.init("Preprocessor")
preprocessor = Preprocessor()
task_dir = preprocessor.create_tree()
preprocessor.sanity_check_path(task_dir)
cyber.shutdown()
class Preprocessor(object):
def __init__(self):
self.record_num = FLAGS.record_num
self.vehicle_type = self.folder_case(FLAGS.vehicle_type)
self.config_file = self.get_config_path()
self.node = cyber.Node("vehicle_calibration_preprocessor")
self.writer = self.node.create_writer("/apollo/dreamview/progress",
preprocess_table_pb2.Progress,
10)
self.progress = preprocess_table_pb2.Progress()
self.progress.percentage = 0.0
self.progress.log_string = "Press the button to start preprocessing"
@staticmethod
def folder_case(str):
"""Convert a string from title case to folder case"""
return "_".join(str.lower().split(" "))
def create_if_not_exists(self, path):
"""Create dir if path does not exists"""
try:
if not os.path.exists(path):
os.makedirs(path)
                self.log_and_publish(f'Successfully created {path}')
except OSError:
self.log_and_publish(f'Failed to create: {path}', 'error')
return path
def get_config_path(self):
"""Get the configuration file of the specified vehicle type"""
return os.path.join(FLAGS.calibration_data_path, self.vehicle_type,
FLAGS.config_file_name)
def get_records_info(self):
"""Get records required for calibration"""
res = []
for dir in os.listdir(FLAGS.record_root_path):
match = re.match(r'(^\d{4}-\d{2}-\d{2})-(\d{2}-\d{2}-\d{2}_s$)',
dir)
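            # record folder names are expected to look like "2021-01-31-08-30-00_s"
            # (the date prefix is captured as group 1)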
if match is not None:
record_info = {}
record_info['rel_path'] = match.group()
record_info['abs_path'] = os.path.join(FLAGS.record_root_path,
match.group())
record_info['prefix'] = match.group(1)
res.append(record_info)
if len(res) < self.record_num:
self.log_and_publish(
f'The number of records in {FLAGS.record_root_path} '
f'is less than {self.record_num}', 'error')
res = sorted(res, key=lambda record: record['rel_path'],
reverse=True)[:self.record_num]
return res
def log_and_publish(self,
str,
logging_level="info",
status=preprocess_table_pb2.Status.UNKNOWN):
"""Publish the str by cyber writer"""
if logging_level == 'info':
logging.info(str)
elif logging_level == 'warn':
logging.warn(str)
elif logging_level == 'error':
logging.error(str)
elif logging_level == 'fatal':
logging.fatal(str)
else:
logging.info(str)
self.progress.log_string = str
self.progress.status = status
self.writer.write(self.progress)
time.sleep(0.5)
def create_tree(self):
"""Create file tree according to a specific order"""
task_dir = self.create_if_not_exists(
os.path.join(FLAGS.data_path,
'task' + datetime.now().strftime("-%Y-%m-%d-%H-%M")))
vehicle_dir = self.create_if_not_exists(
os.path.join(task_dir, self.vehicle_type))
records_dir = self.create_if_not_exists(
os.path.join(vehicle_dir, "Records"))
shutil.copy(self.config_file, vehicle_dir)
records_info = self.get_records_info()
finished_records = 0
self.progress.log_string = 'Start preprocessing...'
for iter in records_info:
sub_dir = self.create_if_not_exists(
os.path.join(records_dir, iter['prefix']))
shutil.copytree(iter['abs_path'],
os.path.join(sub_dir, iter['rel_path']))
finished_records += 1
self.progress.percentage = (
finished_records / self.record_num) * 80.0
self.writer.write(self.progress)
self.log_and_publish(
f'The file tree has been successfully created at {task_dir}.')
return task_dir
def sanity_check_path(self, path):
"""Sanity check wrapper"""
result, log_str = sanity_check(path)
if result is True:
self.progress.percentage = 100.0
self.progress.status = preprocess_table_pb2.Status.SUCCESS
else:
self.progress.status = preprocess_table_pb2.Status.FAIL
self.progress.log_string = log_str
self.writer.write(self.progress)
time.sleep(0.5)
if __name__ == "__main__":
app.run(main)
| 38.26257
| 79
| 0.604614
| 829
| 6,849
| 4.810615
| 0.268999
| 0.042126
| 0.017553
| 0.021314
| 0.181545
| 0.087262
| 0.07673
| 0.055667
| 0.053159
| 0
| 0
| 0.00825
| 0.274347
| 6,849
| 178
| 80
| 38.477528
| 0.794165
| 0.138414
| 0
| 0.085271
| 0
| 0.007752
| 0.15386
| 0.031299
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.085271
| 0
| 0.20155
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0b9d81b27101a0308d3e4f404ef228daa426733
| 2,847
|
py
|
Python
|
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
app/api/views.py
|
Olexsai2020/todo_app
|
f93faffefaaa78292930061867d4ecf772fa0add
|
[
"MIT"
] | null | null | null |
from django.utils.decorators import method_decorator
from rest_framework import viewsets, status, generics
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from drf_yasg.utils import swagger_auto_schema
from .models import Todo
from .serializers import UserSignupSerializer, UserLoginSerializer, \
TodoSerializer
class UserSignupView(generics.CreateAPIView):
'''
User Signup
Endpoint for registration new user
'''
serializer_class = UserSignupSerializer
permission_classes = (AllowAny, )
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
response = {'message': 'User signup successfully',
'result': 'New user created: ' + request.data['email']}
return Response(response, status=status.HTTP_201_CREATED)
class UserLoginView(generics.CreateAPIView):
'''
User Login
Endpoint for JWT Authorization
'''
serializer_class = UserLoginSerializer
permission_classes = (AllowAny, )
def post(self, request):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
response = {'message': 'User logged in successfully',
'result': 'User logged in: ' + request.data['email'],
'token': serializer.data['token']}
return Response(response, status=status.HTTP_200_OK)
@method_decorator(name='list', decorator=swagger_auto_schema(
operation_description="Endpoint for viewing todo list",
operation_summary='ToDo List',
))
@method_decorator(name='create', decorator=swagger_auto_schema(
operation_description="Endpoint for creation a new task",
operation_summary='Create New Task',
))
@method_decorator(name='retrieve', decorator=swagger_auto_schema(
operation_description="Endpoint for reading a task",
operation_summary='Read Task',
))
@method_decorator(name='update', decorator=swagger_auto_schema(
operation_description="Endpoint for updating a task",
operation_summary='Update Task',
))
@method_decorator(name='partial_update', decorator=swagger_auto_schema(
operation_description="Endpoint for partial updating a task",
operation_summary='Partial Update Task',
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
operation_description="Endpoint to delete a task",
operation_summary='Delete Task',
))
class TodoViewSet(viewsets.ModelViewSet):
queryset = Todo.objects.all()
serializer_class = TodoSerializer
permission_classes = (IsAuthenticated, )
authentication_class = JSONWebTokenAuthentication
| 36.5
| 75
| 0.736916
| 302
| 2,847
| 6.758278
| 0.307947
| 0.051445
| 0.058305
| 0.076433
| 0.385595
| 0.32876
| 0.291524
| 0.265066
| 0.181284
| 0.119549
| 0
| 0.002546
| 0.172111
| 2,847
| 77
| 76
| 36.974026
| 0.863386
| 0.031612
| 0
| 0.241379
| 0
| 0
| 0.157469
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.137931
| 0
| 0.396552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0bb6683aa9dcadc69b57d035b577b7055890820
| 8,244
|
py
|
Python
|
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
django_sqltools/common.py
|
yang0/django_sqltools
|
44e7208337815d2fe8361824d223ba518bf38221
|
[
"MIT"
] | null | null | null |
# _*_coding:utf-8_*_
from django.conf import settings
import uuid, os, json, logging, time, shutil
from datetime import datetime, date
from PIL import Image, ImageFile
import mimetypes
import re
logger = logging.getLogger(__name__)
def get_file_path(instance, filename):
folder = instance.__class__.__name__.lower() + datetime.now().strftime("/%Y/%m/%d")
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
return os.path.join(folder, filename)
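# The "fileInfo" strings handled below follow the format produced by uploadFile():
#   "<relative path>,<width>_<height>,<size in bytes>"  (width/height are 0_0 for non-images)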
def getFileUri(fileInfo):
""" 上传文件相对路径 """
if fileInfo is None or fileInfo == "":
return ""
fileList = fileInfo.split(",")
if len(fileList) == 3:
return fileList[0]
else:
return ""
def getFileName(fileInfo):
""" 上传文件相对路径 """
return re.findall(r'\w+\.\w+', fileInfo)[0]
def getPath(fileInfo):
""" 截取路径 """
return re.findall(r'[/\w]+[/]', fileInfo)[0]
def moveFile(fileUri, folder):
""" 将fileInfo中的文件移到某个folder """
if not os.path.exists(settings.MEDIA_ROOT + fileUri):
return
    # extract the date portion from the path
datePath = re.findall(r'[/][/\w]+[/]', fileUri)[0]
path = settings.MEDIA_ROOT + folder + datePath
if not os.path.exists(path):
os.makedirs(path)
    # move the file
os.rename(settings.MEDIA_ROOT + fileUri, path + getFileName(fileUri))
return folder + datePath + getFileName(fileUri)
def getUploadImageSize(fileInfo):
""" 上传图片的原始尺寸 @return width,height"""
if fileInfo is None or fileInfo == "":
return None, None
fileList = fileInfo.split(",")
if len(fileList) == 3:
sizeList = fileList[1].split("_")
return sizeList[0], sizeList[1]
else:
return None, None
def getFileSize(fileInfo):
""" 上传文件的大小 """
if fileInfo is None or fileInfo == "":
return None
fileList = fileInfo.split(",")
if len(fileList) == 3:
return fileList[2]
else:
return None
def uploadFile(upload_file, domain, extType=('png', 'jpeg', 'gif', 'bmp', 'jpg')):
if upload_file:
datePath = date.strftime(date.today(), "%Y/%m/%d")
uid = uuid.UUID.time_low.fget(uuid.uuid4())
folder = domain + "/" + str(datePath)
        # handle non-ASCII (e.g. Chinese) file names via encode('utf-8')
        ext = str(upload_file.name.encode('utf-8')).split(".")[-1]  # compound suffixes such as tar.gz are not handled yet
if ext in extType:
# file_name = image.name.encode('utf-8')
file_uid = str(uid)
path_root = settings.MEDIA_ROOT
path_folder = path_root + folder
            # path where the file is saved on the server
file_upload = path_folder + "/" + file_uid + "." + ext
            # file info stored in the DB: file path, image size (if any), file size
fileInfo = folder + "/" + file_uid + "." + ext
# path_save = path_folder + "/" + file_uid + ".jpg"
# save_50 = path_folder + "/" + 'snap_50X50_' + file_uid + '.jpg'
# save_60 = path_folder + "/" + 'snap_60X60_' + file_uid + '.jpg'
# avatar_info = 'folder='+ folder + ',uid=' + file_uid + ',ext=jpg' + ',swidth=50,sheight=50' + ',name=' +file_name +',size=' + file_size
if not os.path.exists(path_folder):
os.makedirs(path_folder)
try:
if ext in ('png', 'jpeg', 'gif', 'bmp', 'jpg'):
parser = ImageFile.Parser()
for chunk in upload_file.chunks():
parser.feed(chunk)
img = parser.close()
img.save(file_upload, format="JPEG", quality=85)
else:
with open(file_upload, 'wb') as fd:
for chunk in upload_file.chunks():
fd.write(chunk)
except Exception as e:
logger.error(u"上传失败!%s", e)
return 3, "上传失败!"
            # get the file size
if ext in ('png', 'jpeg', 'gif', 'bmp', 'jpg'):
image = Image.open(file_upload)
srcW, srcH = image.size
fileInfo += "," + str(srcW) + "_" + str(srcH)
else:
fileInfo += ",0_0"
file_size = os.path.getsize(file_upload)
fileInfo += "," + str(file_size)
return 1, fileInfo
else:
return 2, """不是支持的文件类型!"""
else:
return 0, """未上传文件"""
def resizeImage(imgPath, thumbPath, width, height, pathRoot=settings.MEDIA_ROOT):
"""等比压缩生成缩略图 @param imgPath 原图(相对路径) @param thumbPath 缩略图"""
img = pathRoot + imgPath
resizeImg = pathRoot + thumbPath
if os.path.exists(img):
image = Image.open(img)
        # get the width and height of the image
newWidth = 0
newHeight = 0
srcWidth, srcHeight = image.size
if srcWidth <= width and srcHeight <= height:
newWidth = srcWidth
newHeight = srcHeight
else:
ratioH = 1.0 * srcHeight / height
ratioW = 1.0 * srcWidth / width
if ratioH >= ratioW:
newHeight = height
newWidth = int(1.0 * height / srcHeight * srcWidth)
else:
newWidth = width
newHeight = int(1.0 * width / srcWidth * srcHeight)
if image.format == 'GIF':
image = image.convert('RGB')
image.resize((newWidth, newHeight), Image.ANTIALIAS).save(resizeImg, format=image.format, quality=95)
if os.path.exists(resizeImg):
return True
return False
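# e.g. a 1000x500 source resized with width=200, height=200 becomes a 200x100 thumbnail
# (aspect ratio is preserved; images already smaller than the box keep their size).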
def isImageSize(img, width, height):
"""判断图片尺寸 @param img 图片的绝对路径"""
image = Image.open(img)
srcWidth, srcHeight = image.size
if srcWidth == width and srcHeight == height:
return True
return False
def getImageSize(img):
"""获取图片尺寸 @param img 图片的绝对路径"""
image = Image.open(img)
srcWidth, srcHeight = image.size
return srcWidth, srcHeight
def cropImageCenter(img, newImg, width, height, pathRoot=settings.MEDIA_ROOT):
"""最大范围裁切图片的中间部分"""
img = pathRoot + img
newImg = pathRoot + newImg
image = Image.open(img)
srcWidth, srcHeight = image.size
ratioH = 1.0 * srcHeight / height
ratioW = 1.0 * srcWidth / width
x1 = 0
y1 = 0
x2 = 0
y2 = 0
if ratioW <= 1 or ratioH <= 1:
# if ratioW<=1:
# x1=0
# else:
# x1=int(1.0*(srcWidth-width)/2)
# if ratioH<=1:
# y1=0
# else:
# y1=int(1.0*(srcHeight-height)/2)
x = int(1.0 * (srcWidth - width) / 2)
x1 = x if x > 0 else 0
y = int(1.0 * (srcHeight - height) / 2)
y1 = y if y > 0 else 0
x2 = x1 + width
y2 = y1 + height
x2 = x2 if x2 <= srcWidth else srcWidth
y2 = y2 if y2 <= srcHeight else srcHeight
box = (x1, y1, x2, y2)
image.crop(box).save(newImg)
else:
# First scale proportionally to the ratio closest to the crop size, then crop
newWidth = 0
newHeight = 0
if ratioW <= ratioH:
newWidth = width
newHeight = int(srcHeight / ratioW)
else:
newHeight = height
newWidth = int(srcWidth / ratioH)
if image.format == 'GIF':
image = image.convert('RGB')
image.resize((newWidth, newHeight), Image.ANTIALIAS).save(newImg, format=image.format, quality=95)
x = int(1.0 * (newWidth - width) / 2)
y = int(1.0 * (newHeight - height) / 2)
x1 = x if x > 0 else 0
y1 = y if y > 0 else 0
x2 = x1 + width
y2 = y1 + height
x2 = x2 if x2 <= newWidth else newWidth
y2 = y2 if y2 <= newHeight else newHeight
box = (x1, y1, x2, y2)
image = Image.open(newImg)
image.crop(box).save(newImg)
if os.path.exists(newImg):
return True
return False
def delFile(filePath, pathRoot=settings.MEDIA_ROOT):
if filePath:
fullPath = pathRoot + filePath
if os.path.exists(fullPath):
os.remove(fullPath)
def renameFile(srcFile, newFile, pathRoot=settings.MEDIA_ROOT):
if srcFile and newFile:
fullPath = pathRoot + srcFile
newFilePath = pathRoot + newFile
if os.path.exists(fullPath):
os.rename(fullPath, newFilePath)
if os.path.exists(newFilePath):
return True
return False
| 31.346008
| 149
| 0.548156
| 951
| 8,244
| 4.679285
| 0.211356
| 0.005393
| 0.02427
| 0.018876
| 0.337753
| 0.273933
| 0.200899
| 0.193708
| 0.142921
| 0.120899
| 0
| 0.024919
| 0.323387
| 8,244
| 262
| 150
| 31.465649
| 0.772858
| 0.10359
| 0
| 0.402116
| 0
| 0
| 0.022842
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.031746
| 0
| 0.248677
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0bbdeb9ad8ad6836124106f4fb1414f2a3afe49
| 1,853
|
py
|
Python
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 2
|
2020-05-07T00:46:36.000Z
|
2020-05-26T10:17:36.000Z
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 2
|
2022-02-27T20:43:49.000Z
|
2022-03-02T12:28:26.000Z
|
csn_searcher/loader.py
|
box-key/shanet
|
6a2679a2dcb98dc4447af8eb453e297cd7585c79
|
[
"Apache-2.0"
] | 1
|
2020-05-08T23:32:03.000Z
|
2020-05-08T23:32:03.000Z
|
import requests
from tqdm import tqdm
from time import sleep
import os
class DataLoader():
"""
This class is used to download a pre-trained neural network model from a URL.
It stores the model name and the link to the model.
"""
def __init__(self):
# url where model is stored
self.model_url = 'https://www.dropbox.com/s/z82x8xtofzwgae8/siamense-lstm.pt?dl=1'
self.field_url = 'https://www.dropbox.com/s/99ogf071ncl47ut/TEXT.Field?dl=1'
# output file name
# model_path = os.path.join('csn_searcher', 'data', 'siamese-lstm.pt')
self.model_path = 'siamese-lstm.pt'
self.text_field_path = 'TEXT.Field'
# chunk size for progress bar
self.chunk_size = 2**20
self._load()
def _load(self):
# load nn model
# if a model exists skip this process
if not os.path.exists(self.model_path):
# get model
r = requests.get(self.model_url, stream=True)
# output model
with open(self.model_path, 'wb') as f:
size = int(r.headers.get('content-length'))
task = 'Download NN model'
# print progress bar
with tqdm(total=size, unit=' data', desc=task) as pbar:
for chunk in r.iter_content(chunk_size=self.chunk_size):
if chunk:
f.write(chunk)
f.flush()
pbar.update(len(chunk))
# loads field object
if not os.path.exists(self.text_field_path):
print('Loading Field object...')
r = requests.get(self.field_url)
with open(self.text_field_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=self.chunk_size):
f.write(chunk)
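# Editor's usage sketch (not part of the original file): constructing the loader runs
# _load() immediately, so both artifacts are downloaded into the current working
# directory on first use and skipped on later runs when the files already exist.
if __name__ == "__main__":
    loader = DataLoader()  # fetches siamese-lstm.pt and TEXT.Field if missing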
| 38.604167
| 90
| 0.563411
| 243
| 1,853
| 4.18107
| 0.395062
| 0.05315
| 0.038386
| 0.050197
| 0.17126
| 0.17126
| 0.086614
| 0.086614
| 0.086614
| 0.086614
| 0
| 0.012998
| 0.335672
| 1,853
| 47
| 91
| 39.425532
| 0.812348
| 0.201295
| 0
| 0.133333
| 0
| 0.033333
| 0.143349
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.233333
| 0.033333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c2140b35a3bb72f96b2eb82a9fe58420a9a0cf
| 4,419
|
py
|
Python
|
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
airflow/providers/tesouro_gerencial/hooks/tesouro_gerencial.py
|
CarlosAdp/airflow-providers-tesouro-gerencial
|
f48ba321a5152dfd9b72107f640c66b217d59b9d
|
[
"MIT"
] | null | null | null |
from enum import Enum
from typing import List, Union
from urllib.parse import urljoin
import warnings
from airflow.exceptions import AirflowException
from airflow.providers.siafi.hooks.siafi import SIAFIHook
import requests
warnings.filterwarnings('ignore', message='Unverified HTTPS request')
class TesouroGerencialHook(SIAFIHook):
'''Hook for interacting with Tesouro Gerencial.
Class inherited from :class:`airflow.providers.siafi.hooks.siafi.SIAFIHook`
'''
class FORMATO(Enum):
PDF = 'pdf'
CSV = 'csv'
EXCEL = 'excel'
def __str__(self) -> str:
return self.value
URL = 'https://tesourogerencial.tesouro.gov.br/'
string_sessao: str
def __enter__(self) -> 'TesouroGerencialHook':
'''Starts the session.'''
super().__enter__()
cpf = self.cpf
senha = self.senha
self.log.info('Iniciando sessão com usuário "%s"', self.cpf)
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {
'taskId': 'senhaMstrSSOTask',
'taskEnv': 'xhr',
'taskContentType': 'json',
'cpf': cpf,
'token': '',
'server': '',
'project': 'TESOURO%20GERENCIAL%20-%20DES',
'senha': senha,
'novaSenha': '',
}
resposta = requests.get(url, params=params, verify=False)
try:
resposta_json = resposta.json()
self.string_sessao = resposta_json['sessionState']
except Exception:
raise AirflowException(resposta)
self.log.info('Sessão iniciado com sucesso')
return self
def __exit__(self, *args, **kwargs) -> None:
'''Ends the session.'''
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {'taskId': 'logout', 'sessionState': self.string_sessao}
requests.get(url, params=params, verify=False)
self.log.info('Sessão encerrada com sucesso')
def retorna_relatorio(
self,
id_relatorio: str,
formato: Union[str, FORMATO] = FORMATO.CSV,
respostas_prompts_valor: List[str] = None,
) -> bytes:
'''Returns a report from Tesouro Gerencial.
:param id_relatorio: report ID
:type id_relatorio: str
:param formato: format of the report to be fetched from Tesouro
Gerencial, which can be "csv", "excel" or "pdf". The
:attr:`~TesouroGerencialHook.FORMATO` attribute can also be used.
:type formato: Union[str, TesouroGerencialHook.FORMATO]
:param respostas_prompts_valor: list of value-prompt answers, respecting
the order in which they appear in the report
:type respostas_prompts_valor: List[str]
:return: report content, as a binary string
:rtype: bytes
'''
self.log.info(
'Solicitando relatório "%s" no formato "%s" com as seguintes '
'respostas para prompts: "%s"',
id_relatorio, formato, respostas_prompts_valor
)
url = urljoin(self.URL, 'tg/servlet/taskAdmin')
params = {
'taskId': 'exportReport',
'taskEnv': 'juil_iframe',
'taskContent': 'json',
'expandPageBy': True,
}
params.update({
'sessionState': self.string_sessao,
'reportID': id_relatorio,
'valuePromptAnswers': '^'.join(respostas_prompts_valor or [])
})
try:
formato = self.FORMATO(formato)
except ValueError:
raise AirflowException(f'"{formato}" não é um formato válido')
if formato == self.FORMATO.CSV:
params.update({'executionMode': 4, 'plainTextDelimiter': ','})
elif formato == self.FORMATO.EXCEL:
params.update({'executionMode': 3, 'excelVersion': 4})
elif formato == self.FORMATO.PDF:
params.update({'executionMode': 2})
requisicao = requests.Request('GET', url, params=params)
requisicao_preparada = requisicao.prepare()
self.log.info('Solicitando URL "%s"', requisicao_preparada.url)
resposta = requests.get(requisicao_preparada.url, verify=False)
if resposta.ok:
self.log.info('Relatório gerado com sucesso')
return resposta.content
else:
raise AirflowException(resposta)
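# Editor's usage sketch (not part of the original file). `hook` stands for an already
# constructed TesouroGerencialHook (its constructor is inherited from SIAFIHook and is
# not shown here), and '123456' is a hypothetical report id.
#
#     with hook:
#         conteudo_csv = hook.retorna_relatorio('123456', formato=TesouroGerencialHook.FORMATO.CSV)
#         conteudo_pdf = hook.retorna_relatorio('123456', formato='pdf')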
| 32.977612
| 76
| 0.605114
| 449
| 4,419
| 5.861915
| 0.36971
| 0.015957
| 0.025076
| 0.019377
| 0.12652
| 0.081687
| 0.081687
| 0.053571
| 0.053571
| 0
| 0
| 0.003153
| 0.282191
| 4,419
| 133
| 77
| 33.225564
| 0.826608
| 0.167006
| 0
| 0.101124
| 0
| 0
| 0.209904
| 0.00816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044944
| false
| 0
| 0.078652
| 0.011236
| 0.202247
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c27fbbd572c04635510356965faf39003f2e6a
| 843
|
py
|
Python
|
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
offensive_nn/models/offensive_lstm_model.py
|
TharinduDR/OffensiveNN
|
336b377c44a7067d2e23ca4a8d331ce7f99157cc
|
[
"Apache-2.0"
] | null | null | null |
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
class OffensiveLSTMModel:
def __init__(self, args, embedding_matrix):
emb = layers.Embedding(args.max_features, args.embed_size, trainable=False,
name="embedding_layer")
inp = tf.keras.Input(shape=(None,), dtype="int64", name="input")
x = emb(inp)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True, name="lstm_1"))(x)
x = layers.Bidirectional(layers.LSTM(64, name="lstm_2"))(x)
x = layers.Dense(256, activation="relu", name="dense_1")(x)
x = layers.Dense(args.num_classes, activation="softmax", name="dense_predictions")(x)
emb.set_weights([embedding_matrix])
self.model = tf.keras.Model(inputs=inp, outputs=x, name="lstm_model")
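# Editor's usage sketch (not part of the original file): the constructor only needs an
# object exposing max_features, embed_size and num_classes plus a pre-computed
# embedding matrix of shape (max_features, embed_size). The values below are arbitrary.
if __name__ == "__main__":
    import numpy as np
    from types import SimpleNamespace

    args = SimpleNamespace(max_features=20000, embed_size=300, num_classes=2)
    embedding_matrix = np.zeros((args.max_features, args.embed_size), dtype="float32")
    model = OffensiveLSTMModel(args, embedding_matrix).model
    model.summary()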
| 42.15
| 93
| 0.664294
| 110
| 843
| 4.936364
| 0.472727
| 0.051565
| 0.044199
| 0.095764
| 0.117864
| 0.117864
| 0
| 0
| 0
| 0
| 0
| 0.017778
| 0.199288
| 843
| 19
| 94
| 44.368421
| 0.786667
| 0
| 0
| 0
| 0
| 0
| 0.097272
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c2f5602738aeed19e70471739de83515468bde
| 566
|
py
|
Python
|
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | 1
|
2021-11-20T16:28:47.000Z
|
2021-11-20T16:28:47.000Z
|
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | null | null | null |
examples/date-format.py
|
adamantonio/gooeypie
|
a60416a6a317f83e89541d6fdcac3559ace87cac
|
[
"MIT"
] | null | null | null |
import gooeypie as gp
date_formats = ['28/8/20', '8/28/20', '28/08/2020', '08/28/2020', '2020-08-28',
'28-Aug-2020', 'Friday, August 28, 2020', 'Friday, 28 August, 2020',
'August 28, 2020', '28 August, 2020']
app = gp.GooeyPieApp('Time and date')
app.width = 250
label = gp.Label(app, 'Available formats:')
date_options = gp.Listbox(app, date_formats)
date_options.height = 8
ok = gp.Button(app, 'OK', None)
ok.width = 10
app.set_grid(3, 1)
app.add(label, 1, 1)
app.add(date_options, 2, 1, fill=True)
app.add(ok, 3, 1)
app.run()
| 25.727273
| 84
| 0.627208
| 98
| 566
| 3.561224
| 0.387755
| 0.051576
| 0.045845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168831
| 0.183746
| 566
| 21
| 85
| 26.952381
| 0.58658
| 0
| 0
| 0
| 0
| 0
| 0.289753
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c3406b4cbc0b4a885af800bf5115dce13d7dd2
| 1,131
|
py
|
Python
|
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | 2
|
2020-10-01T11:50:30.000Z
|
2020-10-11T20:59:06.000Z
|
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | null | null | null |
color_contrast_calc/converters/grayscale.py
|
nico-hn/color_contrast_calc_py
|
92cf9eecbd8e5d000f284ec786103cb719df6026
|
[
"MIT"
] | 1
|
2019-06-18T02:08:06.000Z
|
2019-06-18T02:08:06.000Z
|
# https://www.w3.org/TR/filter-effects/#funcdef-grayscale
# https://www.w3.org/TR/filter-effects/#grayscaleEquivalent
# https://www.w3.org/TR/SVG/filters.html#feColorMatrixElement
import numpy as np
from . import rgb_clamp
_CONST_PART = np.array([[0.2126, 0.7152, 0.0722],
[0.2126, 0.7152, 0.0722],
[0.2126, 0.7152, 0.0722]])
_RATIO_PART = np.array([[0.7874, -0.7152, -0.0722],
[-0.2126, 0.2848, -0.0722],
[-0.2126, -0.7152, 0.9278]])
def calc_rgb(rgb, s):
"""Convert passed a passed color to grayscale.
The calculation is based on the definition found at
https://www.w3.org/TR/filter-effects/#funcdef-grayscale
:param rgb: The Original RGB value before the conversion.
:type rgb: (int, int, int)
:param s: Conversion ratio in percentage
:type s: float
:return: RGB value of grayscale color
:rtype: (int, int, int)
"""
return rgb_clamp((_calc_grayscale(s) * np.array(rgb)).sum(1))
def _calc_grayscale(s):
r = 1 - min((100, s)) / 100.0
return _CONST_PART + _RATIO_PART * r
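# Editor's worked example (not part of the original module): with s=100 the ratio part
# vanishes (r == 0), so every output channel collapses to the Rec. 709 luma
# 0.2126*R + 0.7152*G + 0.0722*B. For pure red that is 0.2126*255 ~= 54.2, so
# calc_rgb((255, 0, 0), 100) comes out around (54, 54, 54), subject to rgb_clamp's
# rounding. With s=0 the matrix reduces to the identity and the colour is unchanged.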
| 29.763158
| 65
| 0.612732
| 169
| 1,131
| 4.011834
| 0.390533
| 0.036873
| 0.044248
| 0.076696
| 0.306785
| 0.284661
| 0.284661
| 0.196165
| 0.196165
| 0.066372
| 0
| 0.11949
| 0.237843
| 1,131
| 37
| 66
| 30.567568
| 0.667053
| 0.465959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c4a12b9a13aa11699c0f68047498ec12883a9b
| 845
|
py
|
Python
|
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | 2
|
2021-07-17T20:09:24.000Z
|
2021-08-09T13:48:38.000Z
|
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | null | null | null |
dictionary.py
|
LordGhostX/ECXBotsMastery
|
199000f8c24b30734869e8bbdebb5901604cd0fa
|
[
"MIT"
] | 1
|
2021-07-17T00:12:07.000Z
|
2021-07-17T00:12:07.000Z
|
import requests
from bs4 import BeautifulSoup
def find_word_meaning(word):
r = requests.get(f"https://www.dictionary.com/browse/{word}")
if r.status_code == 200:
page = BeautifulSoup(r.text, "html.parser")
luna_pos = page.find("span", {"class": "luna-pos"}).text
word_meaning = f"{word} - {luna_pos}\n\n"
meanings = page.find(
"div", {"class": "css-1uqerbd e1hk9ate0"}).find_all("div", {"class": "e1q3nk1v2"})
for i, meaning in enumerate(meanings):
word_meaning += f"{i + 1} - {meaning.find('span').text}\n\n"
return word_meaning.strip()
elif r.status_code == 404:
return "the specified word does not exist!"
else:
return "an error occured while finding word meaning!"
if __name__ == "__main__":
print(find_word_meaning("intense"))
| 33.8
| 94
| 0.620118
| 112
| 845
| 4.5
| 0.544643
| 0.130952
| 0.059524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024427
| 0.224852
| 845
| 24
| 95
| 35.208333
| 0.745038
| 0
| 0
| 0
| 0
| 0
| 0.32071
| 0.036686
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.315789
| 0.052632
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c77869671d10fbedbed340a7a7c9b4e8912459
| 2,993
|
py
|
Python
|
ocdsapi_outlet/utils.py
|
openprocurement/ocdsapi_outlet
|
e5aab856fbb833d34c4d56831cad7c09d7719a5e
|
[
"Apache-2.0"
] | null | null | null |
ocdsapi_outlet/utils.py
|
openprocurement/ocdsapi_outlet
|
e5aab856fbb833d34c4d56831cad7c09d7719a5e
|
[
"Apache-2.0"
] | 6
|
2019-12-26T16:43:14.000Z
|
2022-03-21T22:16:25.000Z
|
ocdsapi_outlet/utils.py
|
openprocurement/ocdsapi_outlet
|
e5aab856fbb833d34c4d56831cad7c09d7719a5e
|
[
"Apache-2.0"
] | 1
|
2018-07-27T16:19:27.000Z
|
2018-07-27T16:19:27.000Z
|
""" utils.py - helper functions """
import logging
import functools
import operator
from repoze.lru import lru_cache
from gevent import spawn
from gevent.subprocess import Popen, PIPE
try:
import boto3
except ImportError:
boto3 = None
def dump(app, logger):
"""
Run dump script as separate process
"""
def read_stream(stream):
try:
while not stream.closed:
line = stream.readline()
if not line:
break
line = line.rstrip().decode('utf-8')
logger.info(line.split(' - ')[-1])
except:
pass
args = prepare_pack_command(app.config)
logger.warn("Going to start dump with args {}".format(args))
popen = Popen(args, stdout=PIPE, stderr=PIPE)
spawn(read_stream, popen.stdout)
spawn(read_stream, popen.stderr)
popen.wait()
return_code = popen.returncode
logger.info("Dumper ended work with code {}".format(return_code))
def setup_logger(
logger,
handler,
level,
formatter,
filename):
if filename:
handler = functools.partial(handler, filename)
handler = handler()
if formatter:
handler.setFormatter(logging.Formatter(formatter))
logger.addHandler(handler)
logger.setLevel(getattr(logging, level.upper()))
return logger
def find_package_date(releases):
""" Find max date inside package """
return max(
releases,
key=operator.itemgetter('date')
).get('date')
def prepare_package(date, metainfo=None):
""" Prepare metainfo for package """
base = {
'publishedDate': date,
'releases': [],
'publisher': {
'name': '',
'scheme': '',
'uri': ''
},
}
if metainfo:
base.update(metainfo)
return base
@lru_cache(maxsize=1)
def connect_bucket(cfg):
""" TODO: do we really need this? """
return (
cfg.bucket,
boto3.client('s3')
)
def prepare_pack_command(cfg):
base_bin = cfg.get('bin_path', 'ocds-pack')
base_args = [
base_bin,
'--package-meta',
cfg.get('dump', {}).get('metainfo_file', 'meta.yml')
]
for key in ('clean_up', 'with_zip', 'count'):
if cfg.get('dump', {}).get(key):
base_args.extend([
'--{}'.format(key.replace('_', '-')),
str(cfg['dump'][key])
])
db_args = [
item
for arg, value in cfg.get('db').items()
for item in '--{} {}'.format(arg.replace('_', '-'), value).split()
]
backend = list(cfg.get('backend', {'fs': ''}).keys())[0]
backend_args = [backend]
backend_args.extend([
item
for arg, value in cfg['backend'][backend].items()
for item in '--{} {}'.format(arg.replace('_', '-'), value).split()
])
for args in db_args, backend_args:
base_args.extend(args)
return base_args
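# Editor's illustration (not part of the original module) of the command list that
# prepare_pack_command() assembles; the configuration values below are made up.
if __name__ == "__main__":
    _example_cfg = {
        'dump': {'count': 10000},
        'db': {'host': 'localhost', 'port': 5984},
        'backend': {'fs': {'file_path': '/tmp/dumps'}},
    }
    # -> ['ocds-pack', '--package-meta', 'meta.yml', '--count', '10000',
    #     '--host', 'localhost', '--port', '5984', 'fs', '--file-path', '/tmp/dumps']
    print(prepare_pack_command(_example_cfg))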
| 25.364407
| 74
| 0.559973
| 335
| 2,993
| 4.901493
| 0.397015
| 0.01827
| 0.021924
| 0.024361
| 0.073082
| 0.073082
| 0.048721
| 0.048721
| 0.048721
| 0
| 0
| 0.003791
| 0.295022
| 2,993
| 117
| 75
| 25.581197
| 0.774408
| 0.051453
| 0
| 0.084211
| 0
| 0
| 0.08935
| 0
| 0
| 0
| 0
| 0.008547
| 0
| 1
| 0.073684
| false
| 0.010526
| 0.084211
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c97b770b216db5c6e4a8a50cbc39cc9f11e3a2
| 1,467
|
py
|
Python
|
venv/lib/python2.7/site-packages/daemon/pidfile.py
|
mutaihillary/mycalculator
|
55685dd7c968861f18ae0701129f5af2bc682d67
|
[
"MIT"
] | null | null | null |
venv/lib/python2.7/site-packages/daemon/pidfile.py
|
mutaihillary/mycalculator
|
55685dd7c968861f18ae0701129f5af2bc682d67
|
[
"MIT"
] | 7
|
2021-02-08T20:22:15.000Z
|
2022-03-11T23:19:41.000Z
|
venv/lib/python2.7/site-packages/daemon/pidfile.py
|
mutaihillary/mycalculator
|
55685dd7c968861f18ae0701129f5af2bc682d67
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# daemon/pidfile.py
# Part of python-daemon, an implementation of PEP 3143.
#
# Copyright © 2008–2010 Ben Finney <ben+python@benfinney.id.au>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Python Software Foundation License, version 2 or
# later as published by the Python Software Foundation.
# No warranty expressed or implied. See the file LICENSE.PSF-2 for details.
""" Lockfile behaviour implemented via Unix PID files.
"""
from __future__ import absolute_import
from lockfile.pidlockfile import PIDLockFile
class TimeoutPIDLockFile(PIDLockFile, object):
""" Lockfile with default timeout, implemented as a Unix PID file.
This uses the ``PIDLockFile`` implementation, with the
following changes:
* The `acquire_timeout` parameter to the initialiser will be
used as the default `timeout` parameter for the `acquire`
method.
"""
def __init__(self, path, acquire_timeout=None, *args, **kwargs):
""" Set up the parameters of a TimeoutPIDLockFile. """
self.acquire_timeout = acquire_timeout
super(TimeoutPIDLockFile, self).__init__(path, *args, **kwargs)
def acquire(self, timeout=None, *args, **kwargs):
""" Acquire the lock. """
if timeout is None:
timeout = self.acquire_timeout
super(TimeoutPIDLockFile, self).acquire(timeout, *args, **kwargs)
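# Editor's usage sketch (not part of the original file): the acquire_timeout passed at
# construction becomes the default timeout for later acquire() calls. The pid-file path
# below is an arbitrary example.
#
#     lockfile = TimeoutPIDLockFile('/var/run/example.pid', acquire_timeout=5)
#     lockfile.acquire()            # waits at most 5 seconds
#     lockfile.acquire(timeout=30)  # an explicit timeout still takes precedence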
| 34.116279
| 75
| 0.691888
| 185
| 1,467
| 5.4
| 0.52973
| 0.084084
| 0.054054
| 0.054054
| 0.082082
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013021
| 0.214724
| 1,467
| 42
| 76
| 34.928571
| 0.852431
| 0.560327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0c9cd685e3c553d9d38e5a73ed8bdcbc7f01131
| 11,258
|
py
|
Python
|
zazi/apps/mpesa/utils/transaction.py
|
felixcheruiyot/zazi-core-banking
|
0a2dac42235adcac3cf8c114961e407f54844223
|
[
"Apache-2.0"
] | null | null | null |
zazi/apps/mpesa/utils/transaction.py
|
felixcheruiyot/zazi-core-banking
|
0a2dac42235adcac3cf8c114961e407f54844223
|
[
"Apache-2.0"
] | 1
|
2021-08-20T06:41:57.000Z
|
2021-08-20T06:41:57.000Z
|
zazi/apps/mpesa/utils/transaction.py
|
felixcheruiyot/zazi-core-banking
|
0a2dac42235adcac3cf8c114961e407f54844223
|
[
"Apache-2.0"
] | null | null | null |
import logging
from django.db import transaction as db_transaction
from django.conf import settings
from django.utils import timezone
from django.urls import reverse_lazy
from zazi.core.utils import get_absolute_url, get_encrypted_text
from .. import api
from ..models import (
MpesaAccount,
MpesaAccountBalance,
MpesaAccountRegisteredURL,
MpesaAPIAccount,
MpesaTransaction,
generate_id)
from ..enums import IdentifierType, CommandID, ResultCode, MpesaTransactionStatus
logger = logging.getLogger(__name__)
def get_mpesa_webhook_url(url_name, kwargs=None, endpoint=None):
logger.debug(f'get_mpesa_webhook_url({url_name}, kwargs={kwargs}, endpoint={endpoint})')
return get_absolute_url(url_name, kwargs=kwargs, endpoint=settings.MPESA_PROXY_URL)
def request_b2b_transaction(
sender_short_code,
receiver_short_code,
amount,
user,
mpesa_receipt_number=None,
account_reference=None,
transaction_category=None,
command_id=CommandID.B2B_BUSINESS_TO_BUSINESS_TRANSFER,
remarks=None,
):
with db_transaction.atomic():
sender_account = MpesaAPIAccount.objects.get(
organization__owner=user,
identifier=sender_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
recipient_account = MpesaAPIAccount.objects.get(
organization__owner=user,
identifier=receiver_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
return MpesaTransaction.objects.create(
transaction_id=generate_id(),
command_id=command_id,
mpesa_receipt_number=mpesa_receipt_number,
sender_account=sender_account,
recipient_account=recipient_account,
transaction_time=timezone.now(),
transaction_amount=amount)
def request_b2c_transaction(
organization_id,
sender_short_code,
receiver_phone_number,
amount,
user,
transaction_id=None,
account_reference=None,
transaction_category=None,
command_id=CommandID.B2C_BUSINESS_PAYMENT,
remarks=None,
):
logger.info("request_b2c_transaction %s %s %s %s" % (
organization_id,
sender_short_code,
receiver_phone_number,
amount
))
with db_transaction.atomic():
sender_account = MpesaAccount.objects.get(
organization__owner=user,
organization__organization_id=organization_id,
identifier=sender_short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
recipient_account = MpesaAccount.objects.get(
identifier=receiver_phone_number,
identifier_type=IdentifierType.PERSONAL_MPESA)
transaction_id = transaction_id or generate_id()
mpesa_api_account = sender_account.api_account
queue_timeout_url = get_mpesa_webhook_url('mpesa:mpesa_b2c_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction_id
})
result_url = get_mpesa_webhook_url('mpesa:mpesa_b2c_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction_id
})
security_credential = get_encrypted_text(
mpesa_api_account.security_credential,
function_name="zazi-certificate-microservice-dev-encrypt_text")
if not security_credential:
raise Exception("Error accessing securty credentials for M-Pesa account %s" % mpesa_api_account.account_id)
else:
logger.info("Security credential received for %s" % sender_account.account_id)
request_payload = api.b2c_transact(
env="production" if sender_account.api_account.in_production else "sandbox",
app_key=mpesa_api_account.consumer_key,
app_secret=mpesa_api_account.consumer_secret,
initiator_name=mpesa_api_account.username,
security_credential=security_credential,
command_id=command_id,
party_a=sender_account.identifier,
party_b=recipient_account.identifier,
amount=amount,
remarks=remarks,
account_reference=account_reference,
queue_timeout_url=queue_timeout_url,
result_url=result_url)
transaction = MpesaTransaction.objects\
.create(
command_id=command_id,
transaction_category=transaction_category,
transaction_id=transaction_id,
sender_account=sender_account,
result_code=ResultCode.success,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
recipient_account=recipient_account)
if request_payload:
logger.info(request_payload)
transaction.request_payload = {
"conversation_id": request_payload["ConversationID"],
"originator_conversation_id": request_payload["OriginatorConversationID"],
"response_code": ResultCode(int(request_payload["ResponseCode"])),
"response_description": request_payload["ResponseDescription"]
}
transaction.save()
else:
transaction.request_payload = request_payload
transaction.save()
return transaction
def request_mpesa_express_stk_push(
organization_id,
short_code,
phone_number,
amount,
transaction_category=None,
reference_code=None,
description=None,
):
with db_transaction.atomic():
business_account = MpesaAccount.objects.get(
organization__organization_id=organization_id,
identifier=short_code,
identifier_type=IdentifierType.BUSINESS_PAYBILL)
personal_account = MpesaAccount.objects.get(
identifier=phone_number,
identifier_type=IdentifierType.PERSONAL_MPESA)
lipa_na_mpesa_account = business_account.lipa_na_mpesa_accounts.first()
reference = generate_id()
request_payload = api.mpesa_express_stk_push(
env="production" if business_account.api_account.in_production else "sandbox",
app_key=business_account.api_account.consumer_key,
app_secret=business_account.api_account.consumer_secret,
business_shortcode=short_code,
passcode=lipa_na_mpesa_account.pass_code,
amount=amount,
callback_url=get_mpesa_webhook_url('mpesa:mpesa_c2b_stk_push_callback_url', kwargs={
"organization_id": organization_id,
"reference": reference
}),
reference_code=reference_code or reference,
phone_number=phone_number,
description=description)
transaction = MpesaTransaction.objects.create(
transaction_id=reference,
command_id=CommandID.C2B_PAYBILL,
transaction_category=transaction_category,
sender_account=personal_account,
recipient_account=business_account,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
transaction_amount=amount,
request_payload=request_payload)
if request_payload.get("errorCode") is None:
transaction.request_payload = {
"merchant_request_id": request_payload["MerchantRequestID"],
"checkout_request_id": request_payload["CheckoutRequestID"],
"response_code": ResultCode(int(request_payload["ResponseCode"])),
"response_description": request_payload["ResponseDescription"],
"customer_message": request_payload["CustomerMessage"]
}
transaction.save()
else:
transaction.request_payload = request_payload
transaction.save()
logger.debug(request_payload)
return transaction
def request_transaction_reverse(
transaction_id,
mpesa_user,
remarks=None,
occassion=None
):
with db_transaction.atomic():
organization_id = mpesa_user.organization.organization_id
transaction = MpesaTransaction.objects.get(
transaction_id=transaction_id)
response = api.transaction_reverse(
env="production" if transaction.sender_account.in_production else "sandbox",
app_key=None,
app_secret=None,
receiver_party=None,
initiator=None,
security_credential=None,
command_id=None,
transaction_id=None,
receiver_identifier_type=None,
amount=None,
result_url=get_mpesa_webhook_url('mpesa_balance_check_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction.transaction_id
}),
queue_timeout_url=get_mpesa_webhook_url('mpesa_balance_check_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction.transaction_id
}),
remarks=remarks,
occassion=occassion)
transaction_reference = generate_id()
return MpesaTransaction.objects.create(
command_id=CommandID.UTILITY_TRANSACTION_REVERSAL,
transaction_id=transaction_reference,
initiator=mpesa_user,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
request_payload=response)
def request_check_transaction_status(
transaction_id,
mpesa_user,
remarks=None
):
with db_transaction.atomic():
organization_id = mpesa_user.organization.organization_id
transaction = MpesaTransaction.objects.get(
transaction_id=transaction_id)
response = api.check_transaction_status(
env="production" if transaction.sender_account.in_production else "sandbox",
app_key=transaction.api_account.consumer_key,
app_secret=transaction.api_account.consumer_secret,
identifier_type=transaction.sender_account.identifier_type,
initiator=transaction.initiator.username,
party_a=transaction.sender_account.identifier,
remarks=remarks,
result_url=get_mpesa_webhook_url('mpesa_check_status_result_url', kwargs={
"organization_id": organization_id,
"reference": transaction.mpesa_receipt_number
}),
queue_timeout_url=get_mpesa_webhook_url('mpesa_check_status_queue_timeout_url', kwargs={
"organization_id": organization_id,
"reference": transaction.mpesa_receipt_number
}))
transaction_reference = generate_id()
return MpesaTransaction.objects.create(
command_id=CommandID.UTILITY_TRANSACTION_STATUS_QUERY,
transaction_id=transaction_reference,
initiator=mpesa_user,
status=MpesaTransactionStatus.PENDING,
initiated_at=timezone.now(),
request_payload=response)
| 38.162712
| 119
| 0.669213
| 1,090
| 11,258
| 6.526606
| 0.151376
| 0.049199
| 0.018977
| 0.022772
| 0.535704
| 0.479899
| 0.429013
| 0.361681
| 0.310655
| 0.271437
| 0
| 0.001203
| 0.261325
| 11,258
| 294
| 120
| 38.292517
| 0.854257
| 0
| 0
| 0.478599
| 0
| 0
| 0.091232
| 0.033757
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023346
| false
| 0.003891
| 0.035019
| 0
| 0.081712
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d04d598a11b7e88dbf9f7dcbe48c016cf93d7f
| 2,280
|
py
|
Python
|
recsystem/embedder/post_embed.py
|
DOREMUS-ANR/recommender
|
027e0dcb3639f03204c67777e2e10aac8505a70a
|
[
"MIT"
] | 2
|
2017-03-28T15:48:18.000Z
|
2018-09-06T08:50:34.000Z
|
recsystem/embedder/post_embed.py
|
DOREMUS-ANR/recommender
|
027e0dcb3639f03204c67777e2e10aac8505a70a
|
[
"MIT"
] | null | null | null |
recsystem/embedder/post_embed.py
|
DOREMUS-ANR/recommender
|
027e0dcb3639f03204c67777e2e10aac8505a70a
|
[
"MIT"
] | null | null | null |
import os
import codecs
from shutil import copyfile
from gensim.models import KeyedVectors
from SPARQLWrapper import SPARQLWrapper, JSON
def ns_filter(embeddings_file, namespaces):
with open(embeddings_file) as file:
raw_embs = [l.strip() for l in file]
def belong_to_category(x):
for prefix in namespaces:
if x.startswith(prefix):
return True
return False
n = list(filter(belong_to_category, raw_embs))
head = '%d %s' % (len(n), raw_embs[0].split(' ')[1])
embeddings_temp = embeddings_file + "_temp"
with open(embeddings_temp, 'w') as f:
f.write("%s" % head)
for item in n:
f.write("\n%s" % item)
return embeddings_temp
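# Editor's note (not part of the original module): ns_filter() assumes the plain-text
# word2vec format, i.e. a "count dimension" header line followed by one
# "entity v1 v2 ..." line per vector. It keeps only entities whose URI starts with one
# of the namespace prefixes and rewrites the header count, e.g. a file that starts with
# "5 100" and keeps two matching lines is rewritten to start with "2 100".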
def get_label(uri, endpoint):
query = "select sql:BEST_LANGMATCH(?o, 'en;q=0.9, en-gb;q=0.8, *;q=0.1', 'en') as ?label" \
" where { <%s> skos:prefLabel ?o }" % uri
sparql = SPARQLWrapper(endpoint)
sparql.setQuery(query)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
r = results["results"]["bindings"][0]
if r is None or 'label' not in r:
print(uri)
return None
return r["label"]["value"]
def main(args):
what = args.feature
if what is None:
raise RuntimeError('You must specify the feature using -f or --feature')
chosen = args.featureList[what]
namespaces = chosen['namespaces'] if 'namespaces' in chosen else False
embeddings_file = '%s/%s.emb' % (args.embDir, what)
embeddings_run = embeddings_file
copyfile(embeddings_file, embeddings_file + '_raw')
if namespaces:
embeddings_run = ns_filter(embeddings_file, namespaces)
# L2 normalisation
# https://www.quora.com/Should-I-do-normalization-to-word-embeddings-from-word2vec-if-I-want-to-do-semantic-tasks
wv_from_text = KeyedVectors.load_word2vec_format(embeddings_run)
wv_from_text.init_sims(replace=True)
labels = ['%s %s' % (uri, get_label(uri, args.endpoint)) for uri in wv_from_text.index2entity]
with codecs.open(embeddings_file + '.l', 'w', 'utf-8') as fl:
fl.write('\n'.join(labels))
wv_from_text.save_word2vec_format(embeddings_file)
if embeddings_run.endswith('_temp'):
os.remove(embeddings_run)
| 31.232877
| 117
| 0.658333
| 316
| 2,280
| 4.60443
| 0.405063
| 0.09622
| 0.027491
| 0.030241
| 0.043986
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008385
| 0.215351
| 2,280
| 72
| 118
| 31.666667
| 0.804919
| 0.05614
| 0
| 0
| 0
| 0.019231
| 0.122383
| 0.010237
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.096154
| 0
| 0.269231
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d2ea6ce566b3bde93b03ec4fb9ce3e2422e739
| 6,292
|
py
|
Python
|
clustering/track_visualization.py
|
XiaoSanGit/wda_tracker
|
b68ec0edb9daa6cc495815ba9ca549b36eec0369
|
[
"MIT"
] | 24
|
2020-06-23T11:17:42.000Z
|
2022-03-29T00:38:09.000Z
|
clustering/track_visualization.py
|
XiaoSanGit/wda_tracker
|
b68ec0edb9daa6cc495815ba9ca549b36eec0369
|
[
"MIT"
] | 13
|
2020-07-07T03:59:02.000Z
|
2022-03-30T04:28:06.000Z
|
clustering/track_visualization.py
|
XiaoSanGit/wda_tracker
|
b68ec0edb9daa6cc495815ba9ca549b36eec0369
|
[
"MIT"
] | 9
|
2021-02-14T07:11:05.000Z
|
2021-12-23T12:47:08.000Z
|
import os
from tqdm import tqdm
import pandas as pd
from utilities.helper import get_bbox_middle_pos,drawBoundingBox
from clustering.clustering_utils import get_person_id_to_track,get_groundtruth_person_id_to_track
import cv2
from utilities.helper import *
from utilities.pandas_loader import load_csv
class Track_Visualization:
def __init__(self,dataset_base_folder,track_results_path,track_evaluation_results_path,cam_id, work_dirs,output_folder=None):
self.work_dirs = work_dirs
self.dataset_base_folder = dataset_base_folder
self.track_results_path = track_results_path
self.cam_id = cam_id
self.track_evaluation_results_path = track_evaluation_results_path
if output_folder is None:
self.output_folder = os.path.join(self.work_dirs, "clustering", "drawn_track_videos")
else:
self.output_folder = output_folder
self.track_colors = [(0,0,255),(0,255,0)]
self.track_circle_radi = [2,1]
def read_track_results(self,track_results_path):
track_results = pd.read_csv(track_results_path)
person_id_to_tracks = get_person_id_to_track(track_results)
return person_id_to_tracks
def read_ground_truth(self,person_identifier="ped_id"):
dataset_base_path = os.path.join(self.dataset_base_folder
,"cam_{}".format(self.cam_id)
,"coords_cam_{}.csv".format(self.cam_id))
ground_truth = load_csv(self.work_dirs, dataset_base_path)
ground_truth = ground_truth.groupby(["frame_no_gta", person_identifier], as_index=False).mean()
ground_truth = adjustCoordsTypes(ground_truth, person_identifier=person_identifier)
ground_truth = drop_unnecessary_columns(ground_truth)
person_id_to_track = get_groundtruth_person_id_to_track(ground_truth)
return person_id_to_track
def read_track_evaluation_results(self):
track_evaluation_results = pd.read_csv(self.track_evaluation_results_path)
return track_evaluation_results
def get_union_frame_nos(self,track1,track2):
def track_to_frame_nos(track):
result = []
for track_pos in track:
result.append(track_pos["frame_no_cam"])
return result
track1_frame_nos = track_to_frame_nos(track1)
track2_frame_nos = track_to_frame_nos(track2)
frame_no_union = set(track1_frame_nos).union(track2_frame_nos)
frame_no_union = list(frame_no_union)
frame_no_union.sort()
return frame_no_union
def draw_one_frame(self,img,until_frame_no,track,color,radius):
for track_pos in track:
bbox = track_pos["bbox"]
bbox = tuple(map(int, bbox))
person_pos = get_bbox_middle_pos(bbox)
person_pos = tuple(map(int, person_pos))
cv2.circle(img, person_pos, radius=radius, color=color, thickness=-1)
if until_frame_no == track_pos["frame_no_cam"]:
drawBoundingBox(img, bbox, color=color)
if until_frame_no <= track_pos["frame_no_cam"]:
break
def draw_all_frames(self,union_frames,tracks,hid,oid):
current_frame = union_frames[-1]
for current_frame in union_frames:
img_path = os.path.join(self.dataset_base_folder
, "cam_{}".format(self.cam_id)
, "image_{}_{}.jpg".format(current_frame, self.cam_id))
img = cv2.imread(img_path)
for track_idx,track in enumerate(tracks):
track_color = self.track_colors[track_idx % len(self.track_colors)]
circle_radius = self.track_circle_radi[track_idx % len(self.track_circle_radi)]
self.draw_one_frame(img,current_frame,track,track_color,circle_radius)
track_output_folder = os.path.join(self.output_folder,"hid_{}_oid_{}".format(hid,oid))
os.makedirs(track_output_folder,exist_ok=True)
track_output_image_path = os.path.join(track_output_folder,"image_{}_{}.jpg".format(current_frame, self.cam_id))
cv2.imwrite(track_output_image_path,img)
def run_visualization(self):
track_evaluation_results = self.read_track_evaluation_results()
gt_person_id_to_track = self.read_ground_truth()
tr_person_id_to_track = self.read_track_results(self.track_results_path)
for idx, eval_res_row in tqdm(track_evaluation_results.iterrows(),total=len(track_evaluation_results)):
hid = eval_res_row["hid"]
oid = eval_res_row["oid"]
if oid not in gt_person_id_to_track or hid not in tr_person_id_to_track:
break
gt_track = gt_person_id_to_track[oid]
tr_track = tr_person_id_to_track[hid]
union_frames = self.get_union_frame_nos(gt_track,tr_track)
self.draw_all_frames(union_frames,[gt_track,tr_track],hid,oid)
if __name__ == "__main__":
trv = Track_Visualization(dataset_base_folder="/home/philipp/Downloads/Recording_12.07.2019"
,track_results_path="/home/philipp/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
,track_evaluation_results_path="/home/philipp/work_dirs/clustering/evaluation_per_track_results.csv"
,work_dirs="/home/philipp/work_dirs"
,cam_id=2)
trv = Track_Visualization(dataset_base_folder="/net/merkur/storage/deeplearning/users/koehl/gta/Recording_12.07.2019_17"
,
track_results_path="/home/koehlp/Downloads/work_dirs/clustering/single_camera_refinement/track_results_2.txt"
,
track_evaluation_results_path="/home/koehlp/Downloads/work_dirs/clustering/evaluation_per_track_results.csv"
, work_dirs="/home/koehlp/Downloads/work_dirs"
, cam_id=2
, output_folder="/net/merkur/storage/deeplearning/users/koehl/gta/drawn_tracks_matched")
trv.run_visualization()
| 33.827957
| 139
| 0.665607
| 814
| 6,292
| 4.706388
| 0.179361
| 0.050117
| 0.036544
| 0.046985
| 0.381885
| 0.309058
| 0.22344
| 0.197076
| 0.142522
| 0.125816
| 0
| 0.010187
| 0.251113
| 6,292
| 185
| 140
| 34.010811
| 0.802844
| 0
| 0
| 0.057692
| 0
| 0
| 0.114785
| 0.08744
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086538
| false
| 0
| 0.076923
| 0
| 0.221154
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d30d1f5e1fa7c7ff3fa8d3f5343770ef436fc7
| 445
|
py
|
Python
|
Src/ZenCoding/zencoding/coda.py
|
JetBrains/ReSharperPowerToys
|
352d61acba98d71b4c7a63a1def9fe550b7a0e57
|
[
"Apache-2.0"
] | 18
|
2015-01-22T18:18:17.000Z
|
2021-11-08T09:49:53.000Z
|
Src/ZenCoding/zencoding/coda.py
|
JetBrains/ReSharperPowerToys
|
352d61acba98d71b4c7a63a1def9fe550b7a0e57
|
[
"Apache-2.0"
] | null | null | null |
Src/ZenCoding/zencoding/coda.py
|
JetBrains/ReSharperPowerToys
|
352d61acba98d71b4c7a63a1def9fe550b7a0e57
|
[
"Apache-2.0"
] | 8
|
2015-05-15T19:34:04.000Z
|
2022-03-19T07:00:15.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Coda plug-in
Created on Apr 20, 2009
@author: sergey
'''
import os
from zencoding import zen_core
from zencoding.settings import zen_settings
zen_core.newline = os.getenv('CODA_LINE_ENDING', zen_core.newline)
zen_core.insertion_point = '$$IP$$'
cur_line = 'hello world div>p'
cur_index = 17
abbr = zen_core.find_abbr_in_line(cur_line, cur_index)
if abbr:
print(zen_core.expand_abbr(abbr))
| 19.347826
| 66
| 0.74382
| 74
| 445
| 4.22973
| 0.581081
| 0.134185
| 0.089457
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023136
| 0.125843
| 445
| 22
| 67
| 20.227273
| 0.781491
| 0.21573
| 0
| 0
| 0
| 0
| 0.114706
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d43b5a5e1054b6e91b5c04e0aefe730f1c03da
| 958
|
py
|
Python
|
Scripts/simulation/world/rentable_lot_tuning.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/world/rentable_lot_tuning.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
Scripts/simulation/world/rentable_lot_tuning.py
|
velocist/TS4CheatsInfo
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
[
"Apache-2.0"
] | null | null | null |
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\world\rentable_lot_tuning.py
# Compiled at: 2014-09-09 00:07:16
# Size of source mod 2**32: 925 bytes
from sims4.tuning.tunable import TunableTuple, Tunable
from sims4.tuning.tunable_base import ExportModes
class RentableZoneTuning:
PRICE_MODIFIERS = TunableTuple(description='\n Global price modifiers for all rentable zones.\n ',
add=Tunable(description='\n Add modifier for the price to rent a lot.\n ',
tunable_type=float,
default=0.0),
multiply=Tunable(description='\n Multiplier for the price to rent a lot.\n ',
tunable_type=float,
default=1.0),
export_class_name='TunablePriceModifiers',
export_modes=(ExportModes.All))
| 50.421053
| 116
| 0.685804
| 138
| 958
| 4.695652
| 0.615942
| 0.009259
| 0.046296
| 0.067901
| 0.138889
| 0.138889
| 0.138889
| 0.138889
| 0.138889
| 0.138889
| 0
| 0.09247
| 0.209812
| 958
| 19
| 117
| 50.421053
| 0.76354
| 0.322547
| 0
| 0.166667
| 0
| 0
| 0.346812
| 0.032659
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d58a0cf4141bba29b28d7f0ec53b0cea18f50b
| 7,099
|
py
|
Python
|
grin-py/utils/MWGP_earningsEstimate.py
|
JPaulMora/grin-pool
|
c980fdbcae4edeaa661d36d5b6da6f7a49beed05
|
[
"Apache-2.0"
] | null | null | null |
grin-py/utils/MWGP_earningsEstimate.py
|
JPaulMora/grin-pool
|
c980fdbcae4edeaa661d36d5b6da6f7a49beed05
|
[
"Apache-2.0"
] | null | null | null |
grin-py/utils/MWGP_earningsEstimate.py
|
JPaulMora/grin-pool
|
c980fdbcae4edeaa661d36d5b6da6f7a49beed05
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
# Copyright 2018 Blade M. Doyle
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
###
# Estimate MWGrinPool earnings from historical data
# Input: --days, --c29gps, --c31gps
# Algorithm:
# Get a list of the blocks found by MWGrinPool within the requested block range
# For each pool-found-block:
# Calculate the theoretical rewards for a user with the provided GPS
# Generate a graph
import os
import sys
import argparse
from datetime import datetime, timedelta
try:
import requests
except Exception as e:
print("Error: This script requires the 'requests' module, please run `pip3 install requests`")
Graph = True
try:
import plotly
import plotly.graph_objs as go
except Exception as e:
Graph = False
mwURL = "https://api.mwgrinpool.com"
NanoGrin = 1.0/1000000000.0
SecondsInDay = float(60*60*24)
PPLNGSeconds = float(60*60*4)
def print_header():
print(" ")
print("############# MWGrinPool Average Daily Earnings #############")
print("## ")
if Graph == False:
print(" WARNING: ")
print(" This script requires the 'plotly' module to produce a graph")
print(" Please run: `pip3 install plotly`")
print(" (running in text mode)")
print(" ")
def print_footer(rewardTotal, c29gps, c31gps, numDays, startTS, endTS):
print(" ")
print(" ")
print(" Report for {} days - from: {} to: {}".format(numDays, startTS.strftime("%m-%d-%y %H:%M"), endTS.strftime("%m-%d-%y %H:%M")))
print(" Mining C29 at {}gps, C31 at {}gps".format(c29gps, c31gps))
print(" ")
print(" Total Rewards: {} Grin".format(rewardTotal))
print(" Avg Daily Reward = {} Grin".format(rewardTotal/NumDays))
print(" ")
def epoch_to_dt(epoch):
return datetime.fromtimestamp(epoch)
parser = argparse.ArgumentParser()
parser.add_argument("--days", help="Number of days to average over")
parser.add_argument("--c29gps", help="Miners C29 Graphs/second")
parser.add_argument("--c31gps", help="Miners C31 Graphs/second")
parser.add_argument("--debug", help="Print lots of debug info")
args = parser.parse_args()
print_header()
if args.days is None:
NumDays = float(input(" Number of days to average over: "))
else:
NumDays = float(args.days)
if NumDays > 62:
print(" ")
print(" -- Error: Please limit your query to 60 days to prevent excess load on our pool API")
print(" ")
sys.exit(1)
if args.c29gps is None:
C29Gps = float(input(" Miners C29 Graphs/second: "))
else:
C29Gps = float(args.c29gps)
if args.c31gps is None:
C31Gps = float(input(" Miners C31 Graphs/second: "))
else:
C31Gps = float(args.c31gps)
debug = args.debug is not None  # passing --debug (with any value) enables verbose output
EndTS = datetime.now()
startTS = EndTS - timedelta(days=NumDays)
# Get a list of the pool-found-blocks within the range
poolblocksURL = mwURL + "/pool/blocks/0,1440/timestamp,height"
poolblocksJSON = requests.get(url = poolblocksURL).json()
poolblocks = [block['height'] for block in poolblocksJSON if(block['timestamp'] >= startTS.timestamp() and block['timestamp'] <= EndTS.timestamp())]
poolblocks.sort()
debug and print("Pool Blocks found in range: {}".format(poolblocks))
print(" ")
print(" Getting Mining Data: ")
rewardTotal = 0
x = [startTS]
y = [0]
debug and print("Start Time: {} - {}".format(startTS, startTS.timestamp()))
debug and print("End Time: {} - {}".format(EndTS, EndTS.timestamp()))
debug or sys.stdout.write(" ")
sys.stdout.flush()
for blockHeight in poolblocks:
# For each pool block, get some information:
# Secondary Scale Value
# Any TX fees included in the block reward
grinBlockURL = mwURL + "/grin/block/{}/timestamp,height,secondary_scaling,fee".format(blockHeight)
grinblockJSON = requests.get(url = grinBlockURL).json()
# Pool GPS at that block height
poolGpsURL = mwURL + "/pool/stat/{}/gps".format(blockHeight)
poolGpsJSON = requests.get(url = poolGpsURL).json()
# Calculate theoretical miners reward
scale = (2**(1+31-24)*31)/float(max(29, grinblockJSON['secondary_scaling']))
minerValue = C29Gps + C31Gps*scale
poolValue = 0
for gps in poolGpsJSON['gps']:
if gps['edge_bits'] == 29:
poolValue += gps['gps']
else:
poolValue += gps['gps']*scale
debug and print("Miner value: {}, pool value: {}".format(minerValue, poolValue))
fullMinersReward = (minerValue/poolValue)*(60+grinblockJSON['fee']*NanoGrin)
tsNow = datetime.fromtimestamp(grinblockJSON['timestamp'])
timedelta = tsNow - startTS
# Check if we get the full reward or not
if(timedelta.total_seconds() < PPLNGSeconds):
minersReward = fullMinersReward * (timedelta.total_seconds()/PPLNGSeconds)
else:
minersReward = fullMinersReward
debug and print(" + Miners reward for {} block {}: {}".format(datetime.fromtimestamp(grinblockJSON['timestamp']).strftime('%c'), blockHeight, minersReward))
rewardTotal += minersReward
# Graph
x.append(tsNow)
timedelta = tsNow - startTS
debug and print("timedelta = {}".format(timedelta))
daysSinceStartTS = float(timedelta.total_seconds())/float(SecondsInDay)
debug and print("daysSinceStartTS = {}".format(daysSinceStartTS))
y.append(rewardTotal/daysSinceStartTS)
debug and print(" ")
debug or sys.stdout.write(".")
sys.stdout.flush()
x.append(EndTS)
y.append(rewardTotal/NumDays)
print_footer(rewardTotal, C29Gps, C31Gps, NumDays, startTS, EndTS)
if Graph == True:
print("Generating graph...")
graphName = "Avg Daily Reward: {} Grin".format(round(rewardTotal/NumDays, 2))
graphData = [go.Scatter(x=x, y=y, name=graphName)]
graphLayout = go.Layout(
title=go.layout.Title(text=graphName),
xaxis=go.layout.XAxis(
title=go.layout.xaxis.Title(
text='Time',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
yaxis=go.layout.YAxis(
title=go.layout.yaxis.Title(
text='Grin',
font=dict(
family='Courier New, monospace',
size=18,
color='#008000'
)
)
),
)
graphFigure = go.Figure(data=graphData, layout=graphLayout)
graph_name = "estimate-{}days.html".format(NumDays)
plotly.offline.plot(graphFigure, filename=graph_name)
| 35.318408
| 162
| 0.646993
| 861
| 7,099
| 5.311266
| 0.317073
| 0.013995
| 0.022742
| 0.006998
| 0.105839
| 0.076974
| 0.060354
| 0.060354
| 0.021867
| 0.021867
| 0
| 0.02341
| 0.21172
| 7,099
| 200
| 163
| 35.495
| 0.793781
| 0.170306
| 0
| 0.219178
| 0
| 0
| 0.224309
| 0.015193
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020548
| false
| 0
| 0.047945
| 0.006849
| 0.075342
| 0.239726
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d7ebc0b913cd1eef91933fd5bfa8848faaf124
| 10,700
|
py
|
Python
|
cpd_analysis.py
|
jfmalloy1/Patents
|
734e62497acfbd9be42980b310379979415ab924
|
[
"MIT"
] | null | null | null |
cpd_analysis.py
|
jfmalloy1/Patents
|
734e62497acfbd9be42980b310379979415ab924
|
[
"MIT"
] | null | null | null |
cpd_analysis.py
|
jfmalloy1/Patents
|
734e62497acfbd9be42980b310379979415ab924
|
[
"MIT"
] | null | null | null |
# import igraph as ig
# import numpy as np
import pickle
import pandas as pd
from tqdm import tqdm
import os
import heapq
import scipy.stats as stats
from random import sample
def build_cpd_df(fp):
""" Takes 29 separate compound data files and combines them into a single pandas dataframe for ease of access
Args:
fp (string): Filepath to SureChemBL data files (assuming G drive goes to jmalloy3 Google Account)
Returns:
None - but does write a pickled dataframe to SureChemBL_Patents/Cpd_Data/ directory
"""
dfs = []
for f in tqdm(os.listdir(fp)):
if f.endswith(".txt"):
dfs.append(pd.read_csv(fp + f, sep="\t", header=0))
df = pd.concat(dfs, ignore_index=True)
print(df)
pickle.dump(df, file=open(fp + "SureChemBL_allCpds.p", "wb"))
del df
def find_highest_degrees(df, n, start, stop):
""" Finds the n highest-degree compounds within a specific date range
Saves various data associated with those n comopunds - smiles, inchi,
inchikey, degree, preferential attachment value
Args:
df (pandas dataframe): dataframe containing all SureChemBL compounds
n (int): the number of highest-degree compounds to select
start (int): 1st year of the range
stop (int): last year of the range
"""
print("----------", start, stop, "----------")
#Finding the top 10 preferential attachment compounds (from 1980-1984 as a test)
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_" +
str(start) + "_" + str(stop) + ".p", "rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_" +
str(start) + "_" + str(stop) + ".p", "rb"))
#Find n compounds with largest degree
highest_degree_cpds = heapq.nlargest(n,
full_id_degrees,
key=full_id_degrees.get)
highest_degree_cpds_df = df[df["SureChEMBL_ID"].isin(highest_degree_cpds)]
pref_attach_values = list(pref_attach_dict.values())
#Extra information to be added to the csv output file
degrees = []
pref_attach_highestCpd_values = []
pref_attach_percentiles = []
for cpd in tqdm(highest_degree_cpds_df["SureChEMBL_ID"]):
#Degree of compound
degrees.append(full_id_degrees[cpd][-1])
#Preferential attachment value
pref_attach_highestCpd_values.append(pref_attach_dict[cpd])
#Percentile of preferential attachment value
pref_attach_percentiles.append(
stats.percentileofscore(pref_attach_values, pref_attach_dict[cpd]))
highest_degree_cpds_df["degree"] = degrees
highest_degree_cpds_df["pref_attach_value"] = pref_attach_highestCpd_values
highest_degree_cpds_df["pref_attach_percentile"] = pref_attach_percentiles
highest_degree_cpds_df.to_csv(
"G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data/highest_degree_data_" +
str(start) + "_" + str(stop) + "_1000.csv")
print()
def find_llanos_cpds(fp, df):
""" Tests various compounds found in Llanos et al (2019) in SureChemBL data
Llanos et al used Reaxys data to find the most popular compounds. This checks
where those compounds appear, if at all, in SureChemBL patent data
Args:
df (pandas dataframe): dataframe of all SureChemBL chemistry
"""
cpds_1980_2015_inchi = {
"acetic anhydride":
"InChI=1S/C4H6O3/c1-3(5)7-4(2)6/h1-2H3",
"methanol":
"InChI=1S/CH4O/c1-2/h2H,1H3",
"methyl iodide":
"InChI=1S/CH3I/c1-2/h1H3",
"diazomethane":
"InChI=1S/CH2N2/c1-3-2/h1H2",
"formaldehyde":
"InChI=1S/CH2O/c1-2/h1H2",
"benzaldehyde":
"InChI=1S/C7H6O/c8-6-7-4-2-1-3-5-7/h1-6H",
"copper(II) oxide":
"InChI=1S/Cu.O",
"ethanol":
"InChI=1S/C2H6O/c1-2-3/h3H,2H2,1H3",
"benzoyl chloride":
"InChI=1S/C7H5ClO/c8-7(9)6-4-2-1-3-5-6/h1-5H",
"carbon monoxide":
"InChI=1S/CO/c1-2",
"water (2000)":
"InChI=1S/H2O/h1H2",
"Trifluoroacetic acid (2000)":
"InChI=1S/C2HF3O2/c3-2(4,5)1(6)7/h(H,6,7)",
"Phenylacetylene (2000)":
"InChI=1S/C8H6/c1-2-8-6-4-3-5-7-8/h1,3-7H",
"benzyl bromide (2000)":
"InChI=1S/C7H7Br/c8-6-7-4-2-1-3-5-7/h1-5H,6H2"
}
#Find stats for Llanos compounds - use 2015 data for stats (I really need to make a consensus graph)
full_id_degrees = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\Degrees\\full_id_degrees_2015_2019.p",
"rb"))
pref_attach_dict = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\pref_attach_dict_2015_2019.p",
"rb"))
pref_attach_values = list(pref_attach_dict.values())
#Loop through Llanos compounds
with open(fp + "llanos_cpds.csv", "a") as f:
f.write(
"name,inchi,SureChemBL_ID,degree,pref_attach_value,pref_attach_percentile\n"
)
for name, inchi in cpds_1980_2015_inchi.items():
s = df[df["InChI"] == inchi]
if not s.empty: #if SureChemBL holds that compound, save id & stats
#Degree of compound
degree = full_id_degrees[s.iloc[0]["SureChEMBL_ID"]][-1]
#Preferential attachment value
pref_attach_value = pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]]
#Percentile of preferential attachment value
pref_attach_percentile = stats.percentileofscore(
pref_attach_values,
pref_attach_dict[s.iloc[0]["SureChEMBL_ID"]])
f.write(name + ",\"" + inchi + "\"," +
s.iloc[0]["SureChEMBL_ID"] + "," + str(degree) + "," +
str(pref_attach_value) + "," +
str(pref_attach_percentile) + "\n")
else: #if not, no name nor stats
f.write(name + ",\"" + inchi + "\",na,na,na,na\n")
def build_month_increments(start, stop):
""" Build all monthly increments from the start year to stop year in the
format YEAR-MONTH
Args:
start (int): start year of increments
stop (int): end year of increments
Returns:
list: list of strings holding the YEAR-MONTH increments
"""
months = []
while start <= stop:
for month in [
"01", "02", "03", "04", "05", "06", "07", "08", "09", "10",
"11", "12"
]:
months.append(str(start) + "-" + month)
start += 1
return months
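# Quick sanity sketch for build_month_increments (illustrative helper, not part
# of the original pipeline): a two-year range expands to 24 "YYYY-MM" strings.
def _check_month_increments():
    months = build_month_increments(1980, 1981)
    assert months[0] == "1980-01" and months[-1] == "1981-12"
    assert len(months) == 24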
def sample_compounds_unique(n, months, cpds, cpd_df):
""" Sample compounds which are uniquely added in a specific month
Uniqueness is determined by checking that a compound is first added in a month
and has not been present in the patent record before that month.
Args:
n (int): Number of compounds to sample every month
months (list): list of months to sample from
cpds (list): all SureChemBL IDs of compounds added in a specific month
cpd_df (pandas dataframe): Master dataframe of all compounds
"""
sample_inchis = {}
print("----- Sampling unique compounds -----")
for i in tqdm(range(len(months))):
offset = 216 #Account for starting in 1980 instead of 1962
#Only sample if there are more than n compounds; otherwise take them all
if len(cpds[i+offset]) > n:
sample_cpds = sample(cpds[i+offset], n)
else:
sample_cpds = cpds[i+offset]
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds)]
sample_inchis[months[i]] = list(sub_df["InChI"])
print("\n----- Saving compounds -----")
pickle.dump(sample_inchis, file=open("Data/sample_inchi_1000_NEW.p", "wb"))
def sample_compounds(n1, n2, months, cpd_df):
""" Sample n compounds from each month, initially with overlap allowed
//TODO: fix so that only unique-to-that-month compounds are sampled
Args:
n1 (int): first number of compounds to sample per month
n2 (int): second number of compounds to sample per month
months (list): list of YEAR-MONTH strings, e.g. "1980-01"
cpd_df (pandas dataframe): contains information for each compound in SureChemBL, including InChI
Returns:
None: sampled InChIs are saved to pickle files rather than returned
"""
#Inchis for all sampled compounds
sample_inchis_n1 = {}
sample_inchis_n2 = {}
print("----- Sampling Compounds ------\n")
for month in tqdm(months):
cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_"
+ month + ".p", "rb"))
sample_cpds_n1 = sample(cpds, n1)
sample_cpds_n2 = sample(cpds, n2)
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n1)]
sample_inchis_n1[month] = list(sub_df["InChI"])
sub_df = cpd_df[cpd_df["SureChEMBL_ID"].isin(sample_cpds_n2)]
sample_inchis_n2[month] = list(sub_df["InChI"])
#Save memory by removing the compound dataframe and monthly compounds
del (cpd_df)
del (cpds)
#Save sampled inchis to pickle files
print("\n----- Saving Data -----")
pickle.dump(sample_inchis_n1, file=open("Data/sample_inchi_100.p", "wb"))
pickle.dump(sample_inchis_n2, file=open("Data/sample_inchi_1000.p", "wb"))
def main():
# ### Highest Degree compounds ###
data_fp = "G:\\Shared drives\\SureChemBL_Patents\\Cpd_Data\\"
# # build_cpd_df(data_fp) #NOTE: only needs to be run once
cpd_df = pickle.load(file=open(data_fp + "SureChemBL_allCpds.p", "rb"))
print(cpd_df.columns)
# ### Statistics over highest degree compounds ###
# n = 1000 #Number of compounds to find
# for range in [(1980, 1984), (1985, 1989), (1990, 1994), (1995, 1999),
# (2000, 2004), (2005, 2009), (2010, 2014), (2015, 2019)]:
# find_highest_degrees(cpd_df, n, range[0], range[1])
# ### Testing Llanos et al (2019) compounds ###
# find_llanos_cpds(data_fp, cpd_df)
### Sampling compounds for MA analysis ###
month_unique_cpds = pickle.load(file=open(
"G:\\Shared drives\\SureChemBL_Patents\\CpdPatentIdsDates\\unique_cpds_AllMonths.p",
"rb"))
sample_compounds_unique(1000, build_month_increments(1980, 2019),
month_unique_cpds, cpd_df)
# sample_compounds(100, 1000, build_month_increments(1980, 2019), cpd_df)
### MA Analysis ###
if __name__ == "__main__":
main()
| 36.148649
| 113
| 0.622056
| 1,439
| 10,700
| 4.455872
| 0.246699
| 0.043668
| 0.021834
| 0.028696
| 0.279008
| 0.218497
| 0.171085
| 0.144261
| 0.097473
| 0.097473
| 0
| 0.048551
| 0.255047
| 10,700
| 295
| 114
| 36.271186
| 0.755865
| 0.330654
| 0
| 0.084967
| 0
| 0.039216
| 0.263647
| 0.143293
| 0
| 0
| 0
| 0.00339
| 0
| 1
| 0.045752
| false
| 0
| 0.045752
| 0
| 0.098039
| 0.052288
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0d801bb287e5fc7e3b701b27f659bf6cdfd48e3
| 303
|
py
|
Python
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-10-24T21:43:00.000Z
|
2018-10-24T21:43:00.000Z
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 1
|
2018-03-10T01:25:47.000Z
|
2018-03-10T03:33:36.000Z
|
src/tests/pyfrc_test.py
|
SpookyWoogin/robot2018
|
a8ddf6a64b883904b15031e0ae13b2056faed4f5
|
[
"MIT"
] | 6
|
2018-01-13T17:54:31.000Z
|
2018-02-13T23:46:50.000Z
|
def test_drivetrain_nt(Notifier):
import networktables
from robot import Rockslide
robot = Rockslide()
robot.robotInit()
drivetrain = robot.drivetrain
drivetrain.periodic()
assert networktables.NetworkTables.getTable("/Drivetrain/Left").getNumber("Position", None) == 0.0
| 25.25
| 102
| 0.726073
| 31
| 303
| 7.032258
| 0.612903
| 0.12844
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007968
| 0.171617
| 303
| 11
| 103
| 27.545455
| 0.860558
| 0
| 0
| 0
| 0
| 0
| 0.079208
| 0
| 0
| 0
| 0
| 0
| 0.125
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0da89a6bb254fa22755c06a1a618c46fe33ed81
| 3,775
|
py
|
Python
|
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
media/limited_series.py
|
FellowHashbrown/MediaQueue
|
0844649709698c66b7ed14e70436b3830ab18627
|
[
"MIT"
] | null | null | null |
import os
from json import dump
from typing import List
from media import Episode, Show
from options import options
class LimitedSeries(Show):
"""A LimitedSeries is a Show that has only 1 Season of Episodes.
In this, only the Episodes need to be specified
:param name: The name of this LimitedSeries
:param episodes: A list of Episodes in this LimitedSeries
:param provider: The name of the streaming provider this LimitedSeries is located on
:param person: The person that is watching this LimitedSeries
:keyword started: Whether or not this LimitedSeries has been started (Defaults to False)
:keyword finished: Whether or not this LimitedSeries has been finished (Defaults to False)
:keyword json: The JSON object to load a LimitedSeries object from
:keyword filename: The JSON file to load a LimitedSeries object from
:raises FileNotFoundError: When the JSON file cannot be found
:raises KeyError: When the required parameters are missing from the JSON object
"""
FOLDER = "limitedSeries"
def __init__(self, name: str = None, provider: str = None,
person: str = None, episodes: List[Episode] = None,
*, started: bool = False, finished: bool = False,
json: dict = None, filename: str = None):
super().__init__(name, provider, person,
episodes=episodes,
started=started, finished=finished,
json=json, filename=filename)
def __str__(self):
return "LimitedSeries({}, {}, {}, {}, {}, {}, {})".format(
self.get_id(), self.get_name(),
self.get_provider(), self.get_person(),
"Started" if self.is_started() else "Not Started",
"Finished" if self.is_finished() else "Not Finished",
", ".join([str(episode) for episode in self.get_episodes()])
)
def __eq__(self, limited_series: 'LimitedSeries'):
if not isinstance(limited_series, LimitedSeries):
return False
return (limited_series.get_name() == self.get_name() and
limited_series.get_episodes() == self.get_episodes() and
limited_series.get_provider() == self.get_provider() and
limited_series.get_person() == self.get_person() and
limited_series.is_started() is self.is_started() and
limited_series.is_finished() is self.is_finished())
# # # # # # # # # # # # # # # # # # # # # # # # #
def to_csv(self) -> str:
"""Returns the CSV representation of this LimitedSeries object"""
show_csv = "\"{}\",{},{},{},{}".format(
self.get_name(), self.get_provider(),
self.get_person(),
self.is_started(), self.is_finished()
)
episodes_csv = "\n".join(episode.to_csv() for episode in self.get_episodes())
return f"LimitedSeries\n{show_csv}\n{episodes_csv}"
def to_json(self) -> dict:
"""Returns the JSON representation of this LimitedSeries object"""
super_json = super().to_json()
super_json.pop("seasons") # A limited series shouldn't have Seasons
return super_json
def save(self):
"""Saves this LimitedSeries object into a JSON file"""
if not os.path.exists(f"{options.get_base_dir()}/data"):
os.mkdir(f"{options.get_base_dir()}/data")
if not os.path.exists(f"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}"):
os.mkdir(f"{options.get_base_dir()}/data/{LimitedSeries.FOLDER}")
with open("{}/data/{}/{}.json".format(options.get_base_dir(), LimitedSeries.FOLDER, self.get_id()), "w") as jsonfile:
dump(self.to_json(), jsonfile, indent=4)
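# Usage sketch (hypothetical data; constructing Episode objects is assumed from
# the media package and not shown here): build a LimitedSeries and render it as CSV.
def _example_limited_series(episodes):
    series = LimitedSeries("Hypothetical Show", "SomeProvider", "Someone", episodes)
    return series.to_csv()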
| 45.481928
| 125
| 0.625166
| 464
| 3,775
| 4.931034
| 0.232759
| 0.042832
| 0.034965
| 0.03715
| 0.225524
| 0.191434
| 0.141608
| 0.11014
| 0.068182
| 0.034091
| 0
| 0.000712
| 0.255629
| 3,775
| 82
| 126
| 46.036585
| 0.813523
| 0.279205
| 0
| 0
| 0
| 0
| 0.133663
| 0.077304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.098039
| 0.019608
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0dc67e2fb068fd1e2da407f360e7696247d97ff
| 8,104
|
py
|
Python
|
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
api.py
|
gitroulette/gitroulette
|
0c53cdf843b202efa7d0e7a3fcbcb0a7bc7f0b0b
|
[
"Apache-2.0"
] | null | null | null |
import json
import random
from flask import Blueprint
from flask import request
from flask import session
from sqlalchemy import and_
from sqlalchemy import or_
from urlparse import urlparse
# from flask import current_app
from gitRoulette import auth
from gitRoulette import models
from gitRoulette.utils import request_utils
api = Blueprint('api', __name__)
db = models.db
@api.route('/new_for_review', methods=['POST'])
@auth.login_required
def new_for_review():
if request.method == 'POST':
req_data = json.loads(request.data)
language_list = request_utils.get_url_languages(
req_data['url'], session['github_token'][0]).keys()
# FIXME: change name to description in post request
# FIXME: change time to be taken on the server
entry = models.Url(name=req_data['name'],
url=req_data['url'],
github_user=req_data['github_user'])
for l in language_list:
language = models.Language(language=l, url=entry)
db.session.add(language)
db.session.add(entry)
db.session.commit()
return str(entry.id)
@api.route('/remove_from_list', methods=['POST'])
@auth.login_required
def remove_from_queue():
req_data = json.loads(request.data)
url = models.Url.query.filter(
and_(models.Url.github_user == session['github_user'],
models.Url.name == req_data['name'])).first()
languages = url.languages.all()
for language in languages:
db.session.delete(language)
db.session.delete(url)
db.session.commit()
return "test"
@api.route('/new_something', methods=['POST'])
@auth.login_required
def new_something():
if request.method == 'POST':
req_data = json.loads(request.data)
github_user = models.GitUser.query.filter_by(
github_user=req_data['github_user']).first()
if github_user is None:
return "no user"
# checks if user is trying to add to himself
elif req_data['github_user'] == session['github_user']:
return "cannot add to yourself"
else:
something = github_user.somethings.filter_by(
comment_id=req_data['comment_id']).first()
if something is None:
something = models.Something(comment_id=req_data['comment_id'],
gituser=github_user)
db.session.add(something)
db.session.commit()
return "test"
@api.route('/somethings_by_url_id/<url_id>', methods=['GET'])
@auth.login_required
def somethings_by_url_id(url_id):
# TODO: maybe we need this for something
url = models.Url.query.filter_by(id=url_id).first()
somethings = [s.comment_id for s in url.somethings.all()]
return json.dumps({"somethings": somethings})
@api.route('/somethings_by_username/<username>', methods=['GET'])
@auth.login_required
def somethings_by_username(username):
github_user = models.GitUser.query.filter_by(github_user=username).first()
somethings = [s.comment_id for s in github_user.somethings.all()]
print(somethings)
return json.dumps({"somethings": somethings})
@api.route('/languages_by_url_id/<url_id>', methods=['GET'])
@auth.login_required
def languages_by_url_id(url_id):
url = models.Url.query.filter_by(id=url_id).first()
languages = url.languages.all()
language_list = [l.language for l in languages]
ret_val = {"languages": language_list}
return json.dumps(ret_val)
@api.route('/new_github_user', methods=['POST'])
@auth.login_required
def new_github_user():
# TODO: modify so that a user can add/remove/replace skills;
# TODO: case: no skills on github..
# TODO: add a dropdown with common skills.
if request.method == 'POST':
req_data = json.loads(request.data)
gituser = models.GitUser.query.filter_by(
github_user=session['github_user']).first()
if gituser is None:
gituser = models.GitUser(github_user=session['github_user'])
db.session.add(gituser)
for skill in req_data['skills']:
_s = models.Skill(skill=skill, gituser=gituser)
db.session.add(_s)
db.session.commit()
return "success"
@api.route('/comments_by_url_id/<url_id>')
@auth.login_required
def comments_by_url_id(url_id):
# FIXME: at the moment we only take pulls comments, no issues.
# issues will show comments in "conversation" too.
# Should we do another request if entry_type is pull?
url = models.Url.query.filter_by(id=url_id).first()
pathArray = urlparse(url.url).path.split('/')
github_user = pathArray[1]
project = pathArray[2]
entry_type = pathArray[3]
entry_id = pathArray[4]
endpoint = 'repos/' + github_user + "/" + project + "/"
endpoint += entry_type + "s/" + entry_id + "/comments"
comments = auth.github.get(endpoint)
# the response has nothing to do with the url_id restructure.
# needs work. we need a better standard
def lmbd(comment): comment.update({'url_name': url.name, 'url_id': url.id})
return json.dumps(
{project: [lmbd(comment) or comment for comment in comments.data]})
@api.route('/decline_comment', methods=['POST'])
@auth.login_required
def decline_comment():
req_data = json.loads(request.data)
url = models.Url.query.filter_by(id=req_data["url_id"]).first()
pathArray = urlparse(url.url).path.split('/')
github_user = pathArray[1]
project = pathArray[2]
entry_type = pathArray[3]
entry_id = pathArray[4]
endpoint = 'repos/' + github_user + "/" + project + "/"
endpoint += entry_type + "s/" + entry_id + "/comments"
post_data = {'body': 'No thanks!',
'in_reply_to': int(req_data["comment_id"])}
headers = {'Accept': 'application/json',
'Content-Type': 'application/json; charset=utf-8'}
resp = auth.github.post(endpoint, data=post_data, headers=headers,
format='json')
return json.dumps({"response": resp.data})
@api.route('/skills_by_username/<github_user>', methods=['GET'])
@auth.login_required
def skills_by_username(github_user):
endpoint = "/users/" + github_user + "/repos"
repos = auth.github.get(endpoint).data
languages = [language for repo in repos for language in
request_utils.get_url_languages(
repo["html_url"], session['github_token'][0]).keys()]
print(languages)
return json.dumps(list(set(languages)))
@api.route('/saved_skills_by_username/<github_user>', methods=['GET'])
@auth.login_required
def saved_skills_by_username(github_user):
user = models.GitUser.query.filter_by(github_user=github_user).first()
skills = user.skills.all()
skills_list = [s.skill for s in skills]
return json.dumps(list(set(skills_list)))
@api.route('/urls_by_username/<github_user>', methods=['GET'])
@auth.login_required
def saved_urls_by_username(github_user):
urls = models.Url.query.filter_by(github_user=github_user).all()
existing_urls = []
for url in urls:
entry = {'id': url.id,
'name': url.name,
'url': url.url,
'github_user': url.github_user}
existing_urls.append(entry)
return json.dumps(existing_urls)
@api.route('/url_to_review', methods=['GET'])
@auth.login_required
def url_to_review():
user = models.GitUser.query.filter_by(github_user=session['github_user']).first()
skills = user.skills.all()
# We need at least one condition, otherwise the query would return everything.
if len(skills) == 0:
return ''
conditions = [getattr(models.Language, 'language').ilike('%{}%'.format(s.skill)) for s in skills]
q = models.Language.query.filter(or_(*conditions)).distinct(models.Language.url_id)
language_entries = q.all()
random_url_id = random.choice(language_entries).url_id
url = models.Url.query.filter_by(id=random_url_id).first()
return str(url.url)
| 32.15873
| 101
| 0.659057
| 1,080
| 8,104
| 4.750926
| 0.167593
| 0.077958
| 0.043072
| 0.050672
| 0.479828
| 0.415708
| 0.330345
| 0.263302
| 0.233093
| 0.207952
| 0
| 0.001876
| 0.210513
| 8,104
| 251
| 102
| 32.286853
| 0.800094
| 0.083292
| 0
| 0.287356
| 0
| 0
| 0.110871
| 0.030213
| 0
| 0
| 0
| 0.003984
| 0
| 1
| 0.08046
| false
| 0
| 0.063218
| 0
| 0.235632
| 0.022989
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0ddf344239a48edf5e77538da1c1fa89461c624
| 1,556
|
py
|
Python
|
translator/logger.py
|
dNationCloud/jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | 7
|
2021-04-14T11:30:03.000Z
|
2021-05-17T11:26:50.000Z
|
translator/logger.py
|
dNationCloud/jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | 10
|
2021-01-14T07:18:55.000Z
|
2021-10-01T12:56:39.000Z
|
translator/logger.py
|
dNationCloud/kubernetes-jsonnet-translator
|
94d9d1b56d21a357fcab8adc555aa4630234d19c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2020 The dNation Jsonnet Translator Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import logging.handlers
import sys
from pythonjsonlogger import jsonlogger
LOGGER_NAME = "translator"
LOG_FORMAT = "%(asctime)s - [%(levelname)-5s] - %(message)s"
FORMATTER = {
"default": logging.Formatter(LOG_FORMAT),
"json": jsonlogger.JsonFormatter(LOG_FORMAT),
}
def get_logger():
return logging.getLogger(LOGGER_NAME)
def get_console_handler(formatter):
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(formatter)
return console_handler
def set_logger(level, format):
"""Sets the threshold for the logger to defined level and format
Args:
level (str): Logger threshold.
format (str): Logger format.
Return:
None
"""
formatter = FORMATTER[format]
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(level)
logger.addHandler(get_console_handler(formatter))
logger.propagate = False
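# Minimal usage sketch (illustrative helper, not part of the original module):
# configure JSON-formatted logging at INFO level and emit one record through
# the console handler installed above.
def _example_configure_logging():
    set_logger("INFO", "json")
    get_logger().info("translator started")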
| 27.785714
| 77
| 0.737147
| 202
| 1,556
| 5.60396
| 0.539604
| 0.053004
| 0.022968
| 0.028269
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007037
| 0.178021
| 1,556
| 55
| 78
| 28.290909
| 0.87803
| 0.482648
| 0
| 0
| 0
| 0
| 0.086728
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.181818
| 0.045455
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e1dfb1774454a45bec65b7f4f0e603d7f1da9e
| 13,580
|
py
|
Python
|
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | null | null | null |
lib/Protocol/EmNetconfProtocol.py
|
multi-service-fabric/element-manager
|
e550d1b5ec9419f1fb3eb6e058ce46b57c92ee2f
|
[
"Apache-2.0"
] | 1
|
2020-04-02T01:17:43.000Z
|
2020-04-02T01:17:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright(c) 2019 Nippon Telegraph and Telephone Corporation
# Filename: EmNetconfProtocol.py
'''
Protocol processing section (Netconf)
'''
import traceback
import time
import json
from ncclient import manager
from ncclient import operations
import GlobalModule
import EmNetconfClient
from EmCommonLog import decorater_log
from EmCommonLog import decorater_log_in_out
class EmNetconfProtocol(object):
'''
Protocol processing section (Netconf) class
'''
__CONNECT_OK = 1
__CONNECT_CAPABILITY_NG = 2
__CONNECT_NO_RESPONSE = 3
@decorater_log_in_out
def connect_device(self, device_info):
'''
Device connection control
Conduct SSH connection to applicable device as request information.
Explanation about parameter:
device_info: Device information
Platform name
OS
Firm version
Login ID
Password
IPv4 address for management
Prefix of IPv4 address for management
Device information to be used for ncclient.
(to be set only when necessary)
Port No. to be used for ncclient.
(to be set only when necessary)
Explanation about return value:
Connection result :
int (1: Normal, 2: Capability Abnormal, 3: No response)
'''
parse_json = json.loads(device_info)
device_info_dict = parse_json["device_info"]
self.__device_ip = device_info_dict["mgmt_if_address"]
username = device_info_dict["username"]
password = device_info_dict["password"]
device_info = device_info_dict.get("device_info")
port_number = device_info_dict.get("port_number")
if device_info is not None:
device_params = {'name': str(device_info)}
else:
device_params = None
GlobalModule.EM_LOGGER.debug("device_params = %s", device_params)
if port_number is None:
port_number = 830
GlobalModule.EM_LOGGER.debug("port_number = %s", port_number)
result, timer_protocol = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Timer_netconf_protocol")
if result is not True:
timeout_val = 60
GlobalModule.EM_LOGGER.debug(
"Netconf Protocol Timer default Setting: %s", timeout_val)
else:
timeout_val = timer_protocol / 1000
GlobalModule.EM_LOGGER.debug(
"Netconf Protocol Timer: %s", timeout_val)
result, timer_connection = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Timer_connection_retry")
if result is not True:
retrytimer_val = 5
GlobalModule.EM_LOGGER.debug(
"Connection Retry Timer default Setting: %s", retrytimer_val)
else:
retrytimer_val = timer_connection / 1000.0
GlobalModule.EM_LOGGER.debug(
"Connection Retry Timer: %s", retrytimer_val)
result, retry_num = GlobalModule.EM_CONFIG.\
read_sys_common_conf("Connection_retry_num")
if result is not True:
retry_num_val = 5
GlobalModule.EM_LOGGER.debug(
"Connection Retry Num default Setting: %s", retry_num_val)
else:
retry_num_val = retry_num
GlobalModule.EM_LOGGER.debug(
"Connection Retry Num: %s", retry_num_val)
for count in range(retry_num_val):
try:
self.__connection = EmNetconfClient.connect_ssh(
host=self.__device_ip,
port=port_number,
username=username,
password=password,
timeout=timeout_val,
hostkey_verify=False,
device_params=device_params,
device_info=device_info_dict)
break
except Exception as exception:
GlobalModule.EM_LOGGER.debug(
"Connect Error:%s", str(type(exception)))
GlobalModule.EM_LOGGER.debug(
"Connect Error args: %s", str(exception.args))
GlobalModule.EM_LOGGER.debug(traceback.format_exc())
GlobalModule.EM_LOGGER.debug(
"Connection Wait Counter: %s", count)
time.sleep(retrytimer_val)
if count < (retry_num_val - 1):
continue
return self.__CONNECT_NO_RESPONSE
device_capability_list = self.__connection.server_capabilities
GlobalModule.EM_LOGGER.debug(
"device_capability_list: %s", device_capability_list)
capability_judge = False
for cap in self.__capability_list:
for device_cap in device_capability_list:
if cap == device_cap:
capability_judge = True
if capability_judge is not True:
GlobalModule.EM_LOGGER.debug(
"Connect Error:exceptions.MissingCapabilityError")
return self.__CONNECT_CAPABILITY_NG
self.__connection.raise_mode = operations.RaiseMode.NONE
GlobalModule.EM_LOGGER.info("107001 SSH Connection Open for %s",
self.__device_ip)
return self.__CONNECT_OK
@decorater_log_in_out
def send_control_signal(self, message_type, send_message):
'''
Transmit device control signal
Transmit Netconf to device and returns response signal.
Explanation about parameter:
message_type: Message type(response message)
discard-changes
validate
lock
unlock
get-config
edit-config
confirmed-commit
commit
send_message: Send message
get-config:XML format (<config></config>)
edit-config:XML format (<config></config>)
Not necessary in case of other message types.
Explanation about return value:
Send result : boolean (True:Normal,False:Abnormal)
Response signal : str (Netconf response signal
(Returns "NetconfSendOK" to return value 1
when rpc-error is received successfully.))
'''
is_judg_result, judg_message_type = self.__judg_control_signal(
message_type)
GlobalModule.EM_LOGGER.debug("__send_signal_judg:%s", is_judg_result)
GlobalModule.EM_LOGGER.debug("judg_message_type:%s", judg_message_type)
if is_judg_result is False:
GlobalModule.EM_LOGGER.debug("__send_signal_judg NG")
return False, None
GlobalModule.EM_LOGGER.debug("__send_signal_judg OK")
try:
if judg_message_type == "get_config":
GlobalModule.EM_LOGGER.debug("judg_message_type:get_config")
GlobalModule.EM_LOGGER.debug(
"send_message: %s", send_message)
receive_message = self.__connection.get_config(
source='running',
filter=('subtree', send_message)).data_xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
elif judg_message_type == "edit_config":
GlobalModule.EM_LOGGER.debug("judg_message_type:edit_config")
GlobalModule.EM_LOGGER.debug(
"send_message: %s", send_message)
receive_message = self.__connection.edit_config(
config=send_message).xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
elif judg_message_type == "confirmed_commit":
GlobalModule.EM_LOGGER.debug(
"judg_message_type:confirmed_commit")
is_send_result, return_value = \
GlobalModule.EM_CONFIG.read_sys_common_conf(
"Timer_confirmed-commit")
GlobalModule.EM_LOGGER.debug("read_sys_common:%s",
is_send_result)
if is_send_result is False:
GlobalModule.EM_LOGGER.debug("read_sys_common NG")
return False, None
GlobalModule.EM_LOGGER.debug("read_sys_common OK")
GlobalModule.EM_LOGGER.debug("return_value:%s", return_value)
return_value = return_value / 1000
receive_message = self.__connection.commit(
confirmed=True, timeout=str(return_value)).xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
else:
GlobalModule.EM_LOGGER.debug("judg_message_type:%s",
judg_message_type)
try:
method = getattr(self.__connection, judg_message_type)
receive_message = method().xml
GlobalModule.EM_LOGGER.debug(
"receive_message: %s", receive_message)
GlobalModule.EM_LOGGER.debug(
"receive_message type: %s", type(receive_message))
except AttributeError:
GlobalModule.EM_LOGGER.debug("AttributeError:%s",
judg_message_type)
return False, None
GlobalModule.EM_LOGGER.info("107003 Sending %s to %s",
message_type, self.__device_ip)
except Exception as exception:
GlobalModule.EM_LOGGER.warning(
"207005 protocol %s Sending Error", message_type)
GlobalModule.EM_LOGGER.debug(
"Sending Error:%s", str(type(exception)))
return False, None
GlobalModule.EM_LOGGER.info("107002 Receiving rpc-reply from %s",
self.__device_ip)
return True, receive_message
@decorater_log_in_out
def disconnect_device(self):
'''
Device disconnection control
Disconnect from the device.
Explanation about parameter:
None
Explanation about return value:
Judgment result : boolean (True:Normal,False:Abnormal)
'''
try:
self.__connection.close_session()
except Exception as exception:
GlobalModule.EM_LOGGER.debug(
"Disconnect Error:%s", str(type(exception)))
return False
GlobalModule.EM_LOGGER.info("107004 SSH Connection Closed for %s",
self.__device_ip)
return True
@decorater_log
def __init__(self):
'''
Constructor
'''
self.__connection = None
self.__device_ip = None
self.__capability_list = \
('urn:ietf:params:netconf:base:1.0',
'urn:ietf:params:netconf:base:1.1')
@decorater_log
def __judg_control_signal(self, message_type):
'''
Control signal judgment
Make judgment on the message to be sent based on the message type.
Explanation about parameter:
message_type: Message type (response message)
discard-changes
validate
lock
unlock
get-config
edit-config
confirmed-commit
commit
Explanation about return value:
Judgment result : boolean (True:Normal,False:Abnormal)
Judgment message type : str
'''
message_list = ["discard-changes", "validate", "lock",
"unlock", "get-config", "edit-config",
"confirmed-commit", "commit"]
GlobalModule.EM_LOGGER.debug("message_type:%s", message_type)
if message_type in message_list:
GlobalModule.EM_LOGGER.debug("message_type Match")
judg_message_type = message_type.replace('-', '_')
return True, judg_message_type
GlobalModule.EM_LOGGER.debug("message_type UNMatch")
return False, None
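# Hedged usage sketch (not part of the original module): the expected call order
# is connect_device -> send_control_signal -> disconnect_device.  The payload
# below is a hypothetical minimal device_info JSON matching the keys read in
# connect_device; a reachable Netconf device is required at runtime.
def _example_session(config_xml):
    protocol = EmNetconfProtocol()
    device_info = json.dumps({"device_info": {
        "mgmt_if_address": "192.0.2.1",
        "username": "admin",
        "password": "secret",
        "port_number": 830}})
    if protocol.connect_device(device_info) == 1:  # 1: Normal connection
        is_ok, reply = protocol.send_control_signal("edit-config", config_xml)
        protocol.disconnect_device()
        return is_ok, reply
    return False, None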
| 37.513812
| 80
| 0.546318
| 1,295
| 13,580
| 5.439382
| 0.169884
| 0.101363
| 0.133447
| 0.149063
| 0.495031
| 0.422487
| 0.367121
| 0.266752
| 0.209966
| 0.192788
| 0
| 0.00828
| 0.386377
| 13,580
| 361
| 81
| 37.617729
| 0.837033
| 0.189912
| 0
| 0.309179
| 0
| 0
| 0.144065
| 0.030246
| 0
| 0
| 0
| 0
| 0
| 1
| 0.024155
| false
| 0.009662
| 0.043478
| 0
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e21640f79315d5678d2b1eb1562ded16d33050
| 1,935
|
py
|
Python
|
policy/openbot/callbacks.py
|
januxnet/OpenBot
|
04768161a552281e1e14acde98589a64628b86c7
|
[
"MIT"
] | 1,971
|
2020-08-24T22:24:24.000Z
|
2021-07-24T18:43:39.000Z
|
policy/openbot/callbacks.py
|
FlorentGuinier/OpenBot
|
087b1c89fd61ee5e644a0b042c9e5f25540caeae
|
[
"MIT"
] | 145
|
2020-08-26T23:00:28.000Z
|
2021-07-26T22:00:06.000Z
|
policy/openbot/callbacks.py
|
FlorentGuinier/OpenBot
|
087b1c89fd61ee5e644a0b042c9e5f25540caeae
|
[
"MIT"
] | 342
|
2020-08-26T10:39:43.000Z
|
2021-07-26T12:12:10.000Z
|
# Created by Matthias Mueller - Intel Intelligent Systems Lab - 2020
import os
import tensorflow as tf
def checkpoint_cb(checkpoint_path, steps_per_epoch=-1, num_epochs=10):
# Create a callback that saves the model at a regular frequency (every epoch by default)
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=os.path.join(checkpoint_path, "cp-{epoch:04d}.ckpt"),
monitor="val_loss",
verbose=0,
save_best_only=False,
save_weights_only=False,
mode="auto",
save_freq="epoch" if steps_per_epoch < 0 else int(num_epochs * steps_per_epoch),
)
return checkpoint_callback
def tensorboard_cb(log_path):
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_path,
histogram_freq=0,
write_graph=True,
write_images=True,
update_freq="epoch",
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
)
return tensorboard_callback
def logger_cb(log_path, append=False):
logger_callback = tf.keras.callbacks.CSVLogger(
os.path.join(log_path, "log.csv"), append=append
)
return logger_callback
def early_stopping_cb():
early_stopping_callback = tf.keras.callbacks.EarlyStopping(
monitor="val_loss",
min_delta=0,
patience=20,
verbose=0,
mode="auto",
baseline=None,
restore_best_weights=False,
)
return early_stopping_callback
def reduce_lr_cb():
reduce_lr_callback = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", factor=0.3, patience=2, min_lr=0.0001
)
return reduce_lr_callback
def lr_schedule_cb():
return tf.keras.callbacks.LearningRateScheduler(scheduler)
# This function defines a custom learning schedule.
def scheduler(epoch):
if epoch < 10:
return 0.0002
elif epoch < 20:
return 0.0001
else:
return 0.00005
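# Illustrative sketch (not part of the original module): how these factory
# functions are typically combined into a callback list for tf.keras fit().
# The paths below are placeholders.
def _example_callbacks(checkpoint_path="./checkpoints", log_path="./logs"):
    return [
        checkpoint_cb(checkpoint_path),
        tensorboard_cb(log_path),
        logger_cb(log_path),
        early_stopping_cb(),
        reduce_lr_cb(),
        lr_schedule_cb(),
    ]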
| 26.148649
| 88
| 0.67907
| 247
| 1,935
| 5.089069
| 0.421053
| 0.033413
| 0.076372
| 0.095465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030976
| 0.232558
| 1,935
| 73
| 89
| 26.506849
| 0.815488
| 0.09199
| 0
| 0.109091
| 0
| 0
| 0.038791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.127273
| false
| 0
| 0.036364
| 0.018182
| 0.327273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e2cf5bd2ba70f93247b07879264830fda0661a
| 856
|
py
|
Python
|
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
generate_landmarks.py
|
PandaWhoCodes/chutya_rating
|
88f73e37c517c1d68ebf518b40cf93667f39991b
|
[
"MIT"
] | null | null | null |
import sys
import os
import dlib
import glob
from skimage import io
predictor_path = "data/shape_predictor_68_face_landmarks.dat"
faces_folder_path = "data/pics"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(predictor_path)
def sortKeyFunc(s):
return int(os.path.basename(s)[:-4])
my_glob = glob.glob(os.path.join(faces_folder_path, "*.jpg"))
my_glob.sort(key=sortKeyFunc)
print(my_glob)
for f in my_glob:
print("Processing file: {}".format(f))
img = io.imread(f)
dets = detector(img, 1)
for k, d in enumerate(dets):
shape = predictor(img, d)
file_write = ""
for i in range(0, 68):
file_write += str(shape.part(i).x) + ", " + str(shape.part(i).y) + ", "
with open("data/landmarks.txt", "a") as f:
f.write(file_write)
f.write("\n")
| 25.939394
| 83
| 0.647196
| 130
| 856
| 4.1
| 0.476923
| 0.045028
| 0.056285
| 0.04878
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010309
| 0.206776
| 856
| 32
| 84
| 26.75
| 0.774669
| 0
| 0
| 0
| 0
| 0
| 0.116822
| 0.049065
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0.038462
| 0.269231
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e46a90af345235275bf125d7191b762e1c076e
| 6,530
|
py
|
Python
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 23
|
2020-11-11T11:00:45.000Z
|
2022-03-28T07:06:56.000Z
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 223
|
2020-11-11T14:34:51.000Z
|
2022-03-31T14:14:58.000Z
|
geoutils/misc.py
|
erikmannerfelt/GeoUtils
|
96a044f7cca73f936e5b245a5e99e0d2102d279f
|
[
"BSD-3-Clause"
] | 14
|
2021-02-19T09:57:46.000Z
|
2022-03-21T09:49:12.000Z
|
"""Miscellaneous functions, mainly for testing."""
from __future__ import annotations
import functools
import warnings
import numpy as np
import rasterio as rio
import geoutils
from geoutils._typing import ArrayLike
from geoutils.georaster import Raster, RasterType
def array_equal(
array1: RasterType | ArrayLike,
array2: RasterType | ArrayLike,
equal_nan: bool = True,
tolerance: float = 0.0,
) -> bool:
"""
Check if two arrays or Rasters are equal.
This function mirrors (and partly uses) 'np.array_equal' with these exceptions:
1. Different dtypes are okay as long as they are equal (e.g. '1 == 1.0' is True)
2. Rasters are directly comparable.
3. masked_array masks are respected.
4. A tolerance argument is added.
5. The function works with numpy<=1.18.
:param array1: The first array-like object to compare.
:param array2: The second array-like object to compare.
:param equal_nan: Whether to compare NaNs as equal ('NaN == NaN' is True)
:param tolerance: The maximum allowed summed difference between the arrays.
Examples:
Any object that can be parsed as an array can be compared.
>>> arr1 = [1, 2, 3]
>>> arr2 = np.array([1., 2., 3.])
>>> array_equal(arr1, arr2)
True
Nans are equal by default, but can be disabled with 'equal_nan=False'
>>> arr3 = np.array([1., 2., np.nan])
>>> array_equal(arr1, arr3)
False
>>> array_equal(arr3, arr3.copy())
True
>>> array_equal(arr3, arr3, equal_nan=False)
False
The equality tolerance can be set with the 'tolerance' argument (defaults to 0).
>>> arr4 = np.array([1., 2., 3.1])
>>> array_equal(arr1, arr4)
False
>>> array_equal(arr1, arr4, tolerance=0.2)
True
Masks in masked_arrays are respected.
>>> arr5 = np.ma.masked_array(arr1, [False, False, True])
>>> array_equal(arr1, arr5)
False
>>> array_equal(arr3, arr5)
True
>>> array_equal(arr3, arr5, equal_nan=False)
False
"""
arrays: list[np.ndarray] = []
strings_compared = False # Flag to handle string arrays instead of numeric
# Convert both inputs to numpy ndarrays
for arr in array1, array2:
if any(s in np.dtype(type(np.asanyarray(arr)[0])).name for s in ("<U", "str")):
strings_compared = True
if isinstance(arr, Raster): # If a Raster subclass, take its data. I don't know why mypy complains here!
arr = arr.data # type: ignore
if isinstance(arr, np.ma.masked_array): # If a masked_array, replace the masked values with nans
if "float" not in np.dtype(arr.dtype).name:
arr = arr.astype(float)
arrays.append(arr.filled(np.nan)) # type: ignore
else:
arrays.append(np.asarray(arr))
if np.shape(arrays[0]) != np.shape(arrays[1]):
return False
if strings_compared: # If they are strings, the tolerance/nan handling is irrelevant.
return bool(np.array_equal(arrays[0], arrays[1]))
diff = np.diff(arrays, axis=0)
if "float" in np.dtype(diff.dtype).name and np.any(~np.isfinite(diff)):
# Check that the nan-mask is equal. If it's not, or nans are not allowed at all, return False
if not equal_nan or not np.array_equal(np.isfinite(arrays[0]), np.isfinite(arrays[1])):
return False
return bool(np.nansum(np.abs(diff)) <= tolerance)
def deprecate(removal_version: str | None = None, details: str | None = None): # type: ignore
"""
Trigger a DeprecationWarning for the decorated function.
:param removal_version: Optional. The version at which this will be removed.
If this version is reached, a ValueError is raised.
:param details: Optional. A description for why the function was deprecated.
:triggers DeprecationWarning: For any call to the function.
:raises ValueError: If 'removal_version' was given and the current version is equal or higher.
:returns: The decorator to decorate the function.
"""
def deprecator_func(func): # type: ignore
@functools.wraps(func)
def new_func(*args, **kwargs): # type: ignore
# True if it should warn, False if it should raise an error
should_warn = removal_version is None or removal_version > geoutils.version.version
# Add text depending on the given arguments and 'should_warn'.
text = (
f"Call to deprecated function '{func.__name__}'."
if should_warn
else f"Deprecated function '{func.__name__}' was removed in {removal_version}."
)
# Add the details explanation if it was given, and make sure the sentence is ended.
if details is not None:
details_frm = details.strip()
if details_frm[0].islower():
details_frm = details_frm[0].upper() + details_frm[1:]
text += " " + details_frm
if not any(text.endswith(c) for c in ".!?"):
text += "."
if should_warn and removal_version is not None:
text += f" This functionality will be removed in version {removal_version}."
elif not should_warn:
text += f" Current version: {geoutils.version.version}."
if should_warn:
warnings.warn(text, category=DeprecationWarning, stacklevel=2)
else:
raise ValueError(text)
return func(*args, **kwargs)
return new_func
return deprecator_func
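# Usage sketch for the decorator above (hypothetical helper, not part of the
# module): calls emit a DeprecationWarning until geoutils reaches the stated
# removal version, after which they raise ValueError.
@deprecate(removal_version="999.0", details="illustrative only; use array_equal instead")
def _legacy_array_equal(a, b):
    return array_equal(a, b)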
def resampling_method_from_str(method_str: str) -> rio.warp.Resampling:
"""Get a rasterio resampling method from a string representation, e.g. "cubic_spline"."""
# Try to match the string version of the resampling method with a rio Resampling enum name
for method in rio.warp.Resampling:
if str(method).replace("Resampling.", "") == method_str:
resampling_method = method
break
# If no match was found, raise an error.
else:
raise ValueError(
f"'{method_str}' is not a valid rasterio.warp.Resampling method. "
f"Valid methods: {[str(method).replace('Resampling.', '') for method in rio.warp.Resampling]}"
)
return resampling_method
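# Illustrative check (hypothetical helper, not part of the module): the string
# form maps onto the rasterio Resampling enum.
def _example_resampling_lookup():
    assert resampling_method_from_str("bilinear") == rio.warp.Resampling.bilinear
    assert resampling_method_from_str("cubic_spline") == rio.warp.Resampling.cubic_spline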
| 38.187135
| 113
| 0.622971
| 865
| 6,530
| 4.618497
| 0.277457
| 0.032541
| 0.017522
| 0.006758
| 0.033542
| 0.028536
| 0
| 0
| 0
| 0
| 0
| 0.014894
| 0.280245
| 6,530
| 170
| 114
| 38.411765
| 0.835106
| 0.456508
| 0
| 0.066667
| 0
| 0.013333
| 0.124359
| 0.032599
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.106667
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e7cd1cacbdf9713f6574f513c54cc6fc8be57b
| 1,860
|
py
|
Python
|
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
sorting/merge_sort.py
|
matuzalemmuller/algoritmos
|
138e7c9747879a58fab48908541c175b8653da5c
|
[
"MIT"
] | null | null | null |
# UFSC - Campus Trindade
# PPGEAS - Introducao a Algoritmos
# Matuzalem Muller dos Santos
# 2019/1
# Commented code is for calculating algorithm completixy and printing variables
from random import randint
import time
import sys
def merge_sort(array):
# n = 0
if len(array) > 1:
half = len(array) // 2
left_array = array[:half]
right_array = array[half:]
# n += merge_sort(left_array)
# n += merge_sort(right_array)
merge_sort(left_array)
merge_sort(right_array)
left_mark, right_mark, position = 0, 0, 0
while left_mark < len(left_array) and right_mark < len(right_array):
if left_array[left_mark] < right_array[right_mark]:
array[position] = left_array[left_mark]
left_mark += 1
else:
array[position] = right_array[right_mark]
right_mark += 1
position += 1
# n += 1
while left_mark < len(left_array):
array[position] = left_array[left_mark]
left_mark += 1
position += 1
# n += 1
while right_mark < len(right_array):
array[position] = right_array[right_mark]
right_mark += 1
position += 1
# n += 1
# return array, n
return array
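# Quick correctness sketch (illustrative helper, separate from the timing
# harness below):
def _check_merge_sort():
    assert merge_sort([3, 1, 2]) == [1, 2, 3]
    assert merge_sort([]) == []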
if __name__ == '__main__':
array = []
random_number = 0
try:
number_of_elements = int(sys.argv[1])
except:
number_of_elements = 10
for i in range(0, number_of_elements):
random_number = randint(1, 9_999_999_999)
array.append(random_number)
# print(array)
start_time = time.time()
# array, n = merge_sort(array)
array = merge_sort(array)
running_time = time.time() - start_time
# print(array)
# print(n)
print(running_time)
| 26.956522
| 79
| 0.58172
| 234
| 1,860
| 4.354701
| 0.277778
| 0.070658
| 0.05103
| 0.050049
| 0.288518
| 0.245339
| 0.201178
| 0.180569
| 0.180569
| 0.104024
| 0
| 0.029482
| 0.325269
| 1,860
| 69
| 80
| 26.956522
| 0.78247
| 0.178495
| 0
| 0.261905
| 0
| 0
| 0.005291
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02381
| false
| 0
| 0.071429
| 0
| 0.119048
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e7ec352bf4d97e4861749ed21422b37bbf8b42
| 2,239
|
py
|
Python
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 40
|
2021-05-09T03:24:46.000Z
|
2022-03-31T23:01:50.000Z
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 53
|
2021-05-27T07:53:58.000Z
|
2022-03-27T21:35:26.000Z
|
binsync/data/struct.py
|
zachsez/binsync
|
39a53a84c640314adbf50e612177c4a56c43542c
|
[
"MIT"
] | 10
|
2021-05-13T22:09:38.000Z
|
2022-03-31T23:51:27.000Z
|
import toml
from typing import List, Dict
from .artifact import Artifact
class StructMember(Artifact):
"""
Describes a struct member that corresponds to a struct.
"""
__slots__ = (
"last_change",
"member_name",
"offset",
"type",
"size",
)
def __init__(self, member_name, offset, type_, size, last_change=None):
super(StructMember, self).__init__(last_change=last_change)
self.member_name: str = member_name
self.offset: int = offset
self.type: str = type_
self.size: int = size
@classmethod
def parse(cls, s):
sm = StructMember(None, None, None, None)
sm.__setstate__(toml.loads(s))
return sm
class Struct(Artifact):
"""
Describes a struct
"""
__slots__ = (
"last_change",
"name",
"size",
"struct_members",
)
def __init__(self, name: str, size: int, struct_members: List[StructMember], last_change=None):
super(Struct, self).__init__(last_change=last_change)
self.name = name
self.size = size
self.struct_members = struct_members
def __getstate__(self):
return {
"metadata": {
"name": self.name, "size": self.size, "last_change": self.last_change
},
"members": {"%x" % member.offset: member.__getstate__() for member in self.struct_members}
}
def __setstate__(self, state):
metadata = state["metadata"]
members = state["members"]
self.name = metadata["name"]
self.size = members["size"]
self.last_change = members.get("last_change", None)
self.struct_members = [
StructMember.parse(toml.dumps(member)) for _, member in members.items()
]
def add_struct_member(self, mname, moff, mtype, size):
self.struct_members.append(StructMember(mname, moff, mtype, size))
@classmethod
def parse(cls, s):
struct = Struct(None, None, None)
struct.__setstate__(s)
return struct
@classmethod
def load(cls, struct_toml):
s = Struct(None, None, None)
s.__setstate__(struct_toml)
return s
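# Illustrative sketch (hypothetical layout, not part of the module): describe a
# 12-byte struct with two members and inspect its serialized state.  The
# Artifact base-class behaviour for member serialization is assumed from context.
def _example_struct_state():
    members = [
        StructMember("field_0", 0, "int", 4),
        StructMember("field_1", 4, "char*", 8),
    ]
    return Struct("my_struct", 12, members).__getstate__()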
| 23.568421
| 102
| 0.590442
| 249
| 2,239
| 4.995984
| 0.216867
| 0.096463
| 0.038585
| 0.038585
| 0.16881
| 0.094855
| 0.051447
| 0
| 0
| 0
| 0
| 0
| 0.295668
| 2,239
| 94
| 103
| 23.819149
| 0.78884
| 0.03305
| 0
| 0.177419
| 0
| 0
| 0.065381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.129032
| false
| 0
| 0.048387
| 0.016129
| 0.306452
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e84025ba8edc34831e545492abd9cc1b7a33c6
| 9,070
|
py
|
Python
|
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
__init__.py
|
itsmepvr/list_images_to_excel
|
a4da3948a289c91cbcab90980364e989af7f1118
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Author: Venkata Ramana P
<github.com/itsmepvr>
List files to an excel sheet
"""
import os, glob
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from os.path import expanduser
import xlsxwriter
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(569, 304)
MainWindow.setStyleSheet("background-color:rgba(0,0,0,0.5); font-weight:bold;")
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit.setGeometry(QtCore.QRect(10, 40, 391, 26))
self.lineEdit.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit.setObjectName("lineEdit")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(410, 40, 151, 26))
self.pushButton.setStyleSheet("background-color:rgb(255, 255, 255)")
self.pushButton.setObjectName("pushButton")
self.pushButton.clicked.connect(self.chooseFilesDirectory)
self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_2.setGeometry(QtCore.QRect(10, 80, 391, 26))
self.lineEdit_2.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit_2.setText("")
self.lineEdit_2.setObjectName("lineEdit_2")
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(410, 80, 151, 26))
self.pushButton_2.setStyleSheet("background-color:rgb(255, 255, 255)")
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.clicked.connect(self.chooseExcelDirectory)
self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)
self.lineEdit_3.setGeometry(QtCore.QRect(10, 120, 391, 26))
self.lineEdit_3.setStyleSheet("background-color:rgb(255, 255, 255)")
self.lineEdit_3.setText("files_to_list")
self.lineEdit_3.setObjectName("lineEdit_3")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(410, 117, 141, 31))
self.label.setStyleSheet("color:rgb(255, 255, 255);\n"
"background-color:none;\n"
"font-weight:bold;")
self.label.setObjectName("label")
self.checkBox = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox.setEnabled(True)
self.checkBox.setGeometry(QtCore.QRect(170, 160, 121, 31))
self.checkBox.setTabletTracking(False)
self.checkBox.setAutoFillBackground(False)
self.checkBox.setStyleSheet("color:rgb(230, 75, 238);\n"
"background-color:none;\n"
"font-weight:bold;\n"
"font-size:25px;")
self.checkBox.setChecked(True)
self.checkBox.setObjectName("checkBox")
self.checkBox_2 = QtWidgets.QCheckBox(self.centralwidget)
self.checkBox_2.setGeometry(QtCore.QRect(300, 160, 131, 31))
self.checkBox_2.setStyleSheet("color:rgb(230, 75, 238);\n"
"background-color:none;\n"
"font-weight:bold;\n"
"font-size:25px;")
self.checkBox_2.setObjectName("checkBox_2")
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(270, 210, 121, 31))
self.pushButton_3.setStyleSheet("background-color: rgb(138, 226, 52);\n"
"color:black;\n"
"font-weight:bold;")
self.pushButton_3.setObjectName("pushButton_3")
self.pushButton_3.clicked.connect(self.checkFields)
self.pushButton_4 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_4.setGeometry(QtCore.QRect(400, 210, 131, 31))
self.pushButton_4.setStyleSheet("background-color: rgb(239, 41, 41);\n"
"color:black;\n"
"font-weight:bold;")
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_4.clicked.connect(self.quit)
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(70, 260, 461, 20))
self.label_2.setStyleSheet("color:rgb(252, 175, 62);\n"
"font: italic 11pt \"DejaVu Serif\";\n"
"")
self.label_2.setObjectName("label_2")
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setGeometry(QtCore.QRect(40, 220, 201, 23))
self.progressBar.setStyleSheet("background-color:rgb(243, 243, 243)")
self.progressBar.setProperty("value", 24)
self.progressBar.setObjectName("progressBar")
self.progressBar.hide()
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.filePath = "/home/itsmepvr/.local/share/Anki2/3-4 Years Primary/collection.media"
self.excelPath = "/home/itsmepvr/Downloads"
self.excelName = "files_to_list"
self.ext = []
self.convert()
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "List Files to Excel"))
self.pushButton.setText(_translate("MainWindow", "Select Files Path"))
self.pushButton_2.setText(_translate("MainWindow", "Select Excel Path"))
self.label.setText(_translate("MainWindow", "Excel File Name"))
self.checkBox.setText(_translate("MainWindow", "Images"))
self.checkBox_2.setText(_translate("MainWindow", "Audios"))
self.pushButton_3.setText(_translate("MainWindow", "Convert"))
self.pushButton_4.setText(_translate("MainWindow", "Cancel"))
self.label_2.setText(_translate("MainWindow", "Developed by: Venkata Ramana P <github.com/itsmepvr>"))
def quit(self):
self.close()
def chooseFilesDirectory(self):
self.progressBar.hide()
src_dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', expanduser("~"))
self.lineEdit.setText(src_dir)
def chooseExcelDirectory(self):
self.progressBar.hide()
src_dir = QFileDialog.getExistingDirectory(None, 'Select a folder:', expanduser("~"))
self.lineEdit_2.setText(src_dir)
def checkFields(self):
self.filePath = self.lineEdit.text()
self.excelPath = self.lineEdit_2.text()
self.excelName = self.lineEdit_3.text()
if not os.path.isdir(self.filePath):
QMessageBox.warning(None, "Warning", "Files path does not exists", QtWidgets.QMessageBox.Ok)
return
if not os.path.isdir(self.excelPath):
QMessageBox.warning(None, "Warning", "Excel path does not exists", QtWidgets.QMessageBox.Ok)
return
if self.excelName == '':
QMessageBox.warning(None, "Warning", "Excel file name cannot be empty", QtWidgets.QMessageBox.Ok)
return
if not (self.checkBox.isChecked() or self.checkBox_2.isChecked()):
QMessageBox.warning(None, "Warning", "Select any images/audios", QtWidgets.QMessageBox.Ok)
return
self.ext = []
if self.checkBox.isChecked():
self.ext.append("images")
if self.checkBox_2.isChecked():
self.ext.append("audios")
self.convert()
def convert(self):
files = self.getImages(self.filePath)
excel = os.path.join(self.excelPath, self.excelName+'.xlsx')
workbook = xlsxwriter.Workbook(excel)
worksheet = workbook.add_worksheet()
row = 0
incValue = 100/len(files)
progressCount = 0
self.progressBar.setValue(0)
self.progressBar.show()
for fl in files:
worksheet.write(row, 0, fl)
row += 1
progressCount += incValue
self.progressBar.setValue(progressCount)
self.progressBar.setValue(100)
workbook.close()
def getImages(self, path):
img = []
files = []
ext = []
if "images" in self.ext:
ext = ext + ['png', 'jpg', 'gif']
if "audios" in self.ext:
ext = ext + ['mp3', 'wav']
ext = ['png', 'jpg', 'gif']
# [files.extend(glob.glob(path + '/*.' + e)) for e in ext]
# files.sort()
# dd = os.listdir(path)
# dd.sort()
for file in os.listdir(path):
if file.endswith(".png") or file.endswith(".jpg"):
files.append(file)
return files
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 43.190476
| 110
| 0.661632
| 1,027
| 9,070
| 5.765336
| 0.218111
| 0.056747
| 0.046107
| 0.041885
| 0.279851
| 0.25418
| 0.143726
| 0.143726
| 0.137815
| 0.063503
| 0
| 0.043872
| 0.208379
| 9,070
| 209
| 111
| 43.397129
| 0.78078
| 0.022381
| 0
| 0.148352
| 0
| 0.005495
| 0.160962
| 0.040777
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043956
| false
| 0
| 0.054945
| 0
| 0.131868
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e8a0baacae15b6d64e13be86210039b28af7e3
| 1,215
|
py
|
Python
|
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
exception.py
|
a2gs/pythonStudy
|
e790e223a05fd50a5bcaf1240ef24ff60f361cdd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class myBaseException(Exception):
def __init__(self, errNum, errMsg):
self.err = errNum
self.msg = errMsg
class myExcept_1(myBaseException):
def __init__(self):
super().__init__(13, "except 1")
class myExcept_2(myBaseException):
def __init__(self):
super().__init__(8, "except 2")
def func(b):
if(b == 1):
raise myExcept_1
elif(b == 2):
raise myExcept_2
elif(b == 3):
return
try:
func(1)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done1\n----------------------------------------')
try:
func(2)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
print('Done2\n----------------------------------------')
try:
func(3)
except myExcept_1 as e:
	print(f'Error number: {e.err} message: {e.msg}')
except myExcept_2 as e:
	print(f'Error number: {e.err} message: {e.msg}')
else:
print('No exception')
finally:
print('Do this')
| 19.918033
| 56
| 0.626337
| 187
| 1,215
| 3.909091
| 0.245989
| 0.114911
| 0.065663
| 0.073871
| 0.667579
| 0.667579
| 0.571819
| 0.571819
| 0.571819
| 0.571819
| 0
| 0.024038
| 0.144033
| 1,215
| 60
| 57
| 20.25
| 0.678846
| 0.035391
| 0
| 0.591837
| 0
| 0
| 0.332479
| 0.080342
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081633
| false
| 0
| 0
| 0
| 0.163265
| 0.285714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0e8e97e17556ae476f654f72b30433601cb2273
| 564
|
py
|
Python
|
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
datachecker/processors/conditionals.py
|
jayclassless/datachecker
|
dd42b95f87eaaca428595fe5da639f9a710afba8
|
[
"MIT"
] | null | null | null |
from ..errors import DataRequiredError, ShortCircuitSignal
from ..util import processor
__all__ = (
'required',
'optional',
)
@processor
def required():
def required_processor(data):
if data is None:
raise DataRequiredError()
return data
return required_processor
@processor
def optional(default=None):
def optional_processor(data):
if data is None:
signal = ShortCircuitSignal()
signal.data = default
raise signal
return data
return optional_processor
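# Minimal usage sketch (an assumption, not part of this module): if the @processor
# decorator preserves the factory's return value, the processors can be exercised directly.
#   check = required()
#   check(5)                # -> 5
#   check(None)             # raises DataRequiredError
#   skip = optional(default=0)
#   skip(None)              # raises ShortCircuitSignal with .data == 0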
| 18.8
| 58
| 0.64539
| 55
| 564
| 6.472727
| 0.345455
| 0.143258
| 0.08427
| 0.106742
| 0.140449
| 0.140449
| 0
| 0
| 0
| 0
| 0
| 0
| 0.287234
| 564
| 29
| 59
| 19.448276
| 0.885572
| 0
| 0
| 0.272727
| 0
| 0
| 0.028419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0eca7b772b3e9b8c51b6d9de56d789e01ceaffc
| 226
|
py
|
Python
|
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
python/args/multiple.py
|
jdurbin/sandbox
|
ee982f7386ae02c5937dbaee867710b5cd2cc71b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys,argparse
parser = argparse.ArgumentParser()
parser.add_argument('-foo', nargs='+', help='foo values', required=False)
args = parser.parse_args()
# args.foo is None when -foo is omitted, so fall back to an empty list
for foo in (args.foo or []):
    print("Foo: ", foo)
| 20.545455
| 73
| 0.70354
| 32
| 226
| 4.90625
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005051
| 0.123894
| 226
| 11
| 74
| 20.545455
| 0.787879
| 0.09292
| 0
| 0
| 0
| 0
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0ed0165784914f7dc8282f6f368dd72c90b41f2
| 1,594
|
py
|
Python
|
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
src/parent_class/ParentPluralList.py
|
jameskabbes/parent_class
|
cecf938a2f9c66d8914967098b2064526bbec1c7
|
[
"MIT"
] | null | null | null |
from parent_class import ParentPlural
from typing import List
class ParentPluralList( ParentPlural ):
def __init__( self, att = 'list' ):
ParentPlural.__init__( self, att = att )
self.set_attr( self.att, [] )
def __len__( self ):
return len(self.get_list())
def __next__( self ):
self.i += 1
if self.i >= len(self):
raise StopIteration
else:
return self.get_list()[ self.i ]
def _add( self, value ):
list = self.get_list()
list.append( value )
self.set_list( list )
def _remove( self, Inst, all_occurences = False ) -> bool:
"""remove the Inst from the class List"""
removed = False
inds = []
Insts = list(self)
for i in range(len(self)):
if Insts[i] == Inst:
inds.append(i)
removed = True
if not all_occurences:
break
self._remove_inds( inds )
return removed
def _remove_inds( self, inds: List[int] ):
"""Given a list of indices, remove the Objects at those indicies from the class List"""
list = self.get_list()
inds.sort( reverse=True )
for ind in inds:
del list[ind]
self.set_list( list )
def set_list( self, list ):
self.set_attr( self.att, list )
def get_list( self ):
return self.get_attr( self.att )
if __name__ == '__main__':
a = ParentPluralList()
a.print_atts()
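    # A hypothetical extension of this demo (not in the original), using only methods
    # defined above plus the inherited attribute accessors:
    #   a._add('x'); a._add('y')
    #   a.get_list()   # -> ['x', 'y']
    #   len(a)         # -> 2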
| 21.835616
| 95
| 0.527604
| 187
| 1,594
| 4.256684
| 0.320856
| 0.070352
| 0.055276
| 0.037688
| 0.090452
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001002
| 0.373902
| 1,594
| 73
| 96
| 21.835616
| 0.796593
| 0.0734
| 0
| 0.093023
| 0
| 0
| 0.00818
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0
| 0.046512
| 0.046512
| 0.348837
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0ed1693b1a77aee2f28b941c3d753890e7d9408
| 1,454
|
py
|
Python
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 1
|
2019-09-13T18:35:56.000Z
|
2019-09-13T18:35:56.000Z
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect_server
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 15
|
2018-11-01T18:08:11.000Z
|
2021-12-06T17:55:03.000Z
|
prototypes/harvesters/cxidb_harvester.py
|
materials-data-facility/connect
|
9ec5b61750bf6fa579bf3ec122f31880d3c049b8
|
[
"Apache-2.0"
] | 1
|
2020-11-30T17:02:41.000Z
|
2020-11-30T17:02:41.000Z
|
import requests
from json import dump
import os
from shutil import rmtree
from tqdm import tqdm
#Collects available data from CXIDB and saves to the given directory
#out_dir: The path to the directory (which will be created) for the data files
#existing_dir:
# -1: Remove out_dir if it exists
# 0: Error if out_dir exists (Default)
# 1: Overwrite files in out_dir if there are path collisions
#verbose: Print status messages? Default False
def harvest(out_dir, existing_dir=0, verbose=False):
if os.path.exists(out_dir):
if existing_dir == 0:
exit("Directory '" + out_dir + "' exists")
elif not os.path.isdir(out_dir):
exit("Error: '" + out_dir + "' is not a directory")
elif existing_dir == -1:
rmtree(out_dir)
os.mkdir(out_dir)
else:
os.mkdir(out_dir)
#Fetch list of ids
id_res = requests.get("http://cxidb.org/index.json")
if id_res.status_code != 200:
exit("IDs GET failure: " + str(id_res.status_code) + " error")
id_list = id_res.json()
for id_entry in tqdm(id_list, desc="Fetching metadata", disable= not verbose):
id_data = requests.get("http://cxidb.org/" + id_entry)
if id_data.status_code != 200:
exit("ID fetch failure: " + str(id_data.status_code) + " error")
with open(os.path.join(out_dir, id_entry), 'w') as out_file:
dump(id_data.json(), out_file)
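# Example call (illustrative): harvest every CXIDB metadata record into ./cxidb_data,
# removing the directory first if it already exists and showing a progress bar.
#   harvest("cxidb_data", existing_dir=-1, verbose=True)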
| 36.35
| 82
| 0.643741
| 221
| 1,454
| 4.072398
| 0.371041
| 0.086667
| 0.026667
| 0.028889
| 0.051111
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010949
| 0.246217
| 1,454
| 39
| 83
| 37.282051
| 0.810219
| 0.252407
| 0
| 0.076923
| 0
| 0
| 0.144847
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.192308
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0f238f39a9f9a11e1383c07dd66cb298f4de952
| 21,160
|
py
|
Python
|
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
worldengine/cli/main.py
|
stefan-feltmann/lands
|
b2f1fc3aab4895763160a135d085a17dceb5f58e
|
[
"MIT"
] | null | null | null |
import sys
from argparse import ArgumentParser
import os
import pickle
import random
import worldengine.generation as geo
from worldengine.common import array_to_matrix, set_verbose, print_verbose
from worldengine.draw import draw_ancientmap_on_file, draw_biome_on_file, draw_ocean_on_file, \
draw_precipitation_on_file, draw_grayscale_heightmap_on_file, draw_simple_elevation_on_file, \
draw_temperature_levels_on_file, draw_riversmap_on_file
from worldengine.plates import world_gen, generate_plates_simulation
from worldengine.step import Step
from worldengine.world import World
from worldengine.version import __version__
VERSION = __version__
OPERATIONS = 'world|plates|ancient_map|info'
SEA_COLORS = 'blue|brown'
STEPS = 'plates|precipitations|full'
def generate_world(world_name, width, height, seed, num_plates, output_dir,
step, ocean_level, world_format='pickle', verbose=True, black_and_white=False):
w = world_gen(world_name, width, height, seed, num_plates, ocean_level,
step, verbose=verbose)
print('') # empty line
    print('Producing output:')
sys.stdout.flush()
# Save data
filename = "%s/%s.world" % (output_dir, world_name)
with open(filename, "wb") as f:
if world_format == 'pickle':
pickle.dump(w, f, pickle.HIGHEST_PROTOCOL)
elif world_format == 'protobuf':
f.write(w.protobuf_serialize())
else:
print("Unknown format '%s', not saving " % world_format)
print("* world data saved in '%s'" % filename)
sys.stdout.flush()
# Generate images
filename = '%s/%s_ocean.png' % (output_dir, world_name)
draw_ocean_on_file(w.ocean, filename)
print("* ocean image generated in '%s'" % filename)
if step.include_precipitations:
filename = '%s/%s_precipitation.png' % (output_dir, world_name)
draw_precipitation_on_file(w, filename, black_and_white)
print("* precipitation image generated in '%s'" % filename)
filename = '%s/%s_temperature.png' % (output_dir, world_name)
draw_temperature_levels_on_file(w, filename, black_and_white)
print("* temperature image generated in '%s'" % filename)
if step.include_biome:
filename = '%s/%s_biome.png' % (output_dir, world_name)
draw_biome_on_file(w, filename)
print("* biome image generated in '%s'" % filename)
filename = '%s/%s_elevation.png' % (output_dir, world_name)
sea_level = w.sea_level()
draw_simple_elevation_on_file(w.elevation['data'], filename, width=width,
height=height, sea_level=sea_level)
print("* elevation image generated in '%s'" % filename)
return w
def generate_grayscale_heightmap(world, filename):
draw_grayscale_heightmap_on_file(world, filename)
print("+ grayscale heightmap generated in '%s'" % filename)
def generate_rivers_map(world, filename):
draw_riversmap_on_file(world, filename)
print("+ rivers map generated in '%s'" % filename)
def generate_plates(seed, world_name, output_dir, width, height,
num_plates=10):
"""
    Eventually this method should be invoked when generation is called and
    asked to stop at step "plates"; it should not be a different operation.
:param seed:
:param world_name:
:param output_dir:
:param width:
:param height:
:param num_plates:
:return:
"""
elevation, plates = generate_plates_simulation(seed, width, height,
num_plates=num_plates)
world = World(world_name, width, height, seed, num_plates, -1.0, "plates")
world.set_elevation(array_to_matrix(elevation, width, height), None)
world.set_plates(array_to_matrix(plates, width, height))
# Generate images
filename = '%s/plates_%s.png' % (output_dir, world_name)
# TODO calculate appropriate sea_level
sea_level = 1.0
draw_simple_elevation_on_file(world.elevation['data'], filename, width,
height, sea_level)
print("+ plates image generated in '%s'" % filename)
geo.center_land(world)
filename = '%s/centered_plates_%s.png' % (output_dir, world_name)
draw_simple_elevation_on_file(world.elevation['data'], filename, width,
height, sea_level)
print("+ centered plates image generated in '%s'" % filename)
def check_step(step_name):
step = Step.get_by_name(step_name)
if step is None:
print("ERROR: unknown step name, using default 'full'")
return Step.get_by_name("full")
else:
return step
def operation_ancient_map(world, map_filename, resize_factor, sea_color,
draw_biome, draw_rivers, draw_mountains,
draw_outer_land_border):
draw_ancientmap_on_file(world, map_filename, resize_factor, sea_color,
draw_biome, draw_rivers, draw_mountains,
draw_outer_land_border)
print("+ ancient map generated in '%s'" % map_filename)
def __get_last_byte__(filename):
with open(filename, 'rb') as input_file:
data = tmp_data = input_file.read(1024 * 1024)
while tmp_data:
tmp_data = input_file.read(1024 * 1024)
if tmp_data:
data = tmp_data
return ord(data[len(data) - 1])
def __varint_to_value__(varint):
    # See https://developers.google.com/protocol-buffers/docs/encoding for details.
    # To convert the varint to a value we start from the first byte and add to it
    # the second byte multiplied by 128, the one after that multiplied by 128 ** 2,
    # and so on (the first byte holds the least significant 7-bit group).
if len(varint) == 1:
return varint[0]
else:
return varint[0] + 128 * __varint_to_value__(varint[1:])
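# Worked example (illustrative): __varint_to_value__([1, 2]) == 1 + 128 * 2 == 257,
# i.e. the first byte carries the least significant 7-bit group.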
def __get_tag__(filename):
with open(filename, 'rb') as ifile:
# drop first byte, it should tell us the protobuf version and it
# should be normally equal to 8
data = ifile.read(1)
if not data:
return None
done = False
tag_bytes = []
        # We read bytes until we find a byte with the MSB not set
while data and not done:
data = ifile.read(1)
if not data:
return None
value = ord(data)
tag_bytes.append(value % 128)
if value < 128:
done = True
        # tag_bytes is in read order, so its first element is the least significant
        # 7-bit group; __varint_to_value__ decodes it accordingly
return __varint_to_value__(tag_bytes)
def __seems_protobuf_worldfile__(world_filename):
worldengine_tag = __get_tag__(world_filename)
return worldengine_tag == World.worldengine_tag()
def __seems_pickle_file__(world_filename):
last_byte = __get_last_byte__(world_filename)
return last_byte == ord('.')
def load_world(world_filename):
pb = __seems_protobuf_worldfile__(world_filename)
pi = __seems_pickle_file__(world_filename)
if pb and pi:
print("we cannot distinguish if the file is a pickle or a protobuf "
"world file. Trying to load first as protobuf then as pickle "
"file")
try:
return World.open_protobuf(world_filename)
except Exception:
try:
return World.from_pickle_file(world_filename)
except Exception:
raise Exception("Unable to load the worldfile neither as protobuf or pickle file")
elif pb:
return World.open_protobuf(world_filename)
elif pi:
return World.from_pickle_file(world_filename)
else:
raise Exception("The given worldfile does not seem a pickle or a protobuf file")
def print_world_info(world):
print(" name : %s" % world.name)
print(" width : %i" % world.width)
print(" height : %i" % world.height)
print(" seed : %i" % world.seed)
print(" no plates : %i" % world.n_plates)
print(" ocean level : %f" % world.ocean_level)
print(" step : %s" % world.step.name)
print(" has biome : %s" % world.has_biome())
print(" has humidity : %s" % world.has_humidity())
print(" has irrigation : %s" % world.has_irrigation())
print(" has permeability : %s" % world.has_permeability())
print(" has watermap : %s" % world.has_watermap())
print(" has precipitations : %s" % world.has_precipitations())
print(" has temperature : %s" % world.has_temperature())
def main():
parser = ArgumentParser(
usage="usage: %(prog)s [options] [" + OPERATIONS + "]")
parser.add_argument('OPERATOR', nargs='?')
parser.add_argument('FILE', nargs='?')
parser.add_argument(
'-o', '--output-dir', dest='output_dir',
help="generate files in DIR [default = '%(default)s']",
metavar="DIR", default='.')
parser.add_argument(
'-n', '--worldname', dest='world_name',
help="set world name to STR. output is stored in a " +
"world file with the name format 'STR.world'. If " +
"a name is not provided, then seed_N.world, " +
"where N=SEED",
metavar="STR")
# TODO: add description of protocol buffer
parser.add_argument('-b', '--protocol-buffer', dest='protobuf',
action="store_true",
help="Save world file using protocol buffer format. " +
"Default = store using pickle format",
default=False)
parser.add_argument('-s', '--seed', dest='seed', type=int,
help="Use seed=N to initialize the pseudo-random " +
"generation. If not provided, one will be " +
"selected for you.",
metavar="N")
parser.add_argument('-t', '--step', dest='step',
help="Use step=[" + STEPS + "] to specify how far " +
"to proceed in the world generation process. " +
"[default='%(default)s']",
metavar="STR", default="full")
# TODO --step appears to be duplicate of OPERATIONS. Especially if
# ancient_map is added to --step
parser.add_argument('-x', '--width', dest='width', type=int,
help="N = width of the world to be generated " +
"[default=%(default)s]",
metavar="N",
default='512')
parser.add_argument('-y', '--height', dest='height', type=int,
help="N = height of the world to be generated " +
"[default=%(default)s]",
metavar="N",
default='512')
parser.add_argument('-q', '--number-of-plates', dest='number_of_plates',
type=int,
help="N = number of plates [default = %(default)s]",
metavar="N", default='10')
parser.add_argument('--recursion_limit', dest='recursion_limit', type=int,
help="Set the recursion limit [default = %(default)s]",
metavar="N", default='2000')
parser.add_argument('-v', '--verbose', dest='verbose', action="store_true",
help="Enable verbose messages", default=False)
parser.add_argument('--version', dest='version', action="store_true",
help="Display version information", default=False)
parser.add_argument('--bw', '--black-and-white', dest='black_and_white',
action="store_true",
help="generate maps in black and white",
default=False)
# -----------------------------------------------------
g_generate = parser.add_argument_group(
"Generate Options", "These options are only useful in plate and " +
"world modes")
g_generate.add_argument('-r', '--rivers', dest='rivers_map',
help="generate rivers map in FILE", metavar="FILE")
g_generate.add_argument('--gs', '--grayscale-heightmap',
dest='grayscale_heightmap',
help='produce a grayscale heightmap in FILE',
metavar="FILE")
g_generate.add_argument('--ocean_level', dest='ocean_level', type=float,
                            help='elevation cut off for sea level '
                                 '[default = %(default)s]',
metavar="N", default=1.0)
# -----------------------------------------------------
g_ancient_map = parser.add_argument_group(
"Ancient Map Options", "These options are only useful in " +
"ancient_map mode")
g_ancient_map.add_argument('-w', '--worldfile', dest='world_file',
help="FILE to be loaded", metavar="FILE")
g_ancient_map.add_argument('-g', '--generatedfile', dest='generated_file',
help="name of the FILE", metavar="FILE")
g_ancient_map.add_argument(
'-f', '--resize-factor', dest='resize_factor', type=int,
help="resize factor (only integer values). " +
"Note this can only be used to increase " +
"the size of the map [default=%(default)s]",
metavar="N", default='1')
g_ancient_map.add_argument('--sea_color', dest='sea_color',
help="string for color [" + SEA_COLORS + "]",
metavar="S", default="brown")
g_ancient_map.add_argument('--not-draw-biome', dest='draw_biome',
action="store_false",
help="Not draw biome",
default=True)
g_ancient_map.add_argument('--not-draw-mountains', dest='draw_mountains',
action="store_false",
help="Not draw mountains",
default=True)
g_ancient_map.add_argument('--not-draw-rivers', dest='draw_rivers',
action="store_false",
help="Not draw rivers",
default=True)
g_ancient_map.add_argument('--draw-outer-border', dest='draw_outer_border',
action="store_true",
help="Draw outer land border",
default=False)
# TODO: allow for RGB specification as [r g b], ie [0.5 0.5 0.5] for gray
args = parser.parse_args()
if args.version:
usage()
if os.path.exists(args.output_dir):
if not os.path.isdir(args.output_dir):
raise Exception("Output dir exists but it is not a dir")
else:
print('Directory does not exist, we are creating it')
os.makedirs(args.output_dir)
# it needs to be increased to be able to generate very large maps
# the limit is hit when drawing ancient maps
sys.setrecursionlimit(args.recursion_limit)
if args.number_of_plates < 1 or args.number_of_plates > 100:
usage(error="Number of plates should be a in [1, 100]")
operation = "world"
if args.OPERATOR is None:
pass
elif args.OPERATOR is not None and args.OPERATOR.lower() not in OPERATIONS:
parser.print_help()
usage("Only 1 operation allowed [" + OPERATIONS + "]")
else:
operation = args.OPERATOR.lower()
if args.OPERATOR == 'info':
if args.FILE is None:
parser.print_help()
usage("For operation info only the filename should be specified")
if not os.path.exists(args.FILE):
usage("The specified world file does not exist")
random.seed()
if args.seed:
seed = int(args.seed)
else:
seed = random.randint(0, 65536)
if args.world_name:
world_name = args.world_name
else:
world_name = "seed_%i" % seed
step = check_step(args.step)
world_format = 'pickle'
if args.protobuf:
world_format = 'protobuf'
generation_operation = (operation == 'world') or (operation == 'plates')
produce_grayscale_heightmap = args.grayscale_heightmap
if produce_grayscale_heightmap and not generation_operation:
usage(
error="Grayscale heightmap can be produced only during world " +
"generation")
if args.rivers_map and not generation_operation:
usage(error="Rivers map can be produced only during world generation")
print('Worldengine - a world generator (v. %s)' % VERSION)
print('-----------------------')
print(' operation : %s generation' % operation)
if generation_operation:
print(' seed : %i' % seed)
print(' name : %s' % world_name)
print(' width : %i' % args.width)
print(' height : %i' % args.height)
print(' number of plates : %i' % args.number_of_plates)
print(' world format : %s' % world_format)
print(' black and white maps : %s' % args.black_and_white)
print(' step : %s' % step.name)
if produce_grayscale_heightmap:
print(
' + greyscale heightmap in "%s"' % produce_grayscale_heightmap)
else:
print(' (no greyscale heightmap)')
if args.rivers_map:
print(' + rivers map in "%s"' % args.rivers_map)
else:
print(' (no rivers map)')
if operation == 'ancient_map':
print(' resize factor : %i' % args.resize_factor)
print(' world file : %s' % args.world_file)
print(' sea color : %s' % args.sea_color)
print(' draw biome : %s' % args.draw_biome)
print(' draw rivers : %s' % args.draw_rivers)
print(' draw mountains : %s' % args.draw_mountains)
print(' draw land outer border : %s' % args.draw_outer_border)
set_verbose(args.verbose)
if operation == 'world':
print('') # empty line
print('starting (it could take a few minutes) ...')
world = generate_world(world_name, args.width, args.height,
seed, args.number_of_plates, args.output_dir,
step, args.ocean_level, world_format,
args.verbose, black_and_white=args.black_and_white)
if produce_grayscale_heightmap:
generate_grayscale_heightmap(world, produce_grayscale_heightmap)
if args.rivers_map:
generate_rivers_map(world, args.rivers_map)
elif operation == 'plates':
print('') # empty line
print('starting (it could take a few minutes) ...')
generate_plates(seed, world_name, args.output_dir, args.width,
args.height, num_plates=args.number_of_plates)
elif operation == 'ancient_map':
print('') # empty line
print('starting (it could take a few minutes) ...')
# First, some error checking
if args.sea_color == "blue":
sea_color = (142, 162, 179, 255)
elif args.sea_color == "brown":
sea_color = (212, 198, 169, 255)
else:
usage("Unknown sea color: " + args.sea_color +
" Select from [" + SEA_COLORS + "]")
if not args.world_file:
usage(
"For generating an ancient map is necessary to specify the " +
"world to be used (-w option)")
world = load_world(args.world_file)
print_verbose(" * world loaded")
if not args.generated_file:
args.generated_file = "ancient_map_%s.png" % world.name
operation_ancient_map(world, args.generated_file,
args.resize_factor, sea_color,
args.draw_biome, args.draw_rivers,
args.draw_mountains, args.draw_outer_border)
elif operation == 'info':
world = load_world(args.FILE)
print_world_info(world)
else:
raise Exception(
'Unknown operation: valid operations are %s' % OPERATIONS)
print('...done')
def usage(error=None):
print(
' -------------------------------------------------------------------')
print(' Federico Tomassetti and Bret Curtis, 2011-2015')
print(' Worldengine - a world generator (v. %s)' % VERSION)
print(' ')
print(' worldengine <world_name> [operation] [options]')
print(' possible operations: %s' % OPERATIONS)
print(' use -h to see options')
print(
' -------------------------------------------------------------------')
if error:
print("ERROR: %s" % error)
sys.exit(' ')
# -------------------------------
if __name__ == "__main__":
main()
| 41.653543
| 98
| 0.57689
| 2,471
| 21,160
| 4.739781
| 0.148118
| 0.025359
| 0.023224
| 0.015369
| 0.277749
| 0.224129
| 0.176059
| 0.129525
| 0.088029
| 0.075564
| 0
| 0.008201
| 0.302694
| 21,160
| 507
| 99
| 41.7357
| 0.785564
| 0.065501
| 0
| 0.19202
| 0
| 0
| 0.256003
| 0.019696
| 0
| 0
| 0
| 0.001972
| 0
| 1
| 0.037406
| false
| 0.002494
| 0.029925
| 0
| 0.104738
| 0.187032
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0f3965138dd98bc050b860206e62e5235246cb0
| 1,432
|
py
|
Python
|
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
poc-api-gateway/init-project.py
|
ronald-pineda/poc
|
b5bdb7eff963b251ea70cf02570c7d0b6cbef5e3
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import sys
import os
def create_project_folders(projectFolders):
create_folder(fullServicesFolder)
for projectIndex in range(len(projectFolders)):
projectFolder = fullServicesFolder + "/" + projectFolders[projectIndex]
create_folder(projectFolder)
open(projectFolder + "/build.gradle", "w").close()
create_project_subfolders(projectFolder)
f = open(os.getcwd() + "/settings.gradle", "a")
f.write("include '" + servicesFolder +
":" + projectFolders[projectIndex] + "'\n")
f.close()
def create_project_subfolders(projectFolder):
# print(projectFolder)
for srcFolderIndex in range(len(srcFolders)):
srcFolder = projectFolder + "/" + srcFolders[srcFolderIndex] + "/"
# print(srcFolder)
create_folder(srcFolder)
def create_folder(path):
try:
os.mkdir(path)
except OSError:
print ("create_folder failed : %s" % path)
else:
print ("create_folder succeed : %s" % path)
servicesFolder = sys.argv[1]
projectFolders = sys.argv[2].split(',')
# projectFolders = "project1 project2".split(' ')
# servicesFolderName = "/services"
srcFolders = "src src/main src/main/java src/main/resources src/test src/test/java src/test/resources".split(
' ')
currentPath = os.getcwd()
fullServicesFolder = currentPath + "/" + servicesFolder
create_project_folders(projectFolders)
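# Example invocation (illustrative, not from the original repo): create ./services with
# two Gradle sub-projects and register both in settings.gradle.
#   python init-project.py services project1,project2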
| 30.468085
| 109
| 0.672486
| 143
| 1,432
| 6.636364
| 0.41958
| 0.075869
| 0.03372
| 0.071654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003484
| 0.198324
| 1,432
| 46
| 110
| 31.130435
| 0.823171
| 0.094274
| 0
| 0
| 0
| 0.032258
| 0.146285
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.064516
| 0
| 0.16129
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0f74b9d843423b3b2ee4eb76a8944598071e925
| 10,133
|
py
|
Python
|
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | 1
|
2018-03-12T19:50:14.000Z
|
2018-03-12T19:50:14.000Z
|
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | null | null | null |
lstm.py
|
mustyoshi/scikit-ta
|
c2a8f4ce2deadaa13d8bd8891aabd096b397e330
|
[
"MIT"
] | 1
|
2020-02-22T21:59:39.000Z
|
2020-02-22T21:59:39.000Z
|
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib import rnn
import pandas as pd
import numpy as np
import random
import time
import os
import datetime
from tensorflow.python.client import timeline
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.pipeline import Pipeline
import mods
#from . import ProductionPipe
if __name__ == "__main__":
# Training Parameters
device="/gpu:0"
learning_rate = .0001
batch_size = (20*5)
display_step = 10
timesteps = 1 # timesteps
    num_classes = 2 # binary classification: price moves up over the lookforward window or not
def splitDataFrameIntoSmaller(df, chunkSize = batch_size):
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i*chunkSize:(i+1)*chunkSize])
return listOfDf
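    # Worked example (illustrative): with chunkSize=100 and a 250-row frame this yields
    # chunks of 100, 100 and 50 rows (plus an empty final chunk when the length divides evenly).
    # The training code below drops the last chunk with `del dfs[-1]`, presumably so every
    # remaining batch has exactly batch_size rows for the later reshape to (batch_size, timesteps, num_input).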
to_drop = ['Date','Time','Open','High','Low','Close','Volume',"Period_4_Lookforward_p1","Period_4_Lookforward_p2"]
to_bool = []
create = True
if(create):
#df = pd.read_csv('dhy.us.txt',names=["Date","Time","Open","High","Low","Close","Volume","Ope"])
df = pd.read_csv('bac.us.txt')
origs = df[['Close']].copy()
#df = pd.read_csv('combined.csv',header=None,names=["Open","High","Low","Close","Volume"])
prod_pipe = mods.CreatePipeline() #Pipeline([('ft1',ft1),('ft2',ft2),('ft3',ft3)])
df['Period_4_Lookforward_p1'] = (df['Close'].shift(-4).astype(float) > df['Close'].astype(float))
df['Period_4_Lookforward_p2'] =(np.min([df['Low'].shift(-n) for n in range(1,4)],axis=0) > (df['Close'].astype(float)*.99)) #np.amax([df['Close'].shift(-n).astype(float) for n in range(1,4)],axis=0) > df['Close']
df['Period_4_Lookforward'] = (df['Period_4_Lookforward_p1'].astype(bool) == True) & (df['Period_4_Lookforward_p2'].astype(bool) == True)
df = prod_pipe.transform(df)
#to_bool.append('Period_4_Lookforward')
#for b in to_bool:
# df[b] = df[b].astype(bool)
df = df.dropna()
df.drop(df.index[:32], inplace=True,errors="ignore")
#df.to_csv('Hours.csv',index=False)
#df = df.drop(to_drop,axis=1)
df = df.astype(float)
df.to_csv('Hours.csv',index=False,float_format="%.8g")
else:
df = pd.read_csv('Hours.csv')
print(sum(df['Period_4_Lookforward'].values),'total ups',sum(df['Period_4_Lookforward'].values)/len(df))
num_input = len(df.columns)-1
num_hidden =num_input*20 # int((num_input*num_input)//2) # hidden layer num of features
train = df[:int(len(df)*0.9)]
test = df[len(train):]
dfs = splitDataFrameIntoSmaller(train)
training_steps = len(dfs)*100
del dfs[-1]
random.shuffle(dfs)
dfs.append(test)
print(num_input,'inputs')
print(num_hidden,'nodes per layer')
print(len(dfs),'batches')
print(len(test),'test parts')
ind = 0
# Network Parameters
with tf.device(device):
# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])
# Define weights
weights = {
'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
'out': tf.Variable(tf.random_normal([num_classes]))
}
def RNN(x, weights, biases):
# Prepare data shape to match `rnn` function requirements
# Current data input shape: (batch_size, timesteps, n_input)
# Required shape: 'timesteps' tensors list of shape (batch_size, n_input)
# Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
x = tf.unstack(x, timesteps, 1)
# Define a lstm cell with tensorflow
lstm_cell1 = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0)
#lstm_cell = rnn.BasicRNNCell(num_hidden)
#lstm_cell = rnn.PhasedLSTMCell(num_hidden)
#lstm_cell2 = rnn.PhasedLSTMCell(num_hidden)
            lstm_cell1 = tf.nn.rnn_cell.DropoutWrapper(lstm_cell1, output_keep_prob=0.75)
            lstm_cell2 = rnn.LSTMBlockCell(num_hidden, forget_bias=1.0, use_peephole=True)
            # apply dropout to the second cell as well (the original wrapped lstm_cell1 twice)
            lstm_cell2 = tf.nn.rnn_cell.DropoutWrapper(lstm_cell2, output_keep_prob=0.75)
            lstm_cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell1, lstm_cell2] * 4)
# Get lstm cell output
outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# Linear activation, using rnn inner loop last output
return tf.matmul(outputs[-1], weights['out']) + biases['out']
logits = RNN(X, weights, biases)
with tf.device("/cpu:0"):
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
loss_op = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
logits=logits, labels=Y))
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
#optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
optimizer = tf.train.AdamOptimizer(learning_rate)
#optimizer = tf.train.FtrlOptimizer(learning_rate)
train_op = optimizer.minimize(loss_op)
# Evaluate model (with test logits, for dropout to be disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
y_p = tf.argmax(prediction, 1)
init = tf.global_variables_initializer()
# Initialize the variables (i.e. assign their default value)
test_len = len(dfs[-1])
test_data = dfs[-1]
closes = origs.loc[test_data.index].values
test_data.reset_index(drop=True,inplace=True)
test_true = test_data['Period_4_Lookforward'].values
test_label = np.array([test_data['Period_4_Lookforward'] == 0,test_data['Period_4_Lookforward'] == 1]).reshape((-1,2))
test_data = test_data.drop(['Period_4_Lookforward'],axis=1)
test_data = test_data.as_matrix()
test_data = test_data.reshape((-1, timesteps, num_input))
max_fails = 10
fails = 0
min_improv = .00000001
min_loss = 99
max_f1 = 0
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
config.intra_op_parallelism_threads=4
config.inter_op_parallelism_threads=4
with tf.Session(config=config) as sess:
# Run the initializer
sess.run(init)
start_time = time.time()
for step in range(1, training_steps+1):
batch_x= dfs[ind]
#print(len(batch_x),'rows')
ind = (ind + 1)%(len(dfs)-1)
y_true = batch_x['Period_4_Lookforward'].values
batch_y =np.array([batch_x['Period_4_Lookforward'] == 0,batch_x['Period_4_Lookforward'] == 1]).reshape((-1,2))
batch_x = batch_x.drop(['Period_4_Lookforward'],axis=1)
            # Reshape to (batch_size, timesteps, num_input) for the static RNN
batch_x = batch_x.as_matrix()
batch_x = batch_x.reshape((batch_size, timesteps, num_input))
# Run optimization op (backprop)
sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
# if(learning_rate > .0001):
# learning_rate = learning_rate/10
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# train_op = optimizer.minimize(loss_op)
if step % display_step == 0 or step == 1:
# Calculate batch loss and accuracy
loss, acc,y_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: batch_x,
Y: batch_y})
loss2, acc2,test_pred = sess.run([loss_op, accuracy,y_p], feed_dict={X: test_data,
Y: test_label})
test_f1 = f1_score(test_true,test_pred)
print("Step " + "{:07d}".format(step) + ",L= " + \
"{:.6f}".format(loss) + ",Tr=" + \
"{:.3f}".format(acc) + ",Te=" + \
"{:.3f}".format(acc2) + ",F1(Tr)={:.3f}".format(f1_score(y_true,y_pred)) + \
",F1(Te)={:.8f}".format(test_f1))
if(loss < (min_loss-min_improv) or test_f1 > max_f1):
min_loss = loss
fails = 0
max_f1 = max(max_f1,test_f1)
else:
fails = fails + 1
if(fails > max_fails):
print('Ended early due to failure to improve')
break
duration = time.time() - start_time
print("\nOptimization Finished in {:.4f}s ({:0.8f} per step)\n\tMax of {:.4f}".format(duration,duration/step,max_f1))
        # Report the final F1 score on the held-out test set
print("Final Testing Accuracy {:0.4f}%".format(f1_score(test_true,sess.run(y_p, feed_dict={X: test_data, Y: test_label}))))
last_price = 0
gain = 1
ind = 0
min_gain = 1
max_gain = 1
for row in test_data:
output = sess.run(y_p,feed_dict={X:[row]})[0]
if(output == 1):
if(last_price == 0):
last_price = closes[ind]
if(closes[ind] < last_price):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
else:
if(last_price != 0):
gain = gain * (1+((last_price - closes[ind]))*20)
min_gain = min(gain,min_gain)
max_gain = max(gain,max_gain)
last_price = 0
ind = ind + 1
print(ind,"rows gives",gain)
print(min_gain," | ",max_gain)
#saver = tf.train.Saver()
#saver.save(sess, "D:\\dev\\forex_17\\model.ckpt")
| 42.936441
| 220
| 0.589954
| 1,332
| 10,133
| 4.283784
| 0.24024
| 0.022082
| 0.056782
| 0.024536
| 0.269716
| 0.215738
| 0.188398
| 0.134595
| 0.080442
| 0.080442
| 0
| 0.027562
| 0.27672
| 10,133
| 235
| 221
| 43.119149
| 0.750989
| 0.192835
| 0
| 0.108434
| 0
| 0.006024
| 0.095794
| 0.01697
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012048
| false
| 0
| 0.084337
| 0
| 0.108434
| 0.072289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0f85d4de8f913a7c50b0d59c38e3f8dc18c049b
| 2,375
|
py
|
Python
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | 7
|
2021-10-11T17:45:33.000Z
|
2022-02-07T16:10:42.000Z
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | 2
|
2021-11-01T09:00:11.000Z
|
2022-01-24T16:21:05.000Z
|
src/napari_live_recording/__init__.py
|
jethro33/napari-live-recording
|
6c3fcc33bd18cd090e3f89971b630d1800e29e4d
|
[
"MIT"
] | null | null | null |
from napari.viewer import Viewer
import napari_live_recording.devices as devices
from napari_live_recording.widgets import CameraSelection
from napari_live_recording.control import Controller
from qtpy.QtWidgets import QWidget, QFormLayout, QGroupBox
class NapariLiveRecording(QWidget):
def __init__(self, napari_viewer : Viewer) -> None:
super().__init__()
self.viewer = napari_viewer
self.mainLayout = QFormLayout()
self.selectionWidget = CameraSelection()
self.selectionWidget.setAvailableCameras(list(devices.devicesDict.keys()))
self.controller = Controller(self)
self.mainLayout.addRow(self.selectionWidget.group)
self.viewer.layers.events.removed.connect(self.controller.clearAlbumBuffer)
# Creates a new camera object and passes it to the controller
# whenever the add button is pressed
self.selectionWidget.newCameraRequested.connect(self.addNewCamera)
self.controller.cameraDeleted.connect(self.deleteCameraWidget)
self.setLayout(self.mainLayout)
def addNewCamera(self, type: str, name, idx: str) -> None:
camera = devices.devicesDict[type](name, idx)
self.mainLayout.addRow(self.controller.addCamera(camera))
def deleteCameraWidget(self, widget: QGroupBox) -> None:
self.mainLayout.removeRow(widget)
def refreshViewer(self, img, layerName) -> None:
""" Slot triggered every time a camera acquires a frame.
Creates a new layer on the viewer with the received image as content.
If the layer already exists, it updates its content.
Args:
img (np.ndarray): image data.
layerName (str): name of the layer to create/update.
"""
if img is not None:
try:
                # layer is recreated in case the image changes type (i.e. grayscale -> RGB and vice versa)
if img.ndim != self.viewer.layers[layerName].data.ndim:
self.viewer.layers.remove(layerName)
self.viewer.add_image(img, name = layerName)
else:
self.viewer.layers[layerName].data = img
except KeyError:
# needed in case the layer of that live recording does not exist
self.viewer.add_image(img, name = layerName)
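    # Embedding sketch (an assumption, not part of this module): the widget can be docked
    # into a running napari viewer programmatically.
    #   import napari
    #   viewer = napari.Viewer()
    #   viewer.window.add_dock_widget(NapariLiveRecording(viewer), name="Live recording")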
| 45.673077
| 105
| 0.663579
| 268
| 2,375
| 5.813433
| 0.414179
| 0.044929
| 0.041078
| 0.029525
| 0.080873
| 0.043646
| 0.043646
| 0
| 0
| 0
| 0
| 0
| 0.257263
| 2,375
| 52
| 106
| 45.673077
| 0.88322
| 0.218947
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.151515
| 0
| 0.30303
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
b0f8a636893a665d94b32f86e052882874326b1a
| 4,277
|
py
|
Python
|
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | 2
|
2020-02-13T05:45:20.000Z
|
2020-04-11T05:58:02.000Z
|
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | null | null | null |
Chapter08/inceptionV3.py
|
wikibook/deep-learning-reference
|
18d2ca2ded4256b2ddbac6e76d57531ca13e6e30
|
[
"MIT"
] | null | null | null |
# Deep Learning Quick Reference Chapter 8: Transfer Learning
# Mike Bernico <mike.bernico@gmail.com>
# seed random number generators before importing keras
import numpy as np
np.random.seed(42)
import tensorflow as tf
tf.set_random_seed(42)
from keras.applications.inception_v3 import InceptionV3
from keras.models import Model
from keras.layers import Dense, GlobalAveragePooling2D
from keras.callbacks import TensorBoard, EarlyStopping, CSVLogger, ModelCheckpoint
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD
import os
import argparse
def build_model_feature_extraction():
# create the base pre-trained model
base_model = InceptionV3(weights='imagenet', include_top=False)
# add a global spatial average pooling layer
x = base_model.output
x = GlobalAveragePooling2D()(x)
# let's add a fully-connected layer
x = Dense(1024, activation='relu')(x)
# and a logistic layer
predictions = Dense(1, activation='sigmoid')(x)
# this is the model we will train
model = Model(inputs=base_model.input, outputs=predictions)
# first: train only the top layers (which were randomly initialized)
# i.e. freeze all convolutional InceptionV3 layers
for layer in base_model.layers:
layer.trainable = False
# compile the model (should be done *after* setting layers to non-trainable)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
return model
def build_model_fine_tuning(model, learning_rate=0.0001, momentum=0.9):
for layer in model.layers[:249]:
layer.trainable = False
for layer in model.layers[249:]:
layer.trainable = True
model.compile(optimizer=SGD(lr=learning_rate, momentum=momentum), loss='binary_crossentropy', metrics=['accuracy'])
return model
def create_callbacks(name):
tensorboard_callback = TensorBoard(log_dir=os.path.join(os.getcwd(), "tb_log", name), write_graph=True, write_grads=False)
checkpoint_callback = ModelCheckpoint(filepath="./model-weights" + name + ".{epoch:02d}-{val_loss:.6f}.hdf5", monitor='val_loss',
verbose=0, save_best_only=True)
return [tensorboard_callback, checkpoint_callback]
def setup_data(train_data_dir, val_data_dir, img_width=299, img_height=299, batch_size=16):
train_datagen = ImageDataGenerator(rescale=1./255)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = val_datagen.flow_from_directory(
val_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
return train_generator, validation_generator
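# Note (assumed data layout): flow_from_directory infers the binary labels from one
# sub-directory per class under each data directory, e.g.
#   data/train/class_a/*.jpg   data/train/class_b/*.jpg
#   data/val/class_a/*.jpg     data/val/class_b/*.jpg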
def main():
data_dir = "data/train/"
val_dir = "data/val/"
epochs = 10
batch_size = 30
model = build_model_feature_extraction()
train_generator, val_generator = setup_data(data_dir, val_dir)
callbacks_fe = create_callbacks(name='feature_extraction')
callbacks_ft = create_callbacks(name='fine_tuning')
# stage 1 fit
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
callbacks=callbacks_fe,
verbose=1)
scores = model.evaluate_generator(val_generator, steps=val_generator.n // batch_size)
print("Step 1 Scores: Loss: " + str(scores[0]) + " Accuracy: " + str(scores[1]))
# stage 2 fit
model = build_model_fine_tuning(model)
model.fit_generator(
train_generator,
steps_per_epoch=train_generator.n // batch_size,
epochs=epochs,
validation_data=val_generator,
validation_steps=val_generator.n // batch_size,
callbacks=callbacks_ft,
verbose=2)
scores = model.evaluate_generator(val_generator, steps=val_generator.n // batch_size)
print("Step 2 Scores: Loss: " + str(scores[0]) + " Accuracy: " + str(scores[1]))
if __name__ == "__main__":
main()
| 34.491935
| 133
| 0.710545
| 547
| 4,277
| 5.323583
| 0.347349
| 0.037088
| 0.030907
| 0.039148
| 0.337912
| 0.296016
| 0.296016
| 0.296016
| 0.23489
| 0.208791
| 0
| 0.018556
| 0.193594
| 4,277
| 123
| 134
| 34.772358
| 0.825747
| 0.123451
| 0
| 0.296296
| 0
| 0
| 0.073399
| 0.008572
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061728
| false
| 0
| 0.123457
| 0
| 0.234568
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|