Dataset schema (one row per column):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
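
Each row below is one source file together with its repository metadata and quality signals. As a minimal sketch of how such a shard could be inspected, assuming the rows are stored in a Parquet file named `data_shard.parquet` (the file name and storage format are assumptions; this preview does not state them):

```python
import pandas as pd

# Assumption: the preview rows live in a Parquet shard with the schema above.
df = pd.read_parquet("data_shard.parquet")

# Repository metadata and content-level statistics are plain columns.
row = df.iloc[0]
print(row["max_stars_repo_name"], row["size"], row["alphanum_fraction"])

# The stored source file itself is in the `content` column.
print(row["content"][:200])

# Example filter on a quality signal: keep files with few duplicated 5-grams.
clean = df[df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.3]
```
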
---

hexsha: bb7233933506ce378e6905eadbfa9e01a8d6c38d | size: 2295 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): enaml/tests/widgets/test_spin_box.py in mmckerns/enaml (head ebf417b4dce9132bffa038a588ad90436a59d37e, licenses ["BSD-3-Clause"])
max_stars_count: 11 (events 2015-01-04T14:29:23.000Z to 2019-12-25T05:38:37.000Z) | max_issues_count: 36 (events 2015-02-20T00:56:53.000Z to 2020-12-04T10:02:14.000Z) | max_forks_count: 3 (events 2015-11-19T15:11:37.000Z to 2019-03-11T23:45:02.000Z)
content:

```python
#------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from enaml.validation.api import IntValidator

from .enaml_test_case import EnamlTestCase


class TestSpinBox(EnamlTestCase):
    """ Unit tests for the SpinBox widget.
    """

    def setUp(self):
        enaml_source = """
from enaml.widgets.api import SpinBox, Window

enamldef MainView(Window):
    SpinBox:
        pass
"""
        self.parse_and_create(enaml_source)
        self.server_widget = self.find_server_widget(self.view, "SpinBox")
        self.client_widget = self.find_client_widget(self.client_view, "QtSpinBox")

    def test_set_maximum(self):
        """ Test the setting of a SpinBox's maximum attribute
        """
        with self.app.process_events():
            self.server_widget.maximum = 1000
        self.assertEquals(self.client_widget.maximum(), self.server_widget.maximum)

    def test_set_minimum(self):
        """ Test the setting of a SpinBox's minimum attribute
        """
        with self.app.process_events():
            self.server_widget.minimum = 10
        self.assertEquals(self.client_widget.minimum(), self.server_widget.minimum)

    def test_set_single_step(self):
        """ Test the setting of a SpinBox's single_step attribute
        """
        with self.app.process_events():
            self.server_widget.single_step = 25
        self.assertEquals(self.client_widget.singleStep(), self.server_widget.single_step)

    def test_set_value(self):
        """ Test the setting of a SpinBox's value attribute
        """
        with self.app.process_events():
            self.server_widget.value = 50
        self.assertEquals(self.client_widget.value(), self.server_widget.value)

    def test_set_wrap(self):
        """ Test the setting of a SpinBox's wrap attribute
        """
        with self.app.process_events():
            self.server_widget.wrapping = True
        self.assertEquals(self.client_widget.wrapping(), self.server_widget.wrapping)

### Need to add tests for special_value_text, prefix, suffix and read_only

if __name__ == '__main__':
    import unittest
    unittest.main()
```

avg_line_length: 31.875 | max_line_length: 90 | alphanum_fraction: 0.623094
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 267, 2295, 5.138577, 0.29588, 0.104956, 0.12828, 0.065598, 0.427114, 0.284257, 0.284257, 0.284257, 0.178571, 0, 0, 0.007778, 0.215686, 2295, 71, 91, 32.323944, 0.754444, 0.270588, 0, 0.138889, 0, 0, 0.07664, 0, 0, 0, 0, 0, 0.138889, 1, 0.166667, false, 0.027778, 0.111111, 0, 0.305556, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb72f9435fdc29920b70ac72d5ae0238e0aa1869 | size: 1351 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): oandapyV20-examples-master/src/console/greenlets/accountdetails.py in cdibble2011/OANDA (head 68327d6d65dd92952d7a1dc49fe29efca766d900, licenses ["MIT"])
max_stars_count: 127 (events 2017-02-28T17:34:14.000Z to 2022-01-21T13:14:30.000Z) | max_issues_count: 36 (events 2018-06-07T21:34:13.000Z to 2022-03-13T21:01:43.000Z) | max_forks_count: 76 (events 2017-01-02T14:15:07.000Z to 2022-03-28T03:49:45.000Z)
content:

```python
# -*- coding: utf-8 -*-
import gevent

from oandapyV20.endpoints.accounts import AccountDetails, AccountChanges


class GAccountDetails(gevent.Greenlet):
    """Greenlet to handle account details/changes.

    Initially get the AccountDetails and then keep polling
    for account changes.
    In case of changes put those on the NAV-Queue
    """

    def __init__(self, api, accountID, queue, sleepTime=4):
        super(GAccountDetails, self).__init__()
        self.api = api
        self.accountID = accountID
        self.queue = queue
        self.sleepTime = sleepTime

    def _run(self):
        # setup the summary request
        r = AccountDetails(accountID=self.accountID)
        rv = self.api.request(r)
        lastTransactionID = rv.get("lastTransactionID")
        lastLastTransactionID = lastTransactionID
        r = None
        while True:
            if not r or lastLastTransactionID != lastTransactionID:
                params = {"sinceTransactionID":
                          int(rv.get("lastTransactionID"))}
                r = AccountChanges(accountID=self.accountID, params=params)
                lastLastTransactionID = lastTransactionID
            rv = self.api.request(r)
            lastTransactionID = rv.get('lastTransactionID')
            self.queue.put_nowait(rv)
            gevent.sleep(self.sleepTime)
```

avg_line_length: 32.95122 | max_line_length: 75 | alphanum_fraction: 0.637306
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 133, 1351, 6.398496, 0.466165, 0.032902, 0.077556, 0.037603, 0.13161, 0.13161, 0.13161, 0.13161, 0.13161, 0, 0, 0.004103, 0.278312, 1351, 40, 76, 33.775, 0.868718, 0.159141, 0, 0.16, 0, 0, 0.06205, 0, 0, 0, 0, 0, 0, 1, 0.08, false, 0, 0.08, 0, 0.2, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb73bd26c0031f64dbf994ec3a8a3952a7f0e16a | size: 802 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): wbsv/main.py in yswallow/wbsv-cli (head 30b68d0d1efd56fba99286d53470a39d317d6d9d, licenses ["MIT"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
import sys

from . import Archive
from . import Find
from . import ParseArgs
from . import Interact


def iter_urls(opt):
    """Iterate given urls for saving."""
    try:
        for x in opt["urls"]:
            Archive.archive(Find.extract_uri_recursive(x, opt["level"]),
                            x, opt["retry"])
    except KeyboardInterrupt:
        print("[!]Interrupted!", file=sys.stderr)
        print("[!]Halt.", file=sys.stderr)
        exit(1)


def main():
    """Main function."""
    opt = ParseArgs.parse_args()
    if len(opt["urls"]) == 0:
        Interact.interactive(opt)
    elif opt["only-target"]:
        [Archive.archive([x], x, opt["retry"]) for x in opt["urls"]]
        exit(0)
    else:
        iter_urls(opt)
        exit(0)


if __name__ == "__main__":
    main()
```

avg_line_length: 20.05 | max_line_length: 72 | alphanum_fraction: 0.557357
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 98, 802, 4.428571, 0.459184, 0.092166, 0.050691, 0.041475, 0.059908, 0, 0, 0, 0, 0, 0, 0.006993, 0.286783, 802, 39, 73, 20.564103, 0.751748, 0.05611, 0, 0.076923, 0, 0, 0.092493, 0, 0, 0, 0, 0, 0, 1, 0.076923, false, 0, 0.192308, 0, 0.269231, 0.076923
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb74b1ea7d5e069ac223adabbb64c46d9e5159e9 | size: 5613 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): Sapphire/CallStatement.py in Rhodolite/Parser-py (head 7743799794d92aa8560db11f1d6d5f00e5ac1925, licenses ["MIT"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
#
# Copyright (c) 2017-2018 Joy Diamond. All rights reserved.
#
@gem('Sapphire.CallStatement')
def gem():
    require_gem('Sapphire.BookcaseExpression')
    require_gem('Sapphire.MemberExpression')
    require_gem('Sapphire.Method')
    require_gem('Sapphire.Tree')

    class CallStatementBase(SapphireTrunk):
        __slots__ = ((
            'frill',        # VW_Frill | Commented_VW_Frill
            'left',         # Expression
            'arguments',    # Arguments*
        ))

        class_order = CLASS_ORDER__CALL_STATEMENT
        is_any_else = false
        is_any_except_or_finally = false
        is_else_header_or_fragment = false
        is_statement_header = false
        is_statement = true

        def __init__(t, frill, left, arguments):
            assert type(left) is not VW_Frill
            t.frill = frill
            t.left = left
            t.arguments = arguments

        def __repr__(t):
            return arrange('<%s %r %r %r>', t.__class__.__name__, t.frill, t.left, t.arguments)

        def add_comment(t, comment):
            frill = t.frill
            assert frill.comment is 0
            return t.conjure_call(
                conjure_commented_vw_frill(comment, frill.v, frill.w),
                t.left,
                t.arguments,
            )

        def count_newlines(t):
            return t.frill.count_newlines() + t.left.count_newlines() + t.arguments.count_newlines()

        def find_require_gem(t, e):
            if not t.left.is_name('require_gem'):
                return
            assert t.arguments.is_arguments_1
            e.add_require_gem(t.arguments.a)

        @property
        def indentation(t):
            return t.frill.v

        def display_token(t):
            frill = t.frill
            comment = frill.comment
            return arrange('<%s +%d%s %s %s %s>',
                           t.display_name,
                           frill.v.total,
                           ('' if comment is 0 else '' + comment.display_token()),
                           t.left .display_token(),
                           t.arguments.display_token(),
                           frill.w .display_token())

        def dump_token(t, f, newline = true):
            frill = t.frill
            comment = frill.comment
            if comment is 0:
                f.partial('<%s +%d ', t.display_name, frill.v.total)
                t.left .dump_token(f)
                t.arguments.dump_token(f)
                r = frill.w .dump_token(f, false)
                return f.token_result(r, newline)
            with f.indent(arrange('<%s +%d', t.display_name, frill.v.total), '>'):
                comment .dump_token(f)
                t.left .dump_token(f)
                t.arguments.dump_token(f)
                frill.w .dump_token(f)

        order = order__frill_ab

        def scout_variables(t, art):
            t.left .scout_variables(art)
            t.arguments.scout_variables(art)

        def write(t, w):
            frill = t.frill
            comment = frill.comment
            if comment is not 0:
                comment.write(w)
            w(frill.v.s)
            t.left.write(w)
            t.arguments.write(w)
            w(frill.w.s)

    CallStatementBase.a = CallStatementBase.left
    CallStatementBase.b = CallStatementBase.arguments
    CallStatementBase.k1 = CallStatementBase.frill
    CallStatementBase.k2 = CallStatementBase.left
    CallStatementBase.k3 = CallStatementBase.arguments

    @share
    class CallStatement(CallStatementBase):
        __slots__ = (())
        display_name = 'call-statement'

    @share
    class MethodCallStatement(CallStatementBase):
        __slots__ = (())
        display_name = 'method-call-statement'

    def produce_conjure_call_statement(name, meta):
        cache = create_cache(name, conjure_nub)
        return produce_conjure_unique_triple__312(name, meta, cache)

    conjure_call_statement = produce_conjure_call_statement('call-statement', CallStatement)
    conjure_method_call_statement = produce_conjure_call_statement('method-call-statement', MethodCallStatement)

    static_conjure_call_statement = static_method(conjure_call_statement)
    static_conjure_method_call_statement = static_method(conjure_method_call_statement)

    MemberExpression.call_statement = static_conjure_method_call_statement
    PearlToken .call_statement = static_conjure_call_statement
    SapphireTrunk .call_statement = static_conjure_call_statement

    CallStatement .conjure_call = static_conjure_call_statement
    MethodCallStatement.conjure_call = static_conjure_method_call_statement

    CallStatement.transform = produce_transform__frill__ab_with_priority(
        'call_statement',
        PRIORITY_POSTFIX,
        PRIORITY_COMPREHENSION,
        conjure_call_statement,
    )

    MethodCallStatement.transform = produce_transform__frill__ab_with_priority(
        'method_call_statement',
        PRIORITY_POSTFIX,
        PRIORITY_COMPREHENSION,
        conjure_method_call_statement,
    )
```

avg_line_length: 31.533708 | max_line_length: 112 | alphanum_fraction: 0.549973
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 550, 5613, 5.287273, 0.187273, 0.116231, 0.068776, 0.053645, 0.293673, 0.241747, 0.169188, 0.069464, 0.05227, 0.024072, 0, 0.005389, 0.371815, 5613, 177, 113, 31.711864, 0.819342, 0.020666, 0, 0.162393, 0, 0, 0.051749, 0.024964, 0, 0, 0, 0, 0.025641, 1, 0.102564, false, 0, 0, 0.025641, 0.299145, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb75831e0db77f35e095a17d5451a6e61a18c00c | size: 546 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): languages/python/sqlalchemy-oso/tests/test_partial.py in johnhalbert/oso (head 3185cf3740b74c3c1deaca5b9ec738325de4c8a2, licenses ["Apache-2.0"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
"""Unit tests for partial implementation."""
from polar.expression import Expression
from polar.variable import Variable

from sqlalchemy_oso.partial import dot_op_path


def test_dot_op_path():
    single = Expression("Dot", [Variable("_this"), "created_by"])
    assert dot_op_path(single) == ["created_by"]

    double = Expression("Dot", [single, "username"])
    assert dot_op_path(double) == ["created_by", "username"]

    triple = Expression("Dot", [double, "first"])
    assert dot_op_path(triple) == ["created_by", "username", "first"]
```

avg_line_length: 32.117647 | max_line_length: 69 | alphanum_fraction: 0.705128
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 69, 546, 5.333333, 0.376812, 0.067935, 0.122283, 0.122283, 0, 0, 0, 0, 0, 0, 0, 0, 0.142857, 546, 16, 70, 34.125, 0.786325, 0.069597, 0, 0, 0, 0, 0.175299, 0, 0, 0, 0, 0, 0.3, 1, 0.1, false, 0, 0.3, 0, 0.4, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb75907adc83d289e117c742bd2e7ed7ea682464 | size: 426 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): lib/version.py in Durendal/electrum-rby (head 0dadd13467d44bcc7128f0dec0fa1aeff8d22576, licenses ["MIT"])
max_stars_count: null (event datetimes null) | max_issues_count: 1 (events 2021-11-15T17:47:29.000Z to 2021-11-15T17:47:29.000Z) | max_forks_count: 1 (events 2017-11-13T23:19:46.000Z to 2017-11-13T23:19:46.000Z)
content:

```python
ELECTRUM_VERSION = '3.0'   # version of the client package
PROTOCOL_VERSION = '0.10'  # protocol version requested

# The hash of the mnemonic seed must begin with this
SEED_PREFIX = '01'       # Standard wallet
SEED_PREFIX_2FA = '101'  # Two-factor authentication


def seed_prefix(seed_type):
    if seed_type == 'standard':
        return SEED_PREFIX
    elif seed_type == '2fa':
        return SEED_PREFIX_2FA
```

avg_line_length: 32.769231 | max_line_length: 60 | alphanum_fraction: 0.683099
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 58, 426, 4.810345, 0.551724, 0.179211, 0.09319, 0, 0, 0, 0, 0, 0, 0, 0, 0.040123, 0.239437, 426, 12, 61, 35.5, 0.820988, 0.349765, 0, 0, 0, 0, 0.084871, 0, 0, 0, 0, 0, 0, 1, 0.111111, false, 0, 0, 0, 0.333333, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb75ca51f4748a57620c013b53d94680ace60cc1 | size: 1579 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): src/url.py in nahueldebellis/TwitchTournamentGenerator (head a0a203e08d836ad744850839385324c54314b8a4, licenses ["MIT"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
#!/usr/bin/env python
from pyshorteners import Shortener

"""Url class"""


class Url():
    """This class format participants and add to an url and short the url"""

    cant_participants = 0
    bracket = 0

    def __init__(self):
        self.short_url = Shortener()
        self.url_final = ['https://scorecounter.com/tournament/?set=', '51001111000000']
        self.concat = '&'
        self.treintaydos = '5'
        self.dieciseis = '4'
        self.ocho = '3'

    def add(self, participant):
        """add new pasticipant to the bracket"""
        Url.bracket = Url.bracket if Url.cant_participants % 2 else Url.bracket+1
        Url.cant_participants = Url.cant_participants+1
        position = 'home' if Url.cant_participants % 2 else 'visitor'
        self.url_final.append(f'{self.concat}{position}1-{Url.bracket}={participant}')

    def show(self):
        """concat the url and return the string of shorter url"""
        if Url.cant_participants <= 32:
            self.url_final[1] = self.treintaydos+self.url_final[1][1:]
        if Url.cant_participants <= 16:
            self.url_final[1] = self.dieciseis+self.url_final[1][1:]
        if Url.cant_participants <= 8:
            self.url_final[1] = self.ocho+self.url_final[1][1:]
        Url.cant_participants = 0
        Url.bracket = 0
        self.format_url_spaces()
        print(self.url_final)
        return self.short_url.isgd.short(self.url_final)

    def format_url_spaces(self):
        """replace space with %20 in the url"""
        self.url_final = ''.join(self.url_final).replace(' ', '%20')
```

avg_line_length: 41.552632 | max_line_length: 88 | alphanum_fraction: 0.627612
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 214, 1579, 4.485981, 0.317757, 0.0875, 0.15, 0.08125, 0.194792, 0.127083, 0.072917, 0.072917, 0.072917, 0, 0, 0.036667, 0.240025, 1579, 37, 89, 42.675676, 0.763333, 0.131729, 0, 0, 0, 0, 0.094382, 0.038951, 0, 0, 0, 0, 0, 1, 0.133333, false, 0, 0.033333, 0, 0.3, 0.033333
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb76a8caabf2f194e804291ce67ba419fda452c3 | size: 5302 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): UniPCoA.py in AdeBC/UniRPyCoA (head f5b54297daf07856d9a88ebc8e6277e7be9b7ecc, licenses ["MIT"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
import os
import pandas as pd
from plotnine import *
import plotnine
from matplotlib import pyplot as plt
import matplotlib
from scipy.spatial.distance import pdist, squareform
from skbio.stats.ordination import pcoa
from skbio.diversity import beta_diversity
from skbio.io import read
from skbio.tree import TreeNode
import argparse
from scipy.spatial.distance import pdist, squareform


def loadTree(tree):
    with open(tree, 'r') as f:
        tree = read(f, format="newick", into=TreeNode)
    return tree


if __name__ == '__main__':
    matplotlib.rcParams['pdf.fonttype'] = 42
    matplotlib.rcParams['ps.fonttype'] = 42
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--abundance', type=str, default='species_abundance.csv', help='The input abundance data, columns represent samples and rows represent taxa.')
    parser.add_argument('-m', '--metadata', type=str, default='metadata.csv', help='The input metadata, use column "Env" to specify the group of the input samples.')
    parser.add_argument('-o', '--output', type=str, default='PLots.Unifrac', help='The folder to save output table and plots.')
    parser.add_argument('-t', '--tree', type=str, default='LTPs132_SSU_tree.newick', help='The input phylogenetic tree, in Newick format.')
    parser.add_argument('--metric', type=str, default='weighted_unifrac', help='The metric for beta_diversity calculation.')
    args = parser.parse_args()

    print('Loading data...')
    X = pd.read_csv(args.abundance, index_col=0).T
    Y = pd.read_csv(args.metadata).set_index('SampleID')
    use_phylogeny = args.metric in ['weighted_unifrac', 'unweighted_unifrac']
    if use_phylogeny:
        tree = loadTree(tree=args.tree)
        print('Processing the phylogenetic tree...')
        for n in tree.postorder():
            if n.name != None and '_ ' in n.name:
                n.name = n.name.split('_ ')[1]
        names = [n.name for n in tree.postorder()]

    print('Processing the abundance data...')
    ids = X.index.tolist()
    otu_ids = X.columns.tolist()
    X = X.reset_index().melt(id_vars=['index'], value_vars=X.columns, var_name='taxonomy', value_name='abundance')
    taxa = pd.DataFrame(X.taxonomy.apply(lambda x: dict(map(lambda y: y.split('__'), filter(lambda x: not x.endswith('__'), x.split(';'))))).tolist())
    X = pd.concat([X.drop(columns=['taxonomy']), taxa], axis=1)
    X = X.melt(id_vars=['index','abundance'], value_vars=taxa.columns, var_name='rank', value_name='taxonomy')
    X = X.groupby(by=['index', 'taxonomy'], as_index=False).sum().pivot_table(columns='taxonomy', index='index', values='abundance')
    if use_phylogeny:
        X = X.loc[:, X.columns.to_series().isin(names)]
    ids = X.index.tolist()
    otu_ids = X.columns.tolist()

    try:
        print('Trying calculating {} beta_diversity using scikit-bio & scikit-learn package...'.format(args.metric))
        print('This could be time-consuming.')
        if use_phylogeny:
            mat = beta_diversity(args.metric, X, ids, tree=tree, otu_ids=otu_ids, validate=False).data
        else:
            mat = beta_diversity(args.metric, X, ids, otu_ids=otu_ids, validate=False).data
    except ValueError:
        print('Failed, the metric you selected is not supported by neither scikit-bio nor scikit-learn.')
        print('Trying using SciPy...')
        mat = squareform(pdist(X, metric=args.metric))
    print('Succeeded!')

    pcs = pd.DataFrame(pcoa(mat, number_of_dimensions=2).samples.values.tolist(), index=X.index, columns=['PC1', 'PC2'])
    pcs = pd.concat([pcs, Y], axis=1)

    print('Visualizing the data using plotnine package...')
    p = (ggplot(pcs, aes(x='PC1', y='PC2', color='Env'))
         + geom_point(size=0.2)
         + scale_color_manual(['#E64B35FF','#4DBBD5FF','#00A087FF','#3C5488FF','#F39B7FFF','#8491B4FF','#91D1C2FF'])
         + theme(panel_grid_major = element_blank(), panel_grid_minor = element_blank(), panel_background = element_blank())
         + theme(axis_line = element_line(color="gray", size = 1))
         + stat_ellipse()
         + xlab('PC1')
         + ylab('PC2')
         )
    box_1 = (ggplot(pcs, aes(x='Env', y='PC1', color='Env'))
             + geom_boxplot(width=0.3, show_legend=False)
             + scale_color_manual(['#E64B35FF','#4DBBD5FF','#00A087FF','#3C5488FF','#F39B7FFF','#8491B4FF','#91D1C2FF'])
             + theme(figure_size=[4.8, 1])
             + theme(panel_grid_major = element_blank(), panel_grid_minor = element_blank(), panel_background = element_blank())
             + theme(axis_line = element_line(color="gray", size = 1))
             + xlab('Env')
             + ylab('PC1')
             + coord_flip()
             )
    box_2 = (ggplot(pcs, aes(x='Env', y='PC2', color='Env'))
             + geom_boxplot(width=0.3, show_legend=False)
             + scale_color_manual(['#E64B35FF','#4DBBD5FF','#00A087FF','#3C5488FF','#F39B7FFF','#8491B4FF','#91D1C2FF'])
             + theme(figure_size=[4.8, 1])
             + theme(panel_grid_major = element_blank(), panel_grid_minor = element_blank(), panel_background = element_blank())
             + theme(axis_line = element_line(color="gray", size = 1))
             + xlab('Env')
             + ylab('PC2')
             + coord_flip()
             )

    if not os.path.isdir(args.output):
        os.mkdir(args.output)
    p.save(os.path.join(args.output, 'PCoA.pdf'), width=4.8, height=4.8)
    box_1.save(os.path.join(args.output, 'PC1_boxplot.pdf'), width=4.8, height=1)
    box_2.save(os.path.join(args.output, 'PC2_boxplot.pdf'), width=4.8, height=1)
    pcs.to_csv(os.path.join(args.output, 'Principle_coordinations.csv'))
    print('Plots are saved in {}. Import them into Illustrator for further improvements.'.format(args.output))
```

avg_line_length: 49.092593 | max_line_length: 169 | alphanum_fraction: 0.704828
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 777, 5302, 4.664093, 0.293436, 0.029801, 0.028146, 0.015453, 0.344095, 0.318433, 0.289183, 0.218543, 0.218543, 0.199227, 0, 0.029374, 0.120332, 5302, 108, 170, 49.092593, 0.747642, 0, 0, 0.277228, 0, 0, 0.254573, 0.013389, 0, 0, 0, 0, 0, 1, 0.009901, false, 0, 0.138614, 0, 0.158416, 0.09901
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb7baf8c8805cb067d4cf73845cfee7e1f0d116f | size: 2835 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): invoice/spy/notify_osd.py in simone-campagna/invoice (head 6446cf6ebb158b895cd11d707eb019ae23833881, licenses ["Apache-2.0"])
max_stars_count: null (event datetimes null) | max_issues_count: 16 (events 2015-01-30T16:28:54.000Z to 2015-03-02T14:18:56.000Z) | max_forks_count: null (event datetimes null)
content:

```python
# -*- coding: utf-8 -*-
#
# Copyright 2015 Simone Campagna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

__author__ = "Simone Campagna"
__all__ = [
    'available'
    'notify',
]

import os

try:  # pragma: no cover
    import notify2
    HAS_NOTIFY2 = True
except ImportError:
    HAS_NOTIFY2 = False

from . import text_formatter

_NOTIFICATION = None


def available():  # pragma: no cover
    return HAS_NOTIFY2


_PACKAGE_DIR = os.path.dirname(__file__)

_ICONS = {
    'info': os.path.join(_PACKAGE_DIR, 'icons', 'logo_info.jpg'),
    'warning': os.path.join(_PACKAGE_DIR, 'icons', 'logo_warning.jpg'),
    'error': os.path.join(_PACKAGE_DIR, 'icons', 'logo_error.jpg'),
}

if HAS_NOTIFY2:  # pragma: no cover
    def notify(logger, validation_result, scan_events, updated_invoice_collection, event_queue, spy_notify_level):
        notification_required, kind, title, text, detailed_text = text_formatter.formatter(
            validation_result=validation_result,
            scan_events=scan_events,
            updated_invoice_collection=updated_invoice_collection,
            event_queue=event_queue,
            mode=text_formatter.MODE_SHORT,
            spy_notify_level=spy_notify_level,
        )
        if notification_required:
            global _NOTIFICATION
            summary = title + ' [{}]'.format(kind.upper())
            message = text
            if detailed_text:
                message += '\n\n' + detailed_text
            icon = _ICONS[kind]
            if _NOTIFICATION is None:
                notify2.init("Invoice spy [{}]".format(kind.upper()))
                _NOTIFICATION = notify2.Notification(summary=summary, message=message, icon=icon)
            notification = _NOTIFICATION
            urgency_d = {
                'info': notify2.URGENCY_LOW,
                'warning': notify2.URGENCY_NORMAL,
                'error': notify2.URGENCY_CRITICAL,
            }
            notification.update(summary=summary, message=message, icon=icon)
            notification.set_urgency(urgency_d[kind])
            #if notify_pyqt4.available():
            #    callback = lambda : notify_pyqt4.notify(logger, kind, title, text, detailed_text)
            #    notification.add_action("fai qualcosa", "qualcosa", callback, user_data=None)
            notification.show()
else:
    notify = None
```

avg_line_length: 35 | max_line_length: 114 | alphanum_fraction: 0.65679
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 329, 2835, 5.431611, 0.431611, 0.033576, 0.021824, 0.028539, 0.193061, 0.102406, 0.102406, 0, 0, 0, 0, 0.009836, 0.246914, 2835, 80, 115, 35.4375, 0.827166, 0.288889, 0, 0, 0, 0, 0.072755, 0, 0, 0, 0, 0, 0, 1, 0.038462, false, 0, 0.076923, 0.019231, 0.134615, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb7bc4b7cb8f753fbf39ae6cb16944a05b0ab207 | size: 3878 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): src/discoursegraphs/readwrite/salt/labels.py in arne-cl/discoursegraphs (head 4e14688e19c980ac9bbac75ff1bf5d751ef44ac3, licenses ["BSD-3-Clause"])
max_stars_count: 41 (events 2015-02-20T00:35:39.000Z to 2022-03-15T13:54:13.000Z) | max_issues_count: 68 (events 2015-01-09T18:07:38.000Z to 2021-10-06T16:30:43.000Z) | max_forks_count: 8 (events 2015-02-20T00:35:48.000Z to 2021-10-30T14:09:03.000Z)
content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This module handles the parsing of SALT labels.
There are three types of labels (SFeature, SElementId, SAnnotation).
Labels can occur as children of these elements: 'layers', 'nodes', 'edges'
and '{sDocumentStructure}SDocumentGraph'.
"""

from lxml.builder import ElementMaker

from discoursegraphs.readwrite.salt.util import (get_xsi_type, string2xmihex,
                                                 NAMESPACES)

XSI = "http://www.w3.org/2001/XMLSchema-instance"


class SaltLabel(object):
    """
    Two or more ``SaltLabel``s are attached to each element in a SaltXMI
    file: one label representing the name (``SNAME``) of the element, one
    representing its ID and one label for each kind of annotation associated
    with that element.
    """
    def __init__(self, name, value, xsi_type, namespace=None, hexvalue=None):
        """
        create a SaltLabel from scratch.

        Parameters
        ----------
        name : str
            the name of the label, e.g. ``SNAME`` or ``id``
        namespace : str or None
            the namespace of the label, e.g. ``salt`` or ``graph``
        value : str
            the actual label value, e.g. ``sSpan19`` or ``NP``
        hexvalue: str or None
            a weird hex-based representation of the value, which always starts
            with ``ACED00057``. If it is not set, we can automatically generate
            it, but we can't guarantee that it matches the value SaltNPepper
            would have generated.
        xsi_type : str
            the type of the label, e.g. ``saltCore:SFeature`` or
            ``saltCore:SAnnotation``
        """
        self.xsi_type = xsi_type
        self.namespace = namespace if namespace else None
        self.name = name
        self.value = value
        self.hexvalue = hexvalue if hexvalue else string2xmihex(value)

    @classmethod
    def from_etree(cls, etree_element):
        """
        creates a ``SaltLabel`` from an etree element representing a label
        element in a SaltXMI file.

        A label element in SaltXMI looks like this::

            <labels xsi:type="saltCore:SFeature" namespace="salt"
                name="SNAME" value="ACED0005740007735370616E3139"
                valueString="sSpan19"/>

        Parameters
        ----------
        etree_element : lxml.etree._Element
            an etree element parsed from a SaltXMI document
        """
        return cls(name=etree_element.attrib['name'],
                   value=etree_element.attrib['valueString'],
                   xsi_type=get_xsi_type(etree_element),
                   namespace=etree_element.attrib.get('namespace'),
                   hexvalue=etree_element.attrib['value'])

    def to_etree(self):
        """
        creates an etree element of a ``SaltLabel`` that mimicks a SaltXMI
        <labels> element
        """
        attribs = {
            '{{{pre}}}type'.format(pre=NAMESPACES['xsi']): self.xsi_type,
            'namespace': self.namespace, 'name': self.name,
            'value': self.hexvalue, 'valueString': self.value}
        non_empty_attribs = {key: val for (key, val) in attribs.items()
                             if val is not None}
        E = ElementMaker()
        return E('labels', non_empty_attribs)


def get_namespace(label):
    """
    returns the namespace of an etree element or None, if the element
    doesn't have that attribute.
    """
    if 'namespace' in label.attrib:
        return label.attrib['namespace']
    else:
        return None


def get_annotation(label):
    """
    returns an annotation (key, value) tuple given an etree element
    (with tag 'labels' and xsi type 'SAnnotation'), e.g. ('tiger.pos', 'ART')
    """
    assert get_xsi_type(label) == 'saltCore:SAnnotation'
    return (label.attrib['name'], label.attrib['valueString'])
```

avg_line_length: 35.577982 | max_line_length: 79 | alphanum_fraction: 0.60624
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 463, 3878, 5.008639, 0.339093, 0.06727, 0.030185, 0.01423, 0.033635, 0, 0, 0, 0, 0, 0, 0.014477, 0.287519, 3878, 108, 80, 35.907407, 0.824828, 0.478339, 0, 0, 0, 0, 0.104067, 0, 0, 0, 0, 0, 0.028571, 1, 0.142857, false, 0, 0.057143, 0, 0.371429, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb7c19c39a756e836f832fe37756f912b98af313 | size: 1214 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): examples/stream_entries.py in feedly/python-api-client (head a211734a77337145efa0d1a1ddfe484f74530998, licenses ["MIT"])
max_stars_count: 31 (events 2018-08-20T08:35:09.000Z to 2022-03-21T04:17:27.000Z) | max_issues_count: 8 (events 2018-10-17T18:09:44.000Z to 2021-12-14T10:03:34.000Z) | max_forks_count: 7 (events 2018-09-04T01:10:48.000Z to 2021-08-19T11:07:54.000Z)
content:

```python
from feedly.api_client.session import FeedlySession
from feedly.api_client.stream import StreamOptions
from feedly.api_client.utils import run_example


def example_stream_entries():
    """
    This example will prompt you to enter a category name, download the 10 latest articles from it, and display their
    titles.
    """
    # Prompt for the category name/id to use
    user_category_name_or_id = input("> User category name or id: ")

    # Create the session using the default auth directory
    session = FeedlySession()

    # Fetch the category by its name/id
    # To use an enterprise category, change to `session.user.enterprise_categories`. Tags are also supported.
    category = session.user.user_categories.get(user_category_name_or_id)

    # Stream 10 articles with their contents from the category
    for article in category.stream_contents(options=StreamOptions(max_count=10)):
        # Print the title of each article
        print(article["title"])


if __name__ == "__main__":
    # Will prompt for the token if missing, and launch the example above
    # If a token expired error is raised, will prompt for a new token and restart the example
    run_example(example_stream_entries)
```

avg_line_length: 39.16129 | max_line_length: 117 | alphanum_fraction: 0.74547
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 175, 1214, 5.005714, 0.451429, 0.068493, 0.044521, 0.065068, 0.068493, 0, 0, 0, 0, 0, 0, 0.006129, 0.193575, 1214, 30, 118, 40.466667, 0.888662, 0.490939, 0, 0, 0, 0, 0.069374, 0, 0, 0, 0, 0, 0, 1, 0.090909, false, 0, 0.272727, 0, 0.363636, 0.090909
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb84b60c8bd64fa0c7fda5ba539335cf5ce1fc5a | size: 10746 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): plistutils/nskeyedarchiver.py in sathwikv143/plistutils (head fc7783449da1ed222547ceb5c416402216fa9b34, licenses ["BSD-3-Clause"])
max_stars_count: 35 (events 2017-10-17T17:24:16.000Z to 2022-03-18T22:10:47.000Z) | max_issues_count: 1 (events 2021-07-09T01:06:30.000Z to 2021-07-09T01:06:30.000Z) | max_forks_count: 4 (events 2018-11-17T15:52:36.000Z to 2022-02-28T08:01:14.000Z)
content:

```python
import logging
from uuid import UUID

from biplist import Data, Uid

from plistutils.utils import parse_mac_absolute_time

logger = logging.getLogger(__name__)


class NSKeyedArchiveException(Exception):
    pass


class NSKeyedArchiveParser(object):
    # https://developer.apple.com/documentation/foundation/nskeyedarchiver
    KNOWN_VERSIONS = [100000]

    def __init__(self, fullpath):
        self.fullpath = fullpath

    @staticmethod
    def is_known_nskeyedarchive(plist_data, fullpath):
        if plist_data:
            archiver = plist_data.get('$archiver')
            version = plist_data.get('$version')
            # NR -> iOS NanoRegistry KeyedArchiver (inherits from NSKeyedArchiver)
            if archiver in ['NRKeyedArchiver', 'NSKeyedArchiver']:
                if version in NSKeyedArchiveParser.KNOWN_VERSIONS:
                    return True
                else:
                    logger.error("Unknown NSKeyedArchiver version '{}' in file {}, please report.", version, fullpath)
        return False

    def parse_archive(self, plist_data):
        """
        :param plist_data: pre-parsed plist data
        :return: parsed dict
        """
        ret = {}
        objects_list = plist_data.get('$objects')
        if objects_list:
            for name, val in plist_data.get('$top', {}).items():
                if isinstance(val, Uid):
                    top = objects_list[val.integer]
                    try:
                        ret[name] = self.process_obj(top, objects_list)
                    except RecursionError:
                        # failsafe
                        logger.error(
                            "Could not parse NSKeyedArchive '{}' in top key '{}' due to infinite recursion",
                            self.fullpath, name)
                else:
                    ret[name] = val
        return ret

    def process_obj(self, obj, objects_list, parents=None):
        if parents is None:
            parents = set()
        obj_id = id(obj)
        if obj_id in parents:
            raise NSKeyedArchiveException("Infinite loop detected while parsing NSKeyedArchive data in '{}'".format(self.fullpath))
        else:
            parents.add(obj_id)
        ret = obj
        if isinstance(obj, dict):
            ret = self.convert_dict(obj, objects_list, parents)
        elif isinstance(obj, list):
            ret = [self.process_obj(x, objects_list, parents) for x in obj]
        elif isinstance(obj, Uid):
            ret = self.process_obj(objects_list[obj.integer], objects_list, parents)
        elif isinstance(obj, (bool, bytes, int, float)) or obj is None:
            ret = obj
        elif isinstance(obj, str):
            ret = self.convert_string(obj)
        elif isinstance(obj, Data):
            ret = bytes(obj)
        else:
            logger.warning("Unexpected data type '{}' in '{}', please report.", type(obj).__name__, self.fullpath)
        parents.remove(obj_id)
        return ret

    def _process_ns_dictionary(self, _class_name, d, objects_list, parents):
        if 'NS.keys' in d and 'NS.objects' in d:
            assembled_dict = {}
            for idx, k in enumerate(d['NS.keys']):
                assembled_dict[self.process_obj(k, objects_list, parents)] = self.process_obj(d['NS.objects'][idx],
                                                                                              objects_list, parents)
            return assembled_dict
        return d

    def _process_ns_url(self, _class_name, d, objects_list, parents):
        base = self.process_obj(d.get('NS.base', ''), objects_list, parents)
        relative = self.process_obj(d.get('NS.relative', ''), objects_list, parents)
        return '/'.join([x for x in [base, relative] if x])

    def _process_ns_uuid(self, _class_name, d, _objects_list, _parents):
        uuid_bytes = d.get('NS.uuidbytes', '')
        if len(uuid_bytes) == 16:
            return str(UUID(bytes=uuid_bytes))
        return uuid_bytes

    def _process_ns_sequence(self, _class_name, d, objects_list, parents):
        array_members = d.get('NS.objects')
        return [self.process_obj(member, objects_list, parents) for member in array_members]

    def _process_ns_data(self, _class_name, d, _objects_list, _parents):
        data = d.get('NS.data', None)
        if isinstance(data, dict) and self.is_known_nskeyedarchive(data, ''):
            return self.parse_archive(data)
        return data

    def _process_ns_null(self, _class_name, d, _objects_list, _parents):
        return None

    def _process_ns_string(self, _class_name, d, _objects_list, _parents):
        return d.get('NS.string', None)

    def _process_ns_attributed_string(self, class_name, d, objects_list, parents):
        # Sample:
        # {'NSAttributeInfo': Uid(74), '$class': Uid(51), 'NSString': Uid(68), 'NSAttributes': Uid(69)}
        # TODO if demand - process NSAttributes, NSAttributeInfo (font, color, style, etc)
        return self.process_obj(d.get('NSString'), objects_list, parents)

    def _process_ns_range(self, _class_name, d, objects_list, parents):
        # length: The number of items in the range (can be 0). LONG_MAX is the maximum value you should use for length.
        # location: The start index (0 is the first). LONG_MAX is the maximum value you should use for location.
        #
        return {
            'length': self.process_obj(d.get('NS.rangeval.length'), objects_list, parents),
            'location': self.process_obj(d.get('NS.rangeval.location'), objects_list, parents)
        }

    def _process_ns_value(self, class_name, d, objects_list, parents):
        # An NSValue object can hold any of the scalar types such as int, float, and char,
        # as well as pointers, structures, and object id references.
        #
        # NS.special: 1 : NSPoint, 2 : NSSize, 3 : NSRect, 4 : NSRange, 12 : NSEdgeInsets
        #
        # NSConcreteValue varies based on type, which is typically provided by the @encode compiler directive
        # https://developer.apple.com/library/content/documentation/Cocoa/Conceptual/ObjCRuntimeGuide/Articles/ocrtTypeEncodings.html#//apple_ref/doc/uid/TP40008048-CH100
        # These types are voluminous, and we need samples to support them.
        # https://github.com/apple/swift-corelibs-foundation/blob/master/Foundation/NSSpecialValue.swift
        ns_value_special_types = {
            # 1: 'NSPoint'
            # 2: 'NSSize'
            # 3: 'NSRect' https://github.com/apple/swift-corelibs-foundation/blob/master/TestFoundation/Resources/NSKeyedUnarchiver-RectTest.plist
            4: NSKeyedArchiveParser._process_ns_range,
            # 12: 'NSEdgeInsets' https://github.com/apple/swift-corelibs-foundation/blob/master/TestFoundation/Resources/NSKeyedUnarchiver-EdgeInsetsTest.plist
        }
        special_type = d.get('NS.special')
        if special_type:  # NSSpecialValue
            if special_type in ns_value_special_types:
                return ns_value_special_types[special_type](self, class_name, d, objects_list, parents)
            else:
                logger.error("Unsupported NSValue special type {} in NSKeyedArchiver data, please report.", special_type)
        else:  # NSConcreteValue
            logger.error("Unsupported NSConcreteValue type in NSKeyedArchiver data, please report.", special_type)
        return None

    def _process_ns_list_item(self, _class_name, d, objects_list, parents):
        # TODO 'properties' is an NSDictionary
        return {
            'url': self.process_obj(d.get('URL', None), objects_list, parents),
            'bookmark': self.process_obj(d.get('bookmark', None), objects_list, parents),
            'name': self.process_obj(d.get('name', None), objects_list, parents),
            'order': self.process_obj(d.get('order', None), objects_list, parents),
            'uuid': self.process_obj(d.get('uniqueIdentifier', None), objects_list, parents)
        }

    def _process_ns_date(self, _class_name, d, _objects_list, _parents):
        return parse_mac_absolute_time(d.get('NS.time'))

    def _process_default(self, class_name, d, _objects_list, _parents):
        logger.warning(
            "Unknown NSKeyedArchiver class name {} with data ({}) in '{}', please report.", class_name, d, self.fullpath)

    @classmethod
    def get_processors(cls):
        return {
            'NSArray': cls._process_ns_sequence,
            'NSAttributedString': cls._process_ns_attributed_string,
            # 'NSCache'
            # 'NSColor' simple sample: {'NSColorSpace': 3, 'NSWhite': b'0\x00'},
            # 'NSCompoundPredicate'
            'NSData': cls._process_ns_data,
            'NSDate': cls._process_ns_date,
            'NSDictionary': cls._process_ns_dictionary,
            # 'NSError'
            # 'NSFont' sample: {'NSName': 'Helvetica', 'NSSize': 12.0, 'NSfFlags': 16},
            # 'NSGeometry'
            # 'NSLocale'
            'NSMutableArray': cls._process_ns_sequence,
            'NSMutableAttributedString': cls._process_ns_attributed_string,
            'NSMutableData': cls._process_ns_data,
            'NSMutableDictionary': cls._process_ns_dictionary,
            'NSMutableSet': cls._process_ns_sequence,
            'NSMutableString': cls._process_ns_string,
            # 'NSNotification' https://github.com/apple/swift-corelibs-foundation/blob/master/TestFoundation/Resources/NSKeyedUnarchiver-NotificationTest.plist
            'NSNull': cls._process_ns_null,
            # 'NSNumber'
            # 'NSOrderedSet' https://github.com/apple/swift-corelibs-foundation/blob/master/TestFoundation/Resources/NSKeyedUnarchiver-OrderedSetTest.plist
            # 'NSParagraphStyle' sample: {'NSAlignment': 4, 'NSTabStops': '$null'},
            # 'NSPredicate'
            # 'NSProgressFraction'
            # 'NSRange'
            # 'NSRegularExpression'
            'NSSet': cls._process_ns_sequence,
            'NSString': cls._process_ns_string,
            'NSURL': cls._process_ns_url,
            'NSUUID': cls._process_ns_uuid,
            'NSValue': cls._process_ns_value,
            'SFLListItem': cls._process_ns_list_item
        }

    def convert_dict(self, d, objects_list, parents):
        if '$class' in d:
            try:
                class_name = self.process_obj(d['$class'], objects_list, parents).get('$classname')
                return self.get_processors().get(class_name, NSKeyedArchiveParser._process_default)(self, class_name, d, objects_list, parents)
            except (AttributeError, KeyError, ValueError):
                pass
        return d

    def convert_string(self, obj):
        if obj == '$null':
            return None
        return obj
```

avg_line_length: 45.72766 | max_line_length: 170 | alphanum_fraction: 0.622557
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 1215, 10746, 5.283951, 0.240329, 0.066822, 0.095327, 0.047352, 0.261682, 0.223053, 0.185047, 0.131464, 0.0919, 0.06947, 0, 0.006515, 0.271543, 10746, 234, 171, 45.923077, 0.813618, 0.210311, 0, 0.138365, 0, 0, 0.117067, 0.002971, 0, 0, 0, 0.004274, 0, 1, 0.125786, false, 0.012579, 0.025157, 0.044025, 0.327044, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb87fa54ef182344fda1ae0ba9713c3ff055e11e | size: 9337 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): Tools/Builder/build.py in hung0913208/Base (head 420b4ce8e08f9624b4e884039218ffd233b88335, licenses ["BSD-3-Clause"])
max_stars_count: null (event datetimes null) | max_issues_count: null (event datetimes null) | max_forks_count: 2 (events 2020-11-04T08:00:37.000Z to 2020-11-06T08:33:33.000Z)
content:

```python
#!/usr/bin/python3
#
# Project: build
# Description: this is a very simple build tool which imitates from Bazel
#

import subprocess
import argparse
import shutil
import glob
import sys
import os

from core import *
from languages import *
from plugins import *


class Build(Plugin):
    def __init__(self, root, rebuild=False, **kwargs):
        super(Build, self).__init__()

        if root[0] != '/':
            root = '%s/%s' % (os.getcwd(), root)

        # @NOTE: load optional parameters
        self._output = kwargs.get('build') or ('%s/build' % root)
        self._root = root

        # @NOTE: force Builder to remove and build again
        if rebuild is False and os.path.exists(self._output):
            shutil.rmtree(self._output)

        # @NOTE: load our builder's objection
        self._manager = Manager(root, **kwargs)
        self._manager.install([
            self,
            Git(**kwargs),
            Http(**kwargs)
        ])
        self._manager.support([
            C(**kwargs),
            D(**kwargs)
        ])

    def prepare(self):
        """ prepare everything before building this repository
        """
        workspace = '%s/.workspace' % self._root

        try:
            if os.path.exists(workspace):
                Logger.debug("found .workspace file %s -> going to parse this file now" % workspace)

                if self.parse_workspace_file(workspace) is False:
                    return False
                else:
                    return self._manager.perform(root=self._root, output=self._output)
            else:
                return False
        except Exception as error:
            Logger.error('Got an exception: %s -> going to teardown this project' % str(error))
            Logger.exception()
            self._manager.teardown(self._root)

    def derived(self):
        """ list derived classes of Build
        """
        result = super(Build, self).derived()

        if not result is None:
            result.append('Build')
        return result

    def define(self):
        pass

    @staticmethod
    def run(command):
        try:
            cmd = subprocess.Popen(command.split(' '),
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)

            error_console = cmd.stderr.read()
            output_console = cmd.stdout.read()

            cmd.communicate()
            cmd.wait()
            return True
        except Exception as error:
            Logger.error('Error when perform %s: %s' % (command, str(error)))
            return False

    def analyze(self, path=None):
        """ analyze a repository
        """
        path = self._root if path is None else path
        need_performing = False

        try:
            for path in glob.glob('%s/*' % path):
                if os.path.isdir(path):
                    exclusive = '%s/.excluse' % path
                    build = '%s/.build' % path

                    if os.path.exists(exclusive):
                        Logger.debug("found .excluse file %s -> going to run it now" % exclusive)

                        if Build.run(exclusive) is False:
                            return False
                        else:
                            continue
                    elif os.path.exists(build) and not os.path.exists(exclusive):
                        Logger.debug("found .build file %s -> going to parse this file now" % build)

                        if self.parse_build_file(build) is False:
                            return False
                        elif self.analyze(path) is False:
                            return False
                    elif self.analyze(path) is False:
                        return False
            else:
                return True
        except Exception as error:
            # @NOTE: got an exception teardown now
            Logger.error('Got an exception: %s -> going to teardown this project' % str(error))
            Logger.exception()
            return False

    def build(self):
        """ build a repository
        """
        return self._manager.perform(root=self._root, output=self._output)

    def release(self):
        self._manager.teardown(self._root, self._output)

    def parse_workspace_file(self, workspace_file):
        """ parse file .workspace
        """
        # @NOTE: we must use dir that contains the 'workspace_file' since .workspace
        # usually define its resouce with this dir
        self._manager.set_current_dir('workspace', os.path.dirname(workspace_file))

        with open(workspace_file) as fp:
            source = fp.read()

            for item in iter_function(source):
                function = self._manager.find_function(item['function'], 'workspace')
                variables = {}

                if 'variables' in item:
                    for var in item['variables']:
                        if isinstance(var, dict):
                            variables[list(var.keys())[0]] = list(var.values())[0]

                if function is None:
                    raise AssertionError('can\'t determine %s' % item['function'])
                else:
                    function(**variables)
        return True

    def parse_build_file(self, build_file):
        """ parse file .build
        """
        # @NOTE: we must use dir that contains the 'build_file' since .build
        # usually define its resouce with this dir
        self._manager.set_current_dir('build', os.path.dirname(build_file))

        with open(build_file) as fp:
            source = fp.read()

            for item in iter_function(source):
                function = self._manager.find_function(item['function'], 'build')
                variables = {}

                if 'variables' in item:
                    for var in item['variables']:
                        if isinstance(var, dict):
                            variables[list(var.keys())[0]] = list(var.values())[0]

                if function is None:
                    Logger.warning('can\'t determine %s -> ignore it now' % item['function'])
                    continue
                else:
                    function(**variables)
        return True


class Serving(Plugin):
    def __init__(self, **kwargs):
        super(Serving, self).__init__()
        self._error = False

    @property
    def error(self):
        return self._error

    def define(self):
        pass

    def check(self):
        pass


def parse():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rebuild', type=int, default=1,
                        help='build everything from scratch')
    parser.add_argument('--silence', type=int, default=0,
                        help='make Builder more quieted')
    parser.add_argument('--root', type=str, default=os.getcwd(),
                        help='where project is defined')
    parser.add_argument('--debug', type=int, default=0,
                        help='enable debug info')
    parser.add_argument('--stacktrace', type=str, default=None,
                        help='enable stacktrace info')
    parser.add_argument('--use_package_management', type=int, default=1,
                        help='enable using package management')
    parser.add_argument('--auto_update_packages', type=int,
                        default=0, help='enable auto update packages')
    parser.add_argument('--on_serving', type=int, default=0,
                        help='use Builder on serving mode when they receive '
                             'tasks from afar')
    parser.add_argument('--mode', type=int, default=0,
                        help='select mode of this process if on_serving is on')
    return parser.parse_args()


if __name__ == '__main__':
    flags = parse()

    if flags.debug != 0 and flags.silence == 0:
        # @NOTE: by default we only use showing stacktrace if flag debug is on
        Logger.set_level(DEBUG)

    if not flags.stacktrace is None:
        if flags.stacktrace.lower() == 'debug':
            Logger.set_stacktrace(DEBUG)
        elif flags.stacktrace.lower() == 'warning':
            Logger.set_stacktrace(WARN)
        elif flags.stacktrace.lower() == 'error':
            Logger.set_stacktrace(FATAL)

    Logger.silence(flags.silence == 1)

    if flags.on_serving == 0:
        builder = Build(flags.root,
                        auto_update_packages=flags.auto_update_packages==1,
                        use_package_management=flags.use_package_management==1,
                        silence=(flags.silence == 1),
                        rebuild=(flags.rebuild == 1))
        code = 255

        if builder.prepare() is False:
            Logger.debug('prepare fail -> exit with code 255')
        elif builder.analyze() is False:
            Logger.debug('build fail -> exit with code 255')
        elif builder.build() is False:
            Logger.debug('build fail -> exit with code 255')
        else:
            code = 0
            builder.release()
        sys.exit(code)
    else:
        recepter = Serving(root=flags.root,
                           auto_update_packages=flags.auto_update_packages==1,
                           use_package_management=flags.use_package_management==1,
                           silence=(flags.silence == 1))
        sys.exit(255 if recepter.error is True else 0)
```

avg_line_length: 34.076642 | max_line_length: 100 | alphanum_fraction: 0.543108
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 1023, 9337, 4.845552, 0.210166, 0.02441, 0.030865, 0.018156, 0.36978, 0.322776, 0.287876, 0.264676, 0.240871, 0.240871, 0, 0.006794, 0.353647, 9337, 273, 101, 34.201465, 0.814582, 0.081825, 0, 0.340102, 0, 0, 0.113223, 0.005397, 0, 0, 0, 0, 0.005076, 1, 0.076142, false, 0.015228, 0.045685, 0.005076, 0.218274, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

---

hexsha: bb8aaea1d863f144dd7a710dd878ed727beb22e5 | size: 414 | ext: py | lang: Python
file (same for max_stars / max_issues / max_forks): label.py in dotrungkien/face_recognition (head 52c552c4f73850e62db88d0dc7271d73e4150180, licenses ["MIT"])
max_stars_count, max_issues_count, max_forks_count: null (all event datetimes null)
content:

```python
import cv2
import sys
import numpy as np
from scipy.io import loadmat


def convert():
    labels = loadmat('tmp/data/devkit/cars_meta.mat')
    car_labels = []
    for label in labels['class_names'][0]:
        car_labels.append(label[0])
    labels_file = open("tmp/data/devkit/car_labels.txt", "w")
    labels_file.write("\n".join(car_labels))
    labels_file.close()


if __name__ == '__main__':
    convert()
```

avg_line_length: 21.789474 | max_line_length: 61 | alphanum_fraction: 0.673913
quality signals (in schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 61, 414, 4.295082, 0.622951, 0.137405, 0.099237, 0, 0, 0, 0, 0, 0, 0, 0, 0.008876, 0.183575, 414, 18, 62, 23, 0.766272, 0, 0, 0, 0, 0, 0.195652, 0.142512, 0, 0, 0, 0, 0, 1, 0.071429, false, 0, 0.285714, 0, 0.357143, 0
duplicate qsc_* columns without the _quality_signal suffix: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null) | effective: 1 | hits: 0

bb8d90bc55457ae6e3a765f4679f3b20738e394c
| 581
|
py
|
Python
|
leetcode/medium/single-number-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
leetcode/medium/single-number-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
leetcode/medium/single-number-ii.py
|
rainzhop/cumulus-tank
|
09ebc7858ea53630e30606945adfea856a80faa3
|
[
"MIT"
] | null | null | null |
# https://leetcode.com/problems/single-number-ii/
#
# Given an array of integers, every element appears three times except for one. Find that single one.
#
# Note:
# Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
class Solution(object):
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
        d = {}
        for i in nums:
            d.setdefault(i, 0)
            d[i] = d[i] + 1
            if d[i] == 3:
                # An element seen three times cannot be the answer; drop it
                d.pop(i)
        # The one remaining key is the element that appears exactly once;
        # list() is needed in Python 3, where dict.keys() returns a view
        return list(d.keys())[0]
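
# Illustrative alternative (not part of the original file): the follow-up asks
# for linear time without extra memory. A standard bit-manipulation sketch
# counts, per bit position, how many numbers set that bit; any count not
# divisible by 3 belongs to the single number. Assumes 32-bit inputs.
class SolutionBitwise(object):
    def singleNumber(self, nums):
        result = 0
        for bit in range(32):
            count = sum((n >> bit) & 1 for n in nums)
            if count % 3:
                result |= 1 << bit
        # Reinterpret the top bit as a sign bit (two's complement)
        if result >= 2 ** 31:
            result -= 2 ** 32
        return result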
| 27.666667
| 108
| 0.562823
| 79
| 581
| 4.139241
| 0.772152
| 0.018349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010152
| 0.321859
| 581
| 20
| 109
| 29.05
| 0.819797
| 0.507745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb8db06160aa8c394dde6ee5900fec9ece4ddde7
| 5,347
|
py
|
Python
|
wiki/test/test_wikisection.py
|
IgalMilman/DnDHelper
|
334822a489e7dc2b5ae17230e5c068b89c6c5d10
|
[
"MIT"
] | null | null | null |
wiki/test/test_wikisection.py
|
IgalMilman/DnDHelper
|
334822a489e7dc2b5ae17230e5c068b89c6c5d10
|
[
"MIT"
] | null | null | null |
wiki/test/test_wikisection.py
|
IgalMilman/DnDHelper
|
334822a489e7dc2b5ae17230e5c068b89c6c5d10
|
[
"MIT"
] | null | null | null |
import os
import uuid
from datetime import datetime
import mock
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from utils.widget import quill
from wiki.models import wikipage, wikisection
from wiki.models.wikipage import Keywords, WikiPage
from wiki.models.wikisection import WikiSection
class WikiSectionTestCase(TestCase):
def setUp(self):
self.firstUser = User(is_superuser=True, username='test1', password='test1', email='test1@example.com', first_name='testname1', last_name='testlast2')
self.secondUser = User(is_superuser=False, username='test2', password='test2', email='test2@example.com', first_name='testname2', last_name='testlast2')
self.firstUser.save()
self.secondUser.save()
self.wikiuuid1 = uuid.uuid4()
self.wikiuuid2 = uuid.uuid4()
self.wikisqtext = '{"ops":[{"insert":"123123\\n"}]}'
self.wikistext = 'text'
self.wikisuuid1 = uuid.uuid4()
self.wikisuuid2 = uuid.uuid4()
self.wikisuuid3 = uuid.uuid4()
self.wikipath = 'wiki'
self.createdtime = datetime.now(pytz.utc)
self.wikiPage1 = WikiPage(unid=self.wikiuuid1, createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testpage1')
self.wikiPage2 = WikiPage(unid=self.wikiuuid2, createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testpage2')
self.wikiPage1.save()
self.wikiPage2.save()
self.wikisection1 = WikiSection(unid=self.wikisuuid1, createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testsec1', pageorder=1, text=self.wikisqtext, wikipage=self.wikiPage1)
self.wikisection2 = WikiSection(unid=self.wikisuuid2, createdon=self.createdtime, updatedon=self.createdtime, createdby=None, updatedby=None, title='testsec2', pageorder=2, text=self.wikistext, wikipage=self.wikiPage1)
self.wikisection3 = WikiSection(unid=self.wikisuuid3, createdon=self.createdtime, updatedon=self.createdtime, createdby=self.firstUser, updatedby=self.secondUser, title='testsec3', pageorder=3, text=self.wikistext, wikipage=self.wikiPage1)
self.wikisection1.save()
self.wikisection1.createdon=self.createdtime
self.wikisection1.updatedon=self.createdtime
self.wikisection1.save()
self.wikisection2.save()
self.wikisection3.save()
def test_wiki_section_get_files_folder(self):
settings.WIKI_SECTION_FILES = self.wikipath
os.makedirs = mock.Mock(return_value=None, spec='os.makedirs')
os.path.exists = mock.Mock(return_value=False, spec='os.path.exists')
self.assertEqual(self.wikisection1.get_files_folder(), os.path.join(self.wikipath, str(self.wikisuuid1)))
os.path.exists.assert_called_once_with(os.path.join(self.wikipath, str(self.wikisuuid1)))
os.makedirs.assert_called_once()
def test_wiki_section_generate_link(self):
wikisection.reverse = mock.Mock(return_value=self.wikipath, spec='django.urls.reverse')
self.assertEqual(self.wikisection1.generate_link(), self.wikipath)
wikisection.reverse.assert_called_once_with('wiki_page', kwargs={'wikipageuuid': self.wikiPage1.unid})
def test_wiki_section_get_link(self):
wikisection.reverse = mock.Mock(return_value=self.wikipath, spec='django.urls.reverse')
self.assertEqual(self.wikisection1.get_link(), self.wikipath)
wikisection.reverse.assert_called_once_with('wiki_page', kwargs={'wikipageuuid': self.wikiPage1.unid})
def test_wiki_section_createtime(self):
self.assertEqual(self.wikisection1.createtime(), self.createdtime.astimezone(pytz.timezone('America/New_York')))
def test_wiki_section_updatetime(self):
self.assertEqual(self.wikisection1.updatetime(), self.createdtime.astimezone(pytz.timezone('America/New_York')))
def test_wiki_section_createuser(self):
self.assertEqual(self.wikisection1.createuser(), self.firstUser.get_full_name())
def test_wiki_section_updateuser(self):
self.assertEqual(self.wikisection1.updateuser(), self.secondUser.get_full_name())
def test_wiki_section_createuser_none(self):
self.assertIsNone(self.wikisection2.createuser())
def test_wiki_section_updateuser_none(self):
self.assertIsNone(self.wikisection2.updateuser())
def test_wiki_section_str(self):
self.assertEqual(str(self.wikisection1), 'Wiki section: testsec1. UNID: ' + str(self.wikisuuid1))
    def test_wiki_section_is_quill_content_true(self):
        self.assertTrue(self.wikisection1.is_quill_content())
    def test_wiki_section_is_quill_content_false(self):
        # wikisection2 holds plain text rather than Quill JSON
        self.assertFalse(self.wikisection2.is_quill_content())
def test_wiki_section_get_quill_content(self):
self.assertEqual(self.wikisection1.get_quill_content(), quill.get_quill_text(self.wikisqtext))
def test_wiki_page_get_sections_number_3(self):
self.assertEqual(len(self.wikiPage1.wikisection_set.all()), 3)
def test_wiki_page_get_sections_number_0(self):
self.assertEqual(len(self.wikiPage2.wikisection_set.all()), 0)
| 53.47
| 248
| 0.747522
| 650
| 5,347
| 5.98
| 0.2
| 0.057885
| 0.042449
| 0.060201
| 0.47929
| 0.388732
| 0.36815
| 0.272704
| 0.272704
| 0.251608
| 0
| 0.017312
| 0.135777
| 5,347
| 99
| 249
| 54.010101
| 0.823848
| 0
| 0
| 0.075
| 0
| 0
| 0.0634
| 0.005985
| 0
| 0
| 0
| 0
| 0.2375
| 1
| 0.2
| false
| 0.025
| 0.1625
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb8e03933f18743e4789f0bc3df9d4b4ca88a87c
| 2,205
|
py
|
Python
|
Shivarth_Project(2).py
|
rodincode/python
|
5bcc53b6103e53b37a3e40635502cbca53fec43e
|
[
"MIT"
] | 1
|
2021-02-11T04:42:28.000Z
|
2021-02-11T04:42:28.000Z
|
Shivarth_Project(2).py
|
rodincode/python
|
5bcc53b6103e53b37a3e40635502cbca53fec43e
|
[
"MIT"
] | null | null | null |
Shivarth_Project(2).py
|
rodincode/python
|
5bcc53b6103e53b37a3e40635502cbca53fec43e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 20 13:26:46 2019
@author: LENOVO
"""
import pandas as pd
filename = r"C:\Users\LENOVO\Downloads\Tweets.csv"
df = pd.read_csv(filename,encoding="unicode_escape")
all_data = df.drop_duplicates(keep='first', inplace=False)
cleaned_data = all_data.dropna()
sentences = cleaned_data['text']
y = cleaned_data['airline_sentiment']
numerical_outcomes=y.replace(["positive","negative","neutral"],[1,0,2])
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
eng_stops = set(stopwords.words('english'))
# Remove English stop words from each tweet
def removing_stop_words(sentences):
    cleaned = []
    for sentence in sentences:
        words = [word for word in sentence.split() if word.lower() not in eng_stops]
        cleaned.append(" ".join(words))
    return cleaned
new_sentences = removing_stop_words(sentences)
from sklearn.model_selection import train_test_split # imports module from package
x_train, x_test, y_train, y_test = train_test_split(new_sentences, numerical_outcomes, test_size=0.25, random_state=1000)
from sklearn.feature_extraction.text import CountVectorizer
#from io import StringIO
vectorizer = CountVectorizer()
vectorizer.fit(x_train)
#docs_new_train = [ StringIO.StringIO(x) for x in x_train]
#docs_new_test = [ StringIO.StringIO(x) for x in x_test]
X_train = vectorizer.transform(x_train)
X_test = vectorizer.transform(x_test)
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression()
classifier.fit(X_train, y_train)
score = classifier.score(X_test, y_test)
print("\n Accuracy:", score) #model accuracy
#
#########################################################
###Predicting the sentiment of user input
#########################################################
txt = input("Enter expression: ")
test_sentences = [txt]
test_bag = vectorizer.transform(test_sentences)
result_label = classifier.predict(test_bag)  # predicted class (1=positive, 0=negative, 2=neutral)
result_score = classifier.predict_proba(test_bag)  # probabilities of belonging to each class
#
if result_label[0] == 1:
    print("Positive", result_score)
elif result_label[0] == 0:
    print("Negative", result_score)
else:
    print("Neutral", result_score)
| 22.05
| 105
| 0.676644
| 280
| 2,205
| 5.114286
| 0.457143
| 0.02514
| 0.023743
| 0.036313
| 0.03352
| 0.03352
| 0.03352
| 0
| 0
| 0
| 0
| 0.013172
| 0.173696
| 2,205
| 100
| 106
| 22.05
| 0.772777
| 0.168254
| 0
| 0
| 0
| 0
| 0.100562
| 0.022486
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.153846
| 0
| 0.179487
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb8fa677b509d4b926b1c8e7fd1bc0528332c98d
| 909
|
py
|
Python
|
pinax/lms/activities/migrations/0007_migrate.py
|
pinax/pinax-lms-activities
|
e73109038e1e0a8c71cc52f278e03bf645f3a16a
|
[
"MIT"
] | 10
|
2015-03-04T01:37:02.000Z
|
2019-06-04T04:59:44.000Z
|
pinax/lms/activities/migrations/0007_migrate.py
|
pinax/pinax-lms-activities
|
e73109038e1e0a8c71cc52f278e03bf645f3a16a
|
[
"MIT"
] | 8
|
2016-01-16T14:58:16.000Z
|
2020-06-22T20:30:14.000Z
|
pinax/lms/activities/migrations/0007_migrate.py
|
pinax/pinax-lms-activities
|
e73109038e1e0a8c71cc52f278e03bf645f3a16a
|
[
"MIT"
] | 4
|
2015-09-18T02:04:39.000Z
|
2020-10-14T20:10:57.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
    if schema_editor.connection.alias != "default":
return
ActivityState = apps.get_model("pinax_lms_activities", "ActivityState")
ActivitySessionState = apps.get_model("pinax_lms_activities", "ActivitySessionState")
for activity_session_state in ActivitySessionState.objects.all():
activity_state = ActivityState.objects.get(
user=activity_session_state.user,
activity_key=activity_session_state.activity_key,
)
activity_session_state.activity_state = activity_state
activity_session_state.save()
class Migration(migrations.Migration):
dependencies = [
("pinax_lms_activities", "0006_auto_20160206_2029"),
]
operations = [
migrations.RunPython(forwards),
]
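
# Illustrative note (not part of the original migration): RunPython also
# accepts a reverse callable, so a reversible variant could declare
#
#     operations = [
#         migrations.RunPython(forwards, migrations.RunPython.noop),
#     ]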
| 30.3
| 89
| 0.716172
| 94
| 909
| 6.574468
| 0.510638
| 0.121359
| 0.161812
| 0.055016
| 0.210356
| 0.210356
| 0
| 0
| 0
| 0
| 0
| 0.023192
| 0.193619
| 909
| 29
| 90
| 31.344828
| 0.819918
| 0.023102
| 0
| 0
| 0
| 0
| 0.138826
| 0.025959
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb93be6fbacaf91fef33d78741e67ae984dd8a0a
| 6,830
|
py
|
Python
|
pycap/ethernet.py
|
Blueswing/pycap
|
19e579ec0c362939f1c7ebe87773e24e36ccdec2
|
[
"MIT"
] | null | null | null |
pycap/ethernet.py
|
Blueswing/pycap
|
19e579ec0c362939f1c7ebe87773e24e36ccdec2
|
[
"MIT"
] | null | null | null |
pycap/ethernet.py
|
Blueswing/pycap
|
19e579ec0c362939f1c7ebe87773e24e36ccdec2
|
[
"MIT"
] | null | null | null |
import struct
import subprocess
from abc import ABCMeta
from functools import lru_cache
from typing import Union, Tuple, Optional
from .base import Header, Protocol
from .constants import *
ETH_TYPE_IP = 0x0800
ETH_TYPE_ARP = 0x0806
ETH_TYPE_RARP = 0x8035
ETH_TYPE_SNMP = 0x814c
ETH_TYPE_IPV6 = 0x86dd
ETH_TYPE_MPLS_UNICAST = 0x8847
ETH_TYPE_MPLS_MULTICAST = 0x8848
ETH_TYPE_PPPOE_DISCOVERY = 0x8863
ETH_TYPE_PPPOE_SESSION = 0x8864
_ETH_TYPE_MAP = {
ETH_TYPE_IP: PROTOCOL_IP,
ETH_TYPE_ARP: PROTOCOL_ARP,
ETH_TYPE_RARP: PROTOCOL_RARP,
ETH_TYPE_SNMP: PROTOCOL_SNMP,
ETH_TYPE_IPV6: PROTOCOL_IPV6,
ETH_TYPE_MPLS_UNICAST: PROTOCOL_MPLS,
ETH_TYPE_MPLS_MULTICAST: PROTOCOL_MPLS,
ETH_TYPE_PPPOE_DISCOVERY: PROTOCOL_PPPOE,
ETH_TYPE_PPPOE_SESSION: PROTOCOL_PPPOE
}
ETH_P_ALL = 0x3 # capture all ethernet types
ETH_P_NOT_SET = 0x0 # only receive
_ETH_II_FMT = '>BBBBBBBBBBBBH'
_ETH_802_3_FMT = '>BBBBBBBBBBBBHL'
"""
This packet structure describes the pseudo-header added by Linux system.
+---------------------------+
| Packet type |
| (2 Octets) |
+---------------------------+
| ARPHRD_ type |
| (2 Octets) |
+---------------------------+
| Link-layer address length |
| (2 Octets) |
+---------------------------+
| Link-layer address |
| (8 Octets) |
+---------------------------+
| Protocol type |
| (2 Octets) |
+---------------------------+
The packet type field is in network byte order (big-endian); it contains a value that is one of:
0, if the packet was specifically sent to us by somebody else;
1, if the packet was broadcast by somebody else;
2, if the packet was multicast, but not broadcast, by somebody else;
3, if the packet was sent to somebody else by somebody else;
4, if the packet was sent by us.
reference:
https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL.html
"""
_LINK_LAYER_PACKET_TYPE_MAP = {
0x0: 'unicast to us',
    0x1: 'broadcast to us',
0x2: 'multicast to us',
0x3: 'not sent to us',
0x4: 'sent by us'
}
_interfaces = None
def get_interface_names():
global _interfaces
if _interfaces is None:
import os
_interfaces = os.listdir('/sys/class/net')
return _interfaces
class MACAddress:
def __init__(self, mac: Union[int, bytes, str]):
if isinstance(mac, str):
self._mac_s = mac
tmp = mac.split(':')
if len(tmp) != 6:
                raise ValueError('invalid mac address')
mac_i = 0
for x in tmp:
mac_i <<= 8
mac_i += int(x, 16)
self._mac_i = mac_i
self._mac_b = self._mac_i.to_bytes(6, BYTE_ORDER_NET)
elif isinstance(mac, bytes):
self._mac_b = mac[:6]
self._mac_i = int.from_bytes(self._mac_b, BYTE_ORDER_NET)
self._mac_s = ':'.join('{:02x}'.format(a) for a in self._mac_b)
else:
self._mac_i = mac
self._mac_b = mac.to_bytes(6, BYTE_ORDER_NET)
self._mac_s = ':'.join('{:02x}'.format(a) for a in self._mac_b)
def as_int(self):
return self._mac_i
def as_bytes(self):
return self._mac_b
def as_str(self):
return self._mac_s
def __str__(self):
return f'MACAddress(\'{self._mac_s}\')'
def __repr__(self):
return self.__str__()
@lru_cache(10)
def get_mac_address(interface_name) -> MACAddress:
res = subprocess.getoutput(f'cat /sys/class/net/{interface_name}/address')
if len(res.split(':')) != 6:
        raise ValueError('MAC address not found')
return MACAddress(res)
def describe_eth_type(eth_type: int):
if eth_type in _ETH_TYPE_MAP:
return _ETH_TYPE_MAP[eth_type]
return f'Unknown {eth_type}'
def describe_packet_type(packet_type: int):
if packet_type in _LINK_LAYER_PACKET_TYPE_MAP:
return _LINK_LAYER_PACKET_TYPE_MAP[packet_type]
return f'Unknown {packet_type}'
class EthernetPacketInfo(Header):
def __init__(self):
self.net_if = ''
self.protocol = 0
self.src_mac = 0
self.packet_type = 0
self.address_type = 0
def describe(self) -> dict:
return {
'network_interface': self.net_if,
'protocol': describe_eth_type(self.protocol),
'src_mac': MACAddress(self.src_mac),
'packet_type': describe_packet_type(self.packet_type),
'address_type': self.address_type
}
def parse_ethernet_packet_info(raw_data):
net_if, proto, packet_type, address_type, mac = raw_data
obj = EthernetPacketInfo()
obj.net_if = net_if
obj.protocol = proto
obj.src_mac = int.from_bytes(mac, BYTE_ORDER_NET)
obj.packet_type = packet_type
obj.address_type = address_type
return obj
class EthernetHeader(Header, metaclass=ABCMeta):
def __init__(self, dst_mac, src_mac):
self.dst_mac = dst_mac
self.src_mac = src_mac
class EthernetIIHeader(EthernetHeader):
def __init__(self, dst_mac, src_mac):
super().__init__(dst_mac, src_mac)
self.eth_type = 0
@property
def upper_layer_protocol(self) -> Optional[str]:
return describe_eth_type(self.eth_type)
def describe(self) -> dict:
return {
'src_mac': MACAddress(self.src_mac),
'dst_mac': MACAddress(self.dst_mac),
'eth_type': describe_eth_type(self.eth_type)
}
class Ethernet802_3Header(EthernetHeader):
def __init__(self, dst_mac, src_mac):
super().__init__(dst_mac, src_mac)
self.length = 0
self.llc = 0
self.snap = 0
def describe(self) -> dict:
return {}
class Ethernet(Protocol):
def unpack_data(self, data: bytes) -> Tuple[Union[EthernetIIHeader, Ethernet802_3Header], bytes]:
"""
Ethernet II header, RFC 894
6 bytes destination MAC address
6 bytes source MAC address
2 bytes Ethernet type
46 ~ 1500 bytes payload
Ethernet 802.3 header, RFC 1042, IEEE 802
6 bytes destination MAC address
6 bytes source MAC address
2 bytes length
3 bytes LLC
5 bytes SNAP
38 ~ 1492 bytes payload
"""
header, payload = data[:14], data[14:]
res = struct.unpack(_ETH_II_FMT, header)
dst_mac = int.from_bytes(res[:6], BYTE_ORDER_NET)
src_mac = int.from_bytes(res[6:12], BYTE_ORDER_NET)
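        # Field 13 distinguishes the two framings: values above 1500 are an
        # EtherType (Ethernet II), values up to 1500 are an IEEE 802.3
        # payload length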
if res[12] > 1500:
hdr = EthernetIIHeader(dst_mac, src_mac)
hdr.eth_type = res[12]
else:
hdr = Ethernet802_3Header(dst_mac, src_mac)
# todo
return hdr, payload
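
# Illustrative usage (not part of the original module; assumes the
# surrounding pycap package and its constants module are importable):
#
#     mac = MACAddress('00:1a:2b:3c:4d:5e')
#     mac.as_int(), mac.as_str()       # integer and string forms
#     describe_eth_type(ETH_TYPE_ARP)  # the PROTOCOL_ARP constant
#     describe_packet_type(0x4)        # 'sent by us'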
| 28.22314
| 101
| 0.615959
| 910
| 6,830
| 4.305495
| 0.220879
| 0.060745
| 0.018377
| 0.02144
| 0.210567
| 0.146759
| 0.090097
| 0.084227
| 0.084227
| 0.084227
| 0
| 0.031902
| 0.270278
| 6,830
| 241
| 102
| 28.340249
| 0.754213
| 0.056369
| 0
| 0.10596
| 0
| 0
| 0.063677
| 0.007347
| 0
| 0
| 0.014318
| 0.004149
| 0
| 1
| 0.13245
| false
| 0
| 0.05298
| 0.059603
| 0.337748
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb965bb20b20b11f20bb0fdf749f18c3050f9707
| 893
|
py
|
Python
|
Intermediate+/57/notes/server.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | 1
|
2021-12-25T02:19:18.000Z
|
2021-12-25T02:19:18.000Z
|
Intermediate+/57/notes/server.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | null | null | null |
Intermediate+/57/notes/server.py
|
Matthew1906/100DaysOfPython
|
94ffff8f5535ce5d574f49c0d7971d64a4575aad
|
[
"MIT"
] | 1
|
2021-11-25T10:31:47.000Z
|
2021-11-25T10:31:47.000Z
|
from flask import Flask, render_template
import random, datetime as dt, requests
app = Flask(__name__)
# Jinja = templating language
@app.route('/')
def home():
random_number = random.randint(1,3)
return render_template("index.html", random_number = random_number, year = dt.datetime.now().year)
@app.route('/guess/<name>')
def guess(name):
    gender_response = requests.get('https://api.genderize.io', params={'name': name}).json()['gender']
    age_response = requests.get('https://api.agify.io', params={'name': name}).json()['age']
    return render_template("guess.html", name=name, gender=gender_response, age=age_response)
@app.route('/blog/<num>')
def blog(num):
blogs = requests.get('https://api.npoint.io/7bce33b15a477a7a6c81').json()
return render_template("blog.html", blogs = blogs, idx = int(num))
if __name__ == '__main__':
app.run(debug=True)
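
# Routes exposed by this server (derived from the handlers above; templates
# are assumed to live in ./templates):
#
#   GET /              -> index.html with a random number between 1 and 3
#   GET /guess/<name>  -> guess.html with a guessed gender and age for <name>
#   GET /blog/<num>    -> blog.html rendering post <num> from the JSON feed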
| 37.208333
| 103
| 0.693169
| 121
| 893
| 4.92562
| 0.421488
| 0.09396
| 0.100671
| 0.095638
| 0.157718
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017995
| 0.128779
| 893
| 24
| 104
| 37.208333
| 0.748072
| 0.030235
| 0
| 0
| 0
| 0
| 0.190751
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.111111
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb9676589fb7e0a374aed04eb4cfbe0922559c82
| 3,041
|
py
|
Python
|
game.py
|
MrEliptik/game_of_life
|
e0ff937ac1cf1a879e20c109a69700c77db71fcc
|
[
"MIT"
] | null | null | null |
game.py
|
MrEliptik/game_of_life
|
e0ff937ac1cf1a879e20c109a69700c77db71fcc
|
[
"MIT"
] | null | null | null |
game.py
|
MrEliptik/game_of_life
|
e0ff937ac1cf1a879e20c109a69700c77db71fcc
|
[
"MIT"
] | null | null | null |
import pygame
import random
import time
import numpy as np
WHITE = 255, 255, 255
BLACK = 0, 0, 0
size = width, height = 480, 320
row = 32
col = 48
cell_width = (width//col)
cell_height = (height//row)
font_size = 60
FPS = 30
LIVE_P_MAX = 0.5
LIVE_P_MIN = 0.01
_grid = np.full((row, col), None)
screen = None
refresh_start_time = 0
def init_screen():
pygame.init()
screen = pygame.display.set_mode(size, pygame.FULLSCREEN)
screen.fill(BLACK)
return screen
def refresh():
pygame.display.update()
def display(grid):
screen.fill(BLACK)
for i in range(row):
for j in range(col):
if grid[i][j] == 1:
# left, top, width, height
pygame.draw.rect(screen, WHITE, (j*cell_width, i*cell_height, cell_width, cell_height), False)
refresh()
def random_init_grid(grid):
for i in range(row):
for j in range(col):
p = random.random() * (LIVE_P_MAX - LIVE_P_MIN) + LIVE_P_MIN
if(random.random() < p): grid[i][j] = 1
else: grid[i][j] = None
def get_cell(grid, cell):
    try:
        # Negative indices wrap around; indices past the edge raise IndexError
        val = grid[cell[0]][cell[1]]
    except IndexError:
        val = None
    return val
def get_neighbors(grid, cell):
x, y = cell
return (get_cell(grid, (x, y-1)), get_cell(grid, (x-1, y-1)),
get_cell(grid, (x-1, y)), get_cell(grid, (x-1, y+1)),
get_cell(grid, (x, y+1)), get_cell(grid, (x+1, y+1)),
get_cell(grid, (x+1, y)), get_cell(grid, (x+1, y-1)))
def get_living_neighbors(neighbors):
living_count = 0
for neighbor in neighbors:
if neighbor == 1: living_count += 1
return living_count
def update_grid(grid):
new_grid = np.full((row, col), None)
for i in range(row):
for j in range(col):
neighbors = get_neighbors(grid, (i,j))
living = get_living_neighbors(neighbors)
            # Any live cell with two or three live neighbors survives
            if (living == 2 or living == 3) and grid[i][j] == 1: new_grid[i][j] = 1
            # Any dead cell with exactly three live neighbors becomes a live cell
            if living == 3 and grid[i][j] is None: new_grid[i][j] = 1
            # All other cells are dead (new_grid is initialized to None)
return new_grid
if __name__ == "__main__":
refresh_start_time = time.time()
running = True
inpt = "y"
screen = init_screen()
random_init_grid(_grid)
display(_grid)
while(running):
start = time.time()
if ((time.time() - refresh_start_time) > 60):
random_init_grid(_grid)
display(_grid)
refresh_start_time = time.time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONUP:
random_init_grid(_grid)
display(_grid)
# Copy new grid
_grid[:] = update_grid(_grid)
display(_grid)
while(time.time() - start < (1/FPS)):
pass
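
# Illustrative alternative (not part of the original script): with a 0/1
# integer grid instead of the None-based grid above, the neighbor count can
# be vectorized with np.roll. Note this wraps at the edges (a torus), which
# differs from the IndexError-based boundary handling in get_cell():
#
#     def update_grid_vectorized(grid01):
#         neighbors = sum(
#             np.roll(np.roll(grid01, dx, axis=0), dy, axis=1)
#             for dx in (-1, 0, 1) for dy in (-1, 0, 1)
#             if (dx, dy) != (0, 0)
#         )
#         # survive with 2-3 neighbors; birth with exactly 3
#         return ((neighbors == 3) | ((grid01 == 1) & (neighbors == 2))).astype(int)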
| 25.771186
| 110
| 0.577442
| 445
| 3,041
| 3.770787
| 0.229213
| 0.037545
| 0.058999
| 0.057211
| 0.302741
| 0.213945
| 0.11919
| 0.11919
| 0.11919
| 0.11919
| 0
| 0.028385
| 0.293325
| 3,041
| 117
| 111
| 25.991453
| 0.752443
| 0.065768
| 0
| 0.215909
| 0
| 0
| 0.003175
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0.011364
| 0.045455
| 0
| 0.193182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb9746ac2f24608c4e049924c7fcb26f2cfddb65
| 794
|
py
|
Python
|
azext_iot/monitor/models/target.py
|
lucadruda/azure-iot-cli-extension
|
9d2f677d19580f8fbac860e079550167e743a237
|
[
"MIT"
] | 79
|
2017-09-25T19:29:17.000Z
|
2022-03-30T20:55:57.000Z
|
azext_iot/monitor/models/target.py
|
lucadruda/azure-iot-cli-extension
|
9d2f677d19580f8fbac860e079550167e743a237
|
[
"MIT"
] | 305
|
2018-01-17T01:12:10.000Z
|
2022-03-23T22:38:11.000Z
|
azext_iot/monitor/models/target.py
|
lucadruda/azure-iot-cli-extension
|
9d2f677d19580f8fbac860e079550167e743a237
|
[
"MIT"
] | 69
|
2017-11-14T00:30:46.000Z
|
2022-03-01T17:11:45.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
class Target:
def __init__(
self,
hostname: str,
path: str,
partitions: list,
auth, # : uamqp.authentication.SASTokenAsync,
):
self.hostname = hostname
self.path = path
self.auth = auth
self.partitions = partitions
self.consumer_group = None
def add_consumer_group(self, consumer_group: str):
self.consumer_group = consumer_group
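
# Illustrative usage (not part of the original module); the hostname, path
# and partition values below are made up, and auth is normally a uamqp
# SASTokenAsync instance:
if __name__ == "__main__":
    target = Target(hostname="example.servicebus.windows.net",
                    path="entity-path", partitions=["0", "1"], auth=None)
    target.add_consumer_group("$Default")
    print(target.hostname, target.partitions, target.consumer_group)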
| 33.083333
| 94
| 0.492443
| 69
| 794
| 5.521739
| 0.57971
| 0.170604
| 0.133858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001605
| 0.215365
| 794
| 23
| 95
| 34.521739
| 0.609952
| 0.487406
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb9985f1334655b6b9bebcdea894cb35e74ef811
| 8,551
|
py
|
Python
|
py2dm/_parser/_pyparser.py
|
leonhard-s/Py2DM
|
a2c4c193dfa4494f2c9117f580f99f0dbdc579fc
|
[
"MIT"
] | 6
|
2021-01-28T10:59:21.000Z
|
2022-03-30T08:00:06.000Z
|
py2dm/_parser/_pyparser.py
|
leonhard-s/Py2DM
|
a2c4c193dfa4494f2c9117f580f99f0dbdc579fc
|
[
"MIT"
] | 7
|
2020-10-28T13:01:13.000Z
|
2022-03-08T19:21:05.000Z
|
py2dm/_parser/_pyparser.py
|
leonhard-s/Py2DM
|
a2c4c193dfa4494f2c9117f580f99f0dbdc579fc
|
[
"MIT"
] | null | null | null |
"""Python implementation of the 2DM card parser."""
from typing import IO, List, Optional, Tuple, Union
from ..errors import CardError, FormatError, ReadError
_MetadataArgs = Tuple[
int, # num_nodes
int, # num_elements
int, # num_node_strings
Optional[str], # name
Optional[int], # num_materials_per_elem
int, # nodes start
int, # elements start
int] # node strings start
_ELEMENT_CARDS = [
'E2L',
'E3L',
'E3T',
'E4Q',
'E6T',
'E8Q',
'E9Q'
]
def parse_element(line: str, allow_float_matid: bool = True,
allow_zero_index: bool = False
) -> Tuple[int, Tuple[int, ...], Tuple[Union[int, float], ...]]:
"""Parse a string into an element.
This converts a valid element definition string into a tuple that
can be used to instantiate the corresponding
:class:`py2dm.Element` subclass.
"""
# Parse line
chunks = line.split('#', maxsplit=1)[0].split()
# Length (generic)
if len(chunks) < 4:
raise CardError('Element definitions require at least 3 fields '
f'(id, node_1, node_2), got {len(chunks)-1}')
# 2DM card
card = chunks[0]
if not _card_is_element(card):
raise CardError(f'Invalid element card "{card}"')
# Length (card known)
num_nodes = _nodes_per_element(card)
assert num_nodes > 0
if len(chunks) < num_nodes + 2:
raise CardError(
f'{card} element definition requires at least {num_nodes-1} '
f'fields (id, node_1, ..., node_{num_nodes-1}), got {len(chunks)-1}')
# Element ID
id_ = int(chunks[1])
if id_ <= 0 and not (id_ == 0 and allow_zero_index):
raise FormatError(f'Invalid element ID: {id_}')
# Node IDs
nodes: List[int] = []
for node_str in chunks[2:num_nodes+2]:
node_id = int(node_str)
if node_id < 0 and not (node_id == 0 and allow_zero_index):
raise FormatError(f'Invalid node ID: {node_id}')
nodes.append(node_id)
# Material IDs
materials: List[Union[int, float]] = []
for mat_str in chunks[num_nodes+2:]:
mat_id: Union[int, float]
try:
mat_id = int(mat_str)
except ValueError as err:
if not allow_float_matid:
raise err from err
mat_id = float(mat_str)
materials.append(mat_id)
return id_, tuple(nodes), tuple(materials)
def parse_node(line: str, allow_zero_index: bool = False
) -> Tuple[int, float, float, float]:
"""Parse a string into a node.
This converts a valid node definition string into a tuple that can
    be used to instantiate the corresponding :class:`py2dm.Node`
object.
"""
# Parse line
chunks = line.split('#', maxsplit=1)[0].split()
# Length
if len(chunks) < 5:
raise CardError(f'Node definitions require at least 4 fields '
f'(id, x, y, z), got {len(chunks)-1}')
# 2DM card
card = chunks[0]
if card != "ND":
raise CardError(f'Invalid node card "{card}"')
# Node ID
id_ = int(chunks[1])
if id_ <= 0 and not (id_ == 0 and allow_zero_index):
raise FormatError(f'Invalid node ID: {id_}')
# Coordinates
pos_x, pos_y, pos_z = tuple((float(s) for s in chunks[2:5]))
# TODO: Warn about unused fields
return id_, pos_x, pos_y, pos_z
def parse_node_string(line: str, allow_zero_index: bool = False,
nodes: Optional[List[int]] = None
) -> Tuple[List[int], bool, str]:
"""Parse a string into a node string.
This converts a valid node string definition string into a tuple
that can be used to instantiate the corresponding
:class:`py2dm.NodeString`.
    As node strings can span multiple lines, the node string should only
be created once the `done` flag (second entry in the returned
tuple) is set to True.
"""
# Set default value
if nodes is None:
nodes = []
# Parse line
chunks = line.split('#', maxsplit=1)[0].split()
# Length
if len(chunks) < 2:
raise CardError('Node string definitions require at least 1 field '
f'(node_id), got {len(chunks)-1}')
# 2DM card
card = chunks[0]
if card != 'NS':
raise CardError(f'Invalid node string card "{card}"')
# Node IDs
is_done: bool = False
name = ''
for index, node_str in enumerate(chunks[1:]):
node_id = int(node_str)
if node_id == 0 and not allow_zero_index:
raise FormatError(f'Invalid node ID: {node_id}')
if node_id < 0:
# End of node string
is_done = True
nodes.append(abs(node_id))
# Check final identifier
if index+2 < len(chunks):
name = chunks[index+2]
break
nodes.append(node_id)
return nodes, is_done, name
def scan_metadata(file_: IO[str], filename: str,
allow_zero_index: bool = False) -> _MetadataArgs:
num_materials_per_elem: Optional[int] = None
name: Optional[str] = None
num_nodes = 0
num_elements = 0
num_node_strings = 0
mesh2d_found: bool = False
# Consecutive numbering validation
last_node = -1
last_element = -1
# File seek offsets
nodes_start = 0
elements_start = 0
node_strings_start = 0
file_.seek(0)
for index, line_raw in enumerate(iter(file_.readline, '')):
# Skip blank lines
line = line_raw.split('#', maxsplit=1)[0].strip()
if not line:
continue
if not mesh2d_found:
if line.startswith('MESH2D'):
mesh2d_found = True
else:
raise ReadError(
'File is not a 2DM mesh file', filename)
if line.startswith('ND'):
id_ = int(line.split(maxsplit=2)[1])
if id_ == 0 and not allow_zero_index:
raise FormatError(
'Zero index encountered in non-zero-indexed file',
filename, index+1)
num_nodes += 1
if last_node != -1 and last_node+1 != id_:
raise FormatError('Node IDs have holes',
filename, index+1)
last_node = id_
if nodes_start == 0:
nodes_start = file_.tell() - len(line_raw) - 1
continue
if line.split(maxsplit=1)[0] in _ELEMENT_CARDS:
id_ = int(line.split(maxsplit=2)[1])
if id_ == 0 and not allow_zero_index:
raise FormatError(
'Zero index encountered in non-zero-indexed file',
filename, index+1)
num_elements += 1
if last_element != -1 and last_element+1 != id_:
raise FormatError('Element IDs have holes',
filename, index+1)
last_element = id_
if elements_start == 0:
elements_start = file_.tell() - len(line_raw) - 1
continue
if (line.startswith('NS')
and '-' in line.split('#', maxsplit=1)[0]):
num_node_strings += 1
if node_strings_start == 0:
node_strings_start = file_.tell() - len(line_raw) - 1
elif line.startswith('MESHNAME') or line.startswith('GM'):
# NOTE: This fails for meshes with double quotes in their
# mesh name, but that is an unreasonable thing to want to
# do anyway. "We'll fix it later" (tm)
chunks = line.split('"', maxsplit=2)
if len(chunks) < 2:
chunks = line.split(maxsplit=2)
name = chunks[1]
elif line.startswith('NUM_MATERIALS_PER_ELEM'):
num_materials_per_elem = int(line.split(maxsplit=2)[1])
if not mesh2d_found:
raise ReadError('MESH2D tag not found', filename)
return (num_nodes, num_elements, num_node_strings, name,
num_materials_per_elem, nodes_start, elements_start,
node_strings_start)
def _card_is_element(card: str) -> bool:
    return card in _ELEMENT_CARDS
def _nodes_per_element(card: str) -> int:
if card == 'E2L':
return 2
if card in ('E3L', 'E3T'):
return 3
if card == 'E4Q':
return 4
if card == 'E6T':
return 6
if card == 'E8Q':
return 8
if card == 'E9Q':
return 9
return -1
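
# Illustrative usage (not part of the original module); the parsers are pure
# functions over 2DM card strings, e.g.:
#
#     >>> parse_node('ND 1 0.5 1.5 0.0')
#     (1, 0.5, 1.5, 0.0)
#     >>> parse_element('E3T 1 1 2 3 1')
#     (1, (1, 2, 3), (1,))
#     >>> parse_node_string('NS 1 2 3 -4 left_bank')
#     ([1, 2, 3, 4], True, 'left_bank')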
| 34.479839
| 82
| 0.571161
| 1,123
| 8,551
| 4.190561
| 0.182547
| 0.021674
| 0.029749
| 0.019125
| 0.366341
| 0.305992
| 0.286443
| 0.242456
| 0.232257
| 0.232257
| 0
| 0.022418
| 0.321834
| 8,551
| 247
| 83
| 34.619433
| 0.789102
| 0.153666
| 0
| 0.220994
| 0
| 0.005525
| 0.119522
| 0.00309
| 0
| 0
| 0
| 0.004049
| 0.005525
| 1
| 0.033149
| false
| 0
| 0.01105
| 0.005525
| 0.110497
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bb9b59ff879eaecfcc8190f0acec7f2068109681
| 1,024
|
py
|
Python
|
src/commands/refactor/refactor_preview.py
|
PranjalPansuriya/JavaScriptEnhancements
|
14af4162e86585153cbd4614ad96dff64a0d3192
|
[
"MIT"
] | 690
|
2017-04-11T06:45:01.000Z
|
2022-03-21T23:20:29.000Z
|
src/commands/refactor/refactor_preview.py
|
PranjalPansuriya/JavaScriptEnhancements
|
14af4162e86585153cbd4614ad96dff64a0d3192
|
[
"MIT"
] | 74
|
2017-11-22T18:05:26.000Z
|
2021-05-05T16:25:31.000Z
|
src/commands/refactor/refactor_preview.py
|
PranjalPansuriya/JavaScriptEnhancements
|
14af4162e86585153cbd4614ad96dff64a0d3192
|
[
"MIT"
] | 42
|
2017-04-13T10:22:40.000Z
|
2021-05-27T19:19:04.000Z
|
import sublime, sublime_plugin
from ...libs import util
class RefactorPreview():
view = None
title = None
window = None
def __init__(self, title):
self.title = title
self.window = sublime.active_window()
self.view = None
for v in self.window.views():
if v.name() == self.title:
self.view = v
self.view.run_command("javascript_enhancements_erase_text_view")
self.window.focus_view(self.view)
break
if not self.view:
self.window.focus_group(1)
self.view = self.window.new_file()
self.view.set_name(self.title)
self.view.set_syntax_file('Packages/Default/Find Results.hidden-tmLanguage')
self.view.set_scratch(True)
def append_text(self, text):
if self.view:
self.view.run_command("javascript_enhancements_append_text_view", args={"text": text})
@staticmethod
def close(title):
window = sublime.active_window()
for v in window.views():
if v.name() == title:
v.close()
break
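
# Illustrative usage (not part of the original module); this only works
# inside Sublime Text, where the sublime API is available:
#
#     preview = RefactorPreview('Refactor Results')
#     preview.append_text('3 occurrences replaced\n')
#     RefactorPreview.close('Refactor Results')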
| 27.675676
| 92
| 0.657227
| 138
| 1,024
| 4.695652
| 0.355072
| 0.135802
| 0.060185
| 0.07716
| 0.237654
| 0.123457
| 0
| 0
| 0
| 0
| 0
| 0.001263
| 0.226563
| 1,024
| 37
| 93
| 27.675676
| 0.816919
| 0
| 0
| 0.0625
| 0
| 0
| 0.126829
| 0.121951
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09375
| false
| 0
| 0.0625
| 0
| 0.28125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bba24784bd9ee9a55803728f5cef4460717a8929
| 7,228
|
py
|
Python
|
tests/env/experiments_tools_2.py
|
weifanjiang/CSSPy
|
361d18d7b9c08bcff11a18524a718b3522c48786
|
[
"MIT"
] | 3
|
2018-10-04T14:00:50.000Z
|
2021-12-11T08:57:26.000Z
|
tests/env/experiments_tools_2.py
|
weifanjiang/CSSPy
|
361d18d7b9c08bcff11a18524a718b3522c48786
|
[
"MIT"
] | null | null | null |
tests/env/experiments_tools_2.py
|
weifanjiang/CSSPy
|
361d18d7b9c08bcff11a18524a718b3522c48786
|
[
"MIT"
] | null | null | null |
import sys
sys.path.insert(0, '..')
import numpy as np
import pandas as pd
from itertools import combinations
from scipy.stats import binom
import scipy.special
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from IPython.display import display, HTML
#sys.path.append("../")
from FrameBuilder.eigenstepsbuilder import *
from decimal import *
from copy import deepcopy
import matplotlib.lines as mlines
import matplotlib.transforms as mtransforms
from env.numerical_analysis_dpp import *
def plot_results_of_multi_experiments(N,real_dim,r,T_,k_,mean,cov_,static_list_):
print(np.diag(cov_))
lv_scores_vector = k_/real_dim*np.ones(real_dim) # The vector of leverage scores (the last one)
T = deepcopy(T_) # The number of experiments
versions_number = 1
k = deepcopy(k_)
cov_1 = deepcopy(cov_)
    static_list = deepcopy(static_list_)
    volume_sampling_fro_list = []
    projection_dpp_fro_list = []
    #derandomized_projection_dpp_fro_list = []
    greedy_selection_fro_list = []
    effective_kernel_fro_list = []
    p_eff_list = []
    cardinal_list = []
    avoiding_proba_list = []
for t in range(T):
print("t")
print(t)
#print("real_dim")
#print(real_dim)
random_cardinal_list = list(np.random.choice(static_list, 1))
NAL_1 = Numrerical_Analysis_DPP(N,real_dim,r,k,versions_number,mean,cov_1,lv_scores_vector,random_cardinal_list)
projection_DPP_res_fro_1 = NAL_1.get_expected_error_fro_for_projection_DPP()
volume_sampling_res_fro_1 = NAL_1.get_expected_error_fro_for_volume_sampling()
#derandomized_DPP_res_fro_1 = NAL_1.get_error_fro_for_derandomized_projection_DPP_selection()
greedy_selection_res_fro_1 = NAL_1.get_error_fro_for_deterministic_selection()
effective_kernel_sampling_res_fro_1 = NAL_1.get_expected_error_fro_for_effective_kernel_sampling()
# upper_tight_bound_projection_DPP_res_fro_1 = NAL_1.get_tight_upper_bound_error_fro_for_projection_DPP()
# alpha_sum_res_1 = NAL_1.get_alpha_sum_k_leverage_scores(1)
# sum_U_res_1 = NAL_1.get_sum_k_leverage_scores()
p_eff_res_1 = NAL_1.get_p_eff_leverage_scores()
avoiding_proba_res_1 = NAL_1.get_avoiding_probability()
avoiding_proba_list.append(avoiding_proba_res_1)
greedy_selection_fro_list.append(greedy_selection_res_fro_1)
#derandomized_projection_dpp_fro_list.append(derandomized_DPP_res_fro_1)
effective_kernel_fro_list.append(list(effective_kernel_sampling_res_fro_1))
volume_sampling_fro_list.append(list(volume_sampling_res_fro_1))
projection_dpp_fro_list.append(list(projection_DPP_res_fro_1))
p_eff_list.append(list(p_eff_res_1))
cardinal_list.append(random_cardinal_list)
print("next")
flattened_cardinal_list= [item for items in cardinal_list for item in items]
flattened_p_eff_list= [item for items in p_eff_list for item in items]
theoretical_projection_DPP_error_bound_list = from_p_eff_to_error_bound(flattened_cardinal_list,k,real_dim)
plt.scatter(cardinal_list,projection_dpp_fro_list,label="Projection DPP Sampling",marker='_')
plt.scatter(cardinal_list,volume_sampling_fro_list,label="Volume Sampling",marker='_')
#plt.scatter(cardinal_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
plt.scatter(cardinal_list,greedy_selection_fro_list,label = "greedy", marker='_',color = 'purple')
plt.scatter(cardinal_list,theoretical_projection_DPP_error_bound_list,color='red',marker='_',label="Theoretical bound for Projection DPP Sampling")
plt.xlabel(r'$p$', fontsize=12)
plt.ylabel(r'$\mathbb{E}_{S \sim \mathcal{P}}(\|X- \pi_{S}(X)\|_{Fr}^{2})$', fontsize=12)
plt.title('The case k = '+str(k)+', '+str(T)+' matrices, flat spectrum after k+1')
#plt.xticks(map(int, Y_cov[:-1]))
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
theoretical_effective_kernel_error_bound_list = from_p_eff_to_error_bound_2(flattened_p_eff_list,k,real_dim)
#theoretical_effective_kernel_error_bound_list = from_p_eff_to_error_bound(flattened_p_eff_list,k,real_dim)
plt.scatter(p_eff_list,effective_kernel_fro_list,label="Effective Kernel Sampling",marker='_')
plt.scatter(p_eff_list,volume_sampling_fro_list,label="Volume Sampling",marker='_')
#plt.scatter(p_eff_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
plt.scatter(p_eff_list,theoretical_effective_kernel_error_bound_list,color='red',marker='_',label="Theoretical bound for Effective Kernel Sampling")
plt.scatter(p_eff_list,greedy_selection_fro_list,label = "greedy", marker='_',color = 'purple')
plt.xlabel(r'$p_{eff}(\frac{1}{2})$', fontsize=12)
plt.ylabel(r'$\mathrm{\mathbb{E}_{S \sim \mathcal{P}}(\|X- \pi_{S}(X)\|_{Fr}^{2})$', fontsize=12)
plt.title('The case k = '+str(k)+', '+str(T)+' matrices, flat spectrum after k+1')
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
plt.scatter(cardinal_list,projection_dpp_fro_list,label="Projection DPP Sampling",marker='_')
plt.scatter(cardinal_list,volume_sampling_fro_list,label="Volume Sampling",marker='_')
#plt.scatter(cardinal_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
plt.scatter(cardinal_list,theoretical_projection_DPP_error_bound_list,color='red',marker='_',label="Theoretical bound for Projection DPP Sampling")
plt.xlabel(r'$p$', fontsize=12)
plt.ylabel(r'$\mathbb{E}_{S \sim \mathcal{P}}(\|X- \pi_{S}(X)\|_{Fr}^{2})$', fontsize=12)
plt.title('The case k = '+str(k)+', '+str(T)+' matrices, flat spectrum after k+1')
#plt.xticks(map(int, Y_cov[:-1]))
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
plt.scatter(p_eff_list,effective_kernel_fro_list,label="Effective Kernel Sampling",marker='_')
plt.scatter(p_eff_list,volume_sampling_fro_list,label="Volume Sampling",marker='_')
#plt.scatter(p_eff_list,derandomized_projection_dpp_fro_list,label="derandomized projection dpp", marker='_')
plt.scatter(p_eff_list,theoretical_effective_kernel_error_bound_list,color='red',marker='_',label="Theoretical bound for Effective Kernel Sampling")
plt.xlabel(r'$p_{eff}(\frac{1}{2})$', fontsize=12)
plt.ylabel(r'$\mathbb{E}_{S \sim \mathcal{P}}(\|X- \pi_{S}(X)\|_{Fr}^{2})$', fontsize=12)
plt.title('The case k = '+str(k)+', '+str(T)+' matrices, flat spectrum after k+1')
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
plt.scatter(p_eff_list,avoiding_proba_list,label="Avoiding Probability")
plt.xlabel(r'$p_{eff}(\frac{1}{2})$', fontsize=12)
plt.ylabel(r'$\mathbb{P}(S\cap T_{eff} = \emptyset)$', fontsize=12)
plt.title('The case k = '+str(k)+', '+str(T)+' matrices, flat spectrum after k+1')
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left")
plt.show()
print("N")
print(N)
| 45.175
| 152
| 0.731046
| 1,083
| 7,228
| 4.493075
| 0.136657
| 0.074805
| 0.027949
| 0.045212
| 0.709823
| 0.634402
| 0.605425
| 0.586724
| 0.561652
| 0.551377
| 0
| 0.014784
| 0.139043
| 7,228
| 160
| 153
| 45.175
| 0.767154
| 0.160072
| 0
| 0.43
| 0
| 0.04
| 0.177734
| 0.029732
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01
| false
| 0
| 0.15
| 0
| 0.16
| 0.06
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bba40b13a90f7230a2307e2b965c7e2e96ab0207
| 1,562
|
py
|
Python
|
utils/relationship_tree/population.py
|
rohern/attila
|
e876af57ee3b77144343ac3c22e798733753a23f
|
[
"MIT"
] | null | null | null |
utils/relationship_tree/population.py
|
rohern/attila
|
e876af57ee3b77144343ac3c22e798733753a23f
|
[
"MIT"
] | null | null | null |
utils/relationship_tree/population.py
|
rohern/attila
|
e876af57ee3b77144343ac3c22e798733753a23f
|
[
"MIT"
] | 1
|
2020-02-21T20:08:43.000Z
|
2020-02-21T20:08:43.000Z
|
from person import Person
class Population:
def __init__(self, family_info, null_parent_value='0'):
self.persons = {}
# Initialize the persons data structure with Person objects
for fid in family_info:
self.persons[fid] = {}
for iid in family_info[fid]:
info = family_info[fid][iid]
father_id = info['father_id']
if father_id == null_parent_value:
father_id = None
mother_id = info['mother_id']
if mother_id == null_parent_value:
mother_id = None
self.persons[fid][iid] = Person(fid, iid, father_id, mother_id, info['sex'], info['birthday'], info['datapresent'])
# Create link structure between persons based on relationship
for fid in self.persons:
family_member_ids = self.persons[fid].keys()
for iid in family_member_ids:
person = self.persons[fid][iid]
if person.father_id is not None:
if person.father_id in family_member_ids:
person.set_father(self.persons[fid][person.father_id])
self.persons[fid][person.father_id].add_child(person)
# else:
# print "%s's father %s is not in their family." % (person.iid, person.father_id)
if person.mother_id is not None:
if person.mother_id in family_member_ids:
person.set_mother(self.persons[fid][person.mother_id])
self.persons[fid][person.mother_id].add_child(person)
# else:
# print "%s's mother %s is not in their family." % (person.iid, person.mother_id)
| 38.097561
| 123
| 0.640845
| 218
| 1,562
| 4.380734
| 0.233945
| 0.115183
| 0.117277
| 0.08377
| 0.363351
| 0.339267
| 0.186387
| 0.127749
| 0.071204
| 0
| 0
| 0.000858
| 0.254161
| 1,562
| 40
| 124
| 39.05
| 0.818884
| 0.18758
| 0
| 0
| 0
| 0
| 0.032488
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.037037
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bba5ec5ee218ef30daab10fe172b51c78e3cf3a4
| 4,040
|
py
|
Python
|
auto_encoder.py
|
kredy/Keras-Projects
|
44c9a7b27f31a8d3eaa7b3bc7a0396d2eb0bf430
|
[
"MIT"
] | 1
|
2021-06-30T13:25:35.000Z
|
2021-06-30T13:25:35.000Z
|
auto_encoder.py
|
kredy/Keras-Projects
|
44c9a7b27f31a8d3eaa7b3bc7a0396d2eb0bf430
|
[
"MIT"
] | null | null | null |
auto_encoder.py
|
kredy/Keras-Projects
|
44c9a7b27f31a8d3eaa7b3bc7a0396d2eb0bf430
|
[
"MIT"
] | 2
|
2020-08-04T01:52:55.000Z
|
2021-03-16T19:12:20.000Z
|
'''
Convolutional autoencoder on MNIST dataset using Keras functional API
'''
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Activation, Input, BatchNormalization
from keras.layers import Conv2D, Conv2DTranspose
from keras.callbacks import TensorBoard
from keras.optimizers import Adam
from keras.utils import to_categorical
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Parameters
batch_size = 128
epochs = 3
Tboard = TensorBoard(log_dir='./autoencoder_graph')
# Load the MNIST data
def load_data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
y_train = to_categorical(y_train, num_classes=10)
y_test = to_categorical(y_test, num_classes=10)
x_train = x_train.reshape(-1, 28, 28, 1)
x_test = x_test.reshape(-1, 28, 28, 1)
x_train = x_train/255.0
x_test = x_test/255.0
return x_train, y_train, x_test, y_test
# Autoencoder
def auto_encoder():
# Encoder
inputs = Input(name='inputs', shape=[28,28,1,])
layer = Conv2D(filters=6, kernel_size=(5,5), strides=(1,1), padding='valid', name='Conv2D_1')(inputs)
layer = BatchNormalization(name='BN_1')(layer)
layer = Activation('relu', name='relu_1')(layer)
layer = Conv2D(filters=6, kernel_size=(5,5), strides=(1,1), padding='valid', name='Conv2D_2')(layer)
layer = BatchNormalization(name='BN_2')(layer)
layer = Activation('relu', name='relu_2')(layer)
layer = Conv2D(filters=6, kernel_size=(3, 3), strides=(1, 1), padding='valid', name='Conv2D_3')(layer)
layer = BatchNormalization(name='BN_3')(layer)
layer = Activation('relu', name='relu_3')(layer)
encoder = Model(inputs=inputs, outputs=layer)
# Decoder
l_inputs = Input(name='l_inputs', shape=[18,18,6,])
layer = Conv2DTranspose(filters=6, kernel_size=(3,3), strides=(1,1), padding='valid', name='deconv2d_1')(l_inputs)
layer = BatchNormalization(name='BN_4')(layer)
layer = Activation('relu', name='relu_4')(layer)
layer = Conv2DTranspose(filters=6, kernel_size=(5, 5), strides=(1, 1), padding='valid', name='deconv2d_2')(layer)
layer = BatchNormalization(name='BN_5')(layer)
layer = Activation('relu', name='relu_5')(layer)
layer = Conv2DTranspose(filters=1, kernel_size=(5, 5), strides=(1, 1), padding='valid', name='deconv2d_3')(layer)
layer = Activation('relu', name='relu_6')(layer)
decoder = Model(inputs=l_inputs, outputs=layer)
# Encoder + Decoder
model = Model(inputs=inputs, outputs=decoder(encoder(inputs)))
return encoder, decoder, model
def main():
x_train, y_train, x_test, y_test = load_data()
encoder, decoder, model = auto_encoder()
encoder.summary()
decoder.summary()
model.summary()
model.compile(optimizer=Adam(), loss='mse')
model.fit(x_train, x_train, batch_size=batch_size, epochs=epochs, callbacks=[Tboard])
gen_imgs = model.predict(x_test, batch_size=batch_size)
    # Visualisation of the generated images and comparison with the test images
rn_num = np.random.randint(10000)
gen_imgs = gen_imgs*255.0
gen_img = gen_imgs[rn_num]
x_test = x_test*255.0
test_img = x_test[rn_num]
test_img = test_img.reshape(28,28)
gen_img = gen_img.reshape(28,28)
# Show generated image
plt.imshow(gen_img)
plt.show()
# Show test image
plt.imshow(test_img)
plt.show()
# Save weights of encoder, decoder and the whole model
encoder.save_weights('encoder_weights.hdf5')
decoder.save_weights('decoder_weights.hdf5')
model.save_weights('autoencoder_weights.hdf5')
# Save architecture
encoder_yaml = encoder.to_yaml()
with open('encoder_string.yaml', 'w') as fo:
fo.write(encoder_yaml)
decoder_yaml = decoder.to_yaml()
with open('decoder_string.yaml', 'w') as fo:
fo.write(decoder_yaml)
model_yaml = model.to_yaml()
with open('model_string.yaml', 'w') as fo:
fo.write(model_yaml)
if __name__ == '__main__':
main()
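
# Illustrative restore sketch (not part of the original script; assumes a
# Keras version where to_yaml/model_from_yaml are still available -- they
# were removed in later releases):
#
#     from keras.models import model_from_yaml
#     with open('model_string.yaml') as fi:
#         restored = model_from_yaml(fi.read())
#     restored.load_weights('autoencoder_weights.hdf5')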
| 35.438596
| 118
| 0.693069
| 585
| 4,040
| 4.589744
| 0.206838
| 0.048417
| 0.020112
| 0.035754
| 0.35121
| 0.312477
| 0.20298
| 0.140782
| 0.116201
| 0.116201
| 0
| 0.036651
| 0.169307
| 4,040
| 113
| 119
| 35.752212
| 0.763409
| 0.081931
| 0
| 0.025
| 0
| 0
| 0.089455
| 0.006506
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0375
| false
| 0
| 0.125
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bba73b50d8937afbf151ac7cc18f80271ca8fda7
| 499
|
py
|
Python
|
test/unit/tools/msvc/common.py
|
jimporter/bfg9000
|
c206646ecfed0d1a510e993b93e6a15677f45a14
|
[
"BSD-3-Clause"
] | 72
|
2015-06-23T02:35:13.000Z
|
2021-12-08T01:47:40.000Z
|
test/unit/tools/msvc/common.py
|
jimporter/bfg9000
|
c206646ecfed0d1a510e993b93e6a15677f45a14
|
[
"BSD-3-Clause"
] | 139
|
2015-03-01T18:48:17.000Z
|
2021-06-18T15:45:14.000Z
|
test/unit/tools/msvc/common.py
|
jimporter/bfg9000
|
c206646ecfed0d1a510e993b93e6a15677f45a14
|
[
"BSD-3-Clause"
] | 19
|
2015-12-23T21:24:33.000Z
|
2022-01-06T04:04:41.000Z
|
from bfg9000.languages import Languages
known_langs = Languages()
with known_langs.make('c') as x:
x.vars(compiler='CC', flags='CFLAGS')
with known_langs.make('c++') as x:
x.vars(compiler='CXX', flags='CXXFLAGS')
def mock_which(*args, **kwargs):
return ['command']
def mock_execute(args, **kwargs):
if '-?' in args:
return ('Microsoft (R) C/C++ Optimizing Compiler Version ' +
'19.12.25831 for x86')
raise OSError('unknown command: {}'.format(args))
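
# Illustrative usage (not part of this module; the patch targets below are
# hypothetical -- the real import paths depend on how the tests import the
# helpers):
#
#     from unittest import mock
#     with mock.patch('bfg9000.shell.which', mock_which), \
#          mock.patch('bfg9000.shell.execute', mock_execute):
#         ...  # construct the msvc tool under test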
| 26.263158
| 68
| 0.639279
| 68
| 499
| 4.617647
| 0.602941
| 0.095541
| 0.089172
| 0.11465
| 0.22293
| 0.22293
| 0.22293
| 0.22293
| 0.22293
| 0.22293
| 0
| 0.037221
| 0.192385
| 499
| 18
| 69
| 27.722222
| 0.741935
| 0
| 0
| 0
| 0
| 0
| 0.236473
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0.076923
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbabba632a1d8ac671dc7f863d9ffae0e405f07a
| 1,266
|
py
|
Python
|
algorithm/linear-regression/gradientDescentLR.py
|
mk43/machine-learning
|
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
|
[
"Apache-2.0"
] | 6
|
2018-02-22T00:27:44.000Z
|
2019-11-21T18:12:48.000Z
|
algorithm/linear-regression/gradientDescentLR.py
|
mk43/machine-learning
|
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
|
[
"Apache-2.0"
] | null | null | null |
algorithm/linear-regression/gradientDescentLR.py
|
mk43/machine-learning
|
1ca1baf797fe6f593a88ad4e0d7ac7e5c24ce139
|
[
"Apache-2.0"
] | 4
|
2018-02-19T05:59:23.000Z
|
2020-04-08T08:53:02.000Z
|
# coding: utf-8
import matplotlib.pyplot as plt
import numpy as np
N = 200
X = np.linspace(0, 10, N * 2)
noise = np.random.normal(0, 0.5, X.shape)
Y = X * 0.5 + 3 + noise
def calcLoss(train_X, train_Y, W, b):
return np.sum(np.square(train_Y - (train_X * W + b)))
def gradientDescent(train_X, train_Y, W, b, learningrate=0.001, trainingtimes=500):
global loss
global W_trace
global b_trace
size = train_Y.size
for _ in range(trainingtimes):
prediction = W * train_X + b
tempW = W + learningrate * np.sum(train_X * (train_Y - prediction)) / size
tempb = b + learningrate * np.sum(train_Y - prediction) / size
W = tempW
b = tempb
loss.append(calcLoss(train_X, train_Y, W, b))
W_trace.append(W)
b_trace.append(b)
Training_Times = 100
Learning_Rate = 0.002
loss = []
W_trace = [-1]
b_trace = [1]
gradientDescent(X, Y, W_trace[0], b_trace[0], learningrate=Learning_Rate, trainingtimes=Training_Times)
print(W_trace[-1], b_trace[-1])
fig = plt.figure()
plt.title(r'$loss\ function\ change\ tendency$')
plt.xlabel(r'$learning\ times$')
plt.ylabel(r'$loss\ value$')
plt.plot(np.linspace(1, Training_Times, Training_Times), loss)
plt.savefig("gradientDescentLR.png")
plt.show()
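
# Illustrative sanity check (not part of the original script): NumPy's
# closed-form least-squares fit should agree with the gradient-descent
# estimates above (the true parameters are W=0.5, b=3).
W_ls, b_ls = np.polyfit(X, Y, 1)
print("closed form:", W_ls, b_ls, "gradient descent:", W_trace[-1], b_trace[-1])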
| 26.375
| 103
| 0.661137
| 202
| 1,266
| 3.99505
| 0.351485
| 0.052045
| 0.054523
| 0.05948
| 0.106568
| 0.106568
| 0.054523
| 0
| 0
| 0
| 0
| 0.034415
| 0.196682
| 1,266
| 47
| 104
| 26.93617
| 0.759095
| 0.010269
| 0
| 0
| 0
| 0
| 0.067946
| 0.016787
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0.027778
| 0.138889
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbac3dea77b9a3981684ddd7952fdf41e36843fc
| 6,343
|
py
|
Python
|
lambda-email-parser/lambda_function.py
|
aws-samples/serverless-mail
|
a002dd90817c9eb2090ca0ad36114c51a0490d61
|
[
"MIT-0"
] | null | null | null |
lambda-email-parser/lambda_function.py
|
aws-samples/serverless-mail
|
a002dd90817c9eb2090ca0ad36114c51a0490d61
|
[
"MIT-0"
] | null | null | null |
lambda-email-parser/lambda_function.py
|
aws-samples/serverless-mail
|
a002dd90817c9eb2090ca0ad36114c51a0490d61
|
[
"MIT-0"
] | null | null | null |
import os
import boto3
import email
import logging
import json
import re
import uuid
s3 = boto3.client("s3")
workmail_message_flow = boto3.client('workmailmessageflow')
logger = logging.getLogger()
def lambda_handler(event, context):
logger.error(json.dumps(event))
destination_bucket = os.environ.get('destination_bucket')
key_prefix = None
if not destination_bucket:
logger.error("Environment variable missing: destination_bucket")
return
# keep track of how many MIME parts are parsed and saved to S3
saved_parts = 0
msg = None
parts = None
workmail_mutate = None
# event is from workmail
if event.get('messageId'):
message_id = event['messageId']
key_prefix = message_id
raw_msg = workmail_message_flow.get_raw_message_content(messageId=message_id)
msg = email.message_from_bytes(raw_msg['messageContent'].read())
if os.environ.get('modify_workmail_message'):
workmail_mutate = True
# event is from s3
else:
records = event.get('Records', [])
record = records[0]
# TODO: for record in records:
# get the S3 object information
s3_info = record['s3']
object_info = s3_info['object']
if s3_info['bucket']['name'] == destination_bucket:
logger.error("To prevent recursive file creation this function will not write back to the same bucket")
return {
'statusCode': 400,
'body': 'To prevent recursive file creation this function will not write back to the same bucket'
}
# get the email message stored in S3 and parse it using the python email library
# TODO: error condition - if the file isn't an email message or doesn't parse correctly
object_key = object_info['key']
key_prefix = object_key
fileObj = s3.get_object(Bucket = s3_info['bucket']['name'], Key = object_key)
msg = email.message_from_bytes(fileObj['Body'].read())
# save the headers of the message to the bucket
    # By default all headers are saved; set the select_headers environment
    # variable to a comma-separated list to save only specific ones
    headers_to_save = re.split(r',\s*', str(os.environ.get('select_headers', 'ALL')))
all_headers = msg.items()
if "ALL" in headers_to_save:
s3.put_object(Bucket = destination_bucket, Key = key_prefix + "/headers.json", Body = json.dumps(all_headers))
elif len(headers_to_save) > 0:
saved_headers = []
i = 0
while i < len(all_headers):
this_header = all_headers[i]
if this_header[0].upper() in (header.upper() for header in headers_to_save):
saved_headers.append(this_header)
i += 1
s3.put_object(Bucket = destination_bucket, Key = key_prefix + "/headers.json", Body = json.dumps(saved_headers))
# parse the mime parts out of the message
parts = msg.walk()
# walk through each MIME part from the email message
part_idx = 0
for part in parts:
part_idx += 1
# get information about the MIME part
content_type, content_disposition, content, charset, filename = [None] * 5
content_type = part.get_content_type()
content_disposition = str(part.get_content_disposition())
content = part.get_payload(decode=True)
charset = part.get_content_charset()
filename = part.get_filename()
logger.error(f"Part: {part_idx}. Content charset: {charset}. Content type: {content_type}. Content disposition: {content_disposition}. Filename: {filename}");
# make file name for body, and untitled text or html parts
# add additional content types that we want to support non-existent filenames
if not filename:
if content_type == 'text/plain':
if 'attachment' not in content_disposition:
filename = "body.txt"
else:
filename = "untitled.txt"
elif content_type == 'text/html':
if 'attachment' not in content_disposition:
filename = "body.html"
else:
filename = "untitled.html"
else:
filename = "untitled"
# TODO: consider overriding or sanitizing the filenames since that is tainted data and might be subject to abuse in object key names
# technically, the entire message is tainted data, so it would be the responsibility of downstream parsers to ensure protection from interpreter abuse
# skip parts that aren't attachment parts
if content_type in ["multipart/mixed", "multipart/related", "multipart/alternative"]:
continue
if content:
# decode the content based on the character set specified
# TODO: add error handling
if charset:
content = content.decode(charset)
# store the decoded MIME part in S3 with the filename appended to the object key
s3.put_object(Bucket = destination_bucket, Key = key_prefix + "/mimepart" + str(part_idx) + "_" + filename, Body = content)
saved_parts += 1
else:
logger.error(f"Part {part_idx} has no content. Content type: {content_type}. Content disposition: {content_disposition}.");
if workmail_mutate:
email_subject = event['subject']
modified_object_key = key_prefix + "/" + str(uuid.uuid4())
new_subject = f"[PROCESSED] {email_subject}"
msg.replace_header('Subject', new_subject)
msg.add_header('X-AWS-Mailsploder-Bucket-Prefix', "s3://" + destination_bucket + "/" + key_prefix)
msg.add_header('X-AWS-Mailsploder-Parts-Saved', str(saved_parts))
# Store updated email in S3
s3.put_object(Bucket = destination_bucket, Key = modified_object_key, Body = msg.as_bytes())
# Update the email in WorkMail
s3_reference = {
'bucket': destination_bucket,
'key': modified_object_key
}
content = {
's3Reference': s3_reference
}
workmail_message_flow.put_raw_message_content(messageId=message_id, content=content)
return {
'statusCode': 200,
'body': 'Number of parts saved to S3 bucket: ' + destination_bucket + ': ' + str(saved_parts)
}
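# --- Hedged local smoke test (not part of the original handler; the bucket
# and key names below are hypothetical placeholders). Running it end to end
# still requires AWS credentials and real S3 objects.
if __name__ == "__main__":
    os.environ["destination_bucket"] = "example-destination-bucket"  # assumed
    fake_s3_event = {
        "Records": [{
            "s3": {
                "bucket": {"name": "example-source-bucket"},  # assumed
                "object": {"key": "incoming/sample-email.eml"},  # assumed
            }
        }]
    }
    print(lambda_handler(fake_s3_event, None))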
| 40.922581
| 164
| 0.653003
| 805
| 6,343
| 4.981366
| 0.265839
| 0.050873
| 0.034913
| 0.032419
| 0.236658
| 0.215711
| 0.159352
| 0.135162
| 0.082793
| 0.071322
| 0
| 0.009306
| 0.254611
| 6,343
| 154
| 165
| 41.188312
| 0.838832
| 0.196437
| 0
| 0.080357
| 0
| 0.008929
| 0.200237
| 0.029168
| 0
| 0
| 0
| 0.006494
| 0
| 1
| 0.008929
| false
| 0
| 0.0625
| 0
| 0.098214
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbaeca43c2d4bafe283a3a22b25235f71d730c45
| 12,685
|
py
|
Python
|
python_modules/dagster-airflow/dagster_airflow/operators.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow/operators.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | null | null | null |
python_modules/dagster-airflow/dagster_airflow/operators.py
|
jake-billings/dagster
|
7a1548a1f246c48189f3d8109e831b744bceb7d4
|
[
"Apache-2.0"
] | null | null | null |
'''The dagster-airflow operators.'''
import ast
import datetime
import json
import logging
import os
from contextlib import contextmanager
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, SkipMixin
from airflow.operators.docker_operator import DockerOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.file import TemporaryDirectory
from docker import APIClient, from_env
from dagster import check, seven, DagsterEventType
from dagster.core.events import DagsterEvent
from dagster_graphql.client.mutations import execute_start_pipeline_execution_query
from dagster_graphql.client.query import START_PIPELINE_EXECUTION_QUERY
from .util import airflow_storage_exception, construct_variables, parse_raw_res
DOCKER_TEMPDIR = '/tmp'
DEFAULT_ENVIRONMENT = {
'AWS_ACCESS_KEY_ID': os.getenv('AWS_ACCESS_KEY_ID'),
'AWS_SECRET_ACCESS_KEY': os.getenv('AWS_SECRET_ACCESS_KEY'),
}
LINE_LENGTH = 100
class DagsterSkipMixin(SkipMixin):
def skip_self_if_necessary(self, events, execution_date, task):
check.list_param(events, 'events', of_type=DagsterEvent)
check.inst_param(execution_date, 'execution_date', datetime.datetime)
check.inst_param(task, 'task', BaseOperator)
skipped = any([e.event_type_value == DagsterEventType.STEP_SKIPPED.value for e in events])
if skipped:
self.skip(None, execution_date, [task])
class ModifiedDockerOperator(DockerOperator):
"""ModifiedDockerOperator supports host temporary directories on OSX.
Incorporates https://github.com/apache/airflow/pull/4315/ and an implementation of
https://issues.apache.org/jira/browse/AIRFLOW-3825.
:param host_tmp_dir: Specify the location of the temporary directory on the host which will
be mapped to tmp_dir. If not provided defaults to using the standard system temp directory.
:type host_tmp_dir: str
"""
def __init__(self, host_tmp_dir='/tmp', **kwargs):
self.host_tmp_dir = host_tmp_dir
kwargs['xcom_push'] = True
super(ModifiedDockerOperator, self).__init__(**kwargs)
@contextmanager
def get_host_tmp_dir(self):
'''Abstracts the tempdir context manager so that this can be overridden.'''
with TemporaryDirectory(prefix='airflowtmp', dir=self.host_tmp_dir) as tmp_dir:
yield tmp_dir
def execute(self, context):
'''Modified only to use the get_host_tmp_dir helper.'''
self.log.info('Starting docker container from image %s', self.image)
tls_config = self.__get_tls_config()
if self.docker_conn_id:
self.cli = self.get_hook().get_conn()
else:
self.cli = APIClient(base_url=self.docker_url, version=self.api_version, tls=tls_config)
if self.force_pull or len(self.cli.images(name=self.image)) == 0:
self.log.info('Pulling docker image %s', self.image)
for l in self.cli.pull(self.image, stream=True):
output = json.loads(l.decode('utf-8').strip())
if 'status' in output:
self.log.info("%s", output['status'])
with self.get_host_tmp_dir() as host_tmp_dir:
self.environment['AIRFLOW_TMP_DIR'] = self.tmp_dir
self.volumes.append('{0}:{1}'.format(host_tmp_dir, self.tmp_dir))
self.container = self.cli.create_container(
command=self.get_command(),
environment=self.environment,
host_config=self.cli.create_host_config(
auto_remove=self.auto_remove,
binds=self.volumes,
network_mode=self.network_mode,
shm_size=self.shm_size,
dns=self.dns,
dns_search=self.dns_search,
cpu_shares=int(round(self.cpus * 1024)),
mem_limit=self.mem_limit,
),
image=self.image,
user=self.user,
working_dir=self.working_dir,
)
self.cli.start(self.container['Id'])
res = []
line = ''
for new_line in self.cli.logs(container=self.container['Id'], stream=True):
line = new_line.strip()
if hasattr(line, 'decode'):
line = line.decode('utf-8')
self.log.info(line)
res.append(line)
result = self.cli.wait(self.container['Id'])
if result['StatusCode'] != 0:
raise AirflowException('docker container failed: ' + repr(result))
if self.xcom_push_flag:
# Try to avoid any kind of race condition?
return '\n'.join(res) + '\n' if self.xcom_all else str(line)
# This is a class-private name on DockerOperator for no good reason --
# all that the status quo does is inhibit extension of the class.
# See https://issues.apache.org/jira/browse/AIRFLOW-3880
def __get_tls_config(self):
# pylint: disable=no-member
return super(ModifiedDockerOperator, self)._DockerOperator__get_tls_config()
class DagsterDockerOperator(ModifiedDockerOperator, DagsterSkipMixin):
'''Dagster operator for Apache Airflow.
Wraps a modified DockerOperator incorporating https://github.com/apache/airflow/pull/4315.
    Unlike the standard DockerOperator, this operator also supports configuration via
    docker.from_env when a Docker client can be initialized that way, so it isn't
    necessary to explicitly set docker_url, tls_config, or api_version.
'''
# py2 compat
# pylint: disable=keyword-arg-before-vararg
def __init__(
self,
task_id,
environment_dict=None,
pipeline_name=None,
mode=None,
step_keys=None,
dag=None,
*args,
**kwargs
):
check.str_param(pipeline_name, 'pipeline_name')
step_keys = check.opt_list_param(step_keys, 'step_keys', of_type=str)
environment_dict = check.opt_dict_param(environment_dict, 'environment_dict', key_type=str)
tmp_dir = kwargs.pop('tmp_dir', DOCKER_TEMPDIR)
host_tmp_dir = kwargs.pop('host_tmp_dir', seven.get_system_temp_directory())
if 'storage' not in environment_dict:
raise airflow_storage_exception(tmp_dir)
check.invariant(
'in_memory' not in environment_dict.get('storage', {}),
'Cannot use in-memory storage with Airflow, must use S3',
)
self.docker_conn_id_set = kwargs.get('docker_conn_id') is not None
self.environment_dict = environment_dict
self.pipeline_name = pipeline_name
self.mode = mode
self.step_keys = step_keys
self._run_id = None
# These shenanigans are so we can override DockerOperator.get_hook in order to configure
# a docker client using docker.from_env, rather than messing with the logic of
# DockerOperator.execute
if not self.docker_conn_id_set:
try:
from_env().version()
except Exception: # pylint: disable=broad-except
pass
else:
kwargs['docker_conn_id'] = True
# We do this because log lines won't necessarily be emitted in order (!) -- so we can't
# just check the last log line to see if it's JSON.
kwargs['xcom_all'] = True
# Store Airflow DAG run timestamp so that we can pass along via execution metadata
self.airflow_ts = kwargs.get('ts')
if 'environment' not in kwargs:
kwargs['environment'] = DEFAULT_ENVIRONMENT
super(DagsterDockerOperator, self).__init__(
task_id=task_id, dag=dag, tmp_dir=tmp_dir, host_tmp_dir=host_tmp_dir, *args, **kwargs
)
@property
def run_id(self):
if self._run_id is None:
return ''
else:
return self._run_id
@property
def query(self):
# TODO: https://github.com/dagster-io/dagster/issues/1342
redacted = construct_variables(
self.mode, 'REDACTED', self.pipeline_name, self.run_id, self.airflow_ts, self.step_keys
)
self.log.info(
'Executing GraphQL query: {query}\n'.format(query=START_PIPELINE_EXECUTION_QUERY)
+ 'with variables:\n'
+ seven.json.dumps(redacted, indent=2)
)
variables = construct_variables(
self.mode,
self.environment_dict,
self.pipeline_name,
self.run_id,
self.airflow_ts,
self.step_keys,
)
return '-v \'{variables}\' \'{query}\''.format(
variables=seven.json.dumps(variables), query=START_PIPELINE_EXECUTION_QUERY
)
def get_command(self):
if self.command is not None and self.command.strip().find('[') == 0:
commands = ast.literal_eval(self.command)
elif self.command is not None:
commands = self.command
else:
commands = self.query
return commands
def get_hook(self):
if self.docker_conn_id_set:
return super(DagsterDockerOperator, self).get_hook()
class _DummyHook(object):
def get_conn(self):
return from_env().api
return _DummyHook()
def execute(self, context):
try:
from dagster_graphql.client.mutations import (
handle_start_pipeline_execution_errors,
handle_start_pipeline_execution_result,
)
except ImportError:
raise AirflowException(
'To use the DagsterPythonOperator, dagster and dagster_graphql must be installed '
'in your Airflow environment.'
)
if 'run_id' in self.params:
self._run_id = self.params['run_id']
elif 'dag_run' in context and context['dag_run'] is not None:
self._run_id = context['dag_run'].run_id
try:
raw_res = super(DagsterDockerOperator, self).execute(context)
self.log.info('Finished executing container.')
res = parse_raw_res(raw_res)
handle_start_pipeline_execution_errors(res)
events = handle_start_pipeline_execution_result(res)
self.skip_self_if_necessary(events, context['execution_date'], context['task'])
return events
finally:
self._run_id = None
# This is a class-private name on DockerOperator for no good reason --
# all that the status quo does is inhibit extension of the class.
# See https://issues.apache.org/jira/browse/AIRFLOW-3880
def __get_tls_config(self):
# pylint:disable=no-member
return super(DagsterDockerOperator, self)._ModifiedDockerOperator__get_tls_config()
@contextmanager
def get_host_tmp_dir(self):
yield self.host_tmp_dir
class DagsterPythonOperator(PythonOperator, DagsterSkipMixin):
def __init__(
self,
task_id,
handle,
pipeline_name,
environment_dict,
mode,
step_keys,
dag,
*args,
**kwargs
):
if 'storage' not in environment_dict:
raise airflow_storage_exception('/tmp/special_place')
check.invariant(
'in_memory' not in environment_dict.get('storage', {}),
'Cannot use in-memory storage with Airflow, must use filesystem or S3',
)
def python_callable(ts, dag_run, **kwargs): # pylint: disable=unused-argument
run_id = dag_run.run_id
# TODO: https://github.com/dagster-io/dagster/issues/1342
redacted = construct_variables(mode, 'REDACTED', pipeline_name, run_id, ts, step_keys)
logging.info(
'Executing GraphQL query: {query}\n'.format(query=START_PIPELINE_EXECUTION_QUERY)
+ 'with variables:\n'
+ seven.json.dumps(redacted, indent=2)
)
events = execute_start_pipeline_execution_query(
handle,
construct_variables(mode, environment_dict, pipeline_name, run_id, ts, step_keys),
)
self.skip_self_if_necessary(events, kwargs['execution_date'], kwargs['task'])
return events
super(DagsterPythonOperator, self).__init__(
task_id=task_id,
provide_context=True,
python_callable=python_callable,
dag=dag,
*args,
**kwargs
)
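# Hedged usage sketch (names and the surrounding DAG are hypothetical; only
# the constructor signature above is taken from this file):
#   task = DagsterPythonOperator(
#       task_id="run_pipeline",
#       handle=handle,                      # an execution handle, assumed
#       pipeline_name="my_pipeline",
#       environment_dict={"storage": {"filesystem": {}}},
#       mode="default",
#       step_keys=None,
#       dag=dag,
#   )
# Note that environment_dict must declare a non-in-memory storage key, or the
# constructor raises airflow_storage_exception before building the callable.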
| 36.346705
| 100
| 0.633662
| 1,523
| 12,685
| 5.050558
| 0.217334
| 0.021841
| 0.022101
| 0.021061
| 0.277041
| 0.215419
| 0.182137
| 0.152366
| 0.152366
| 0.152366
| 0
| 0.005124
| 0.276862
| 12,685
| 348
| 101
| 36.451149
| 0.833424
| 0.161135
| 0
| 0.2
| 0
| 0
| 0.08722
| 0.006068
| 0
| 0
| 0
| 0.002874
| 0
| 1
| 0.065306
| false
| 0.004082
| 0.077551
| 0.012245
| 0.212245
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbaf9c88715d3ed6658c5b7cac9f3b5786ab4dad
| 908
|
py
|
Python
|
setup.py
|
patel-zeel/CGLB-1
|
6afab3631704ae4233e93c2de289b4e351f61838
|
[
"Apache-2.0"
] | 5
|
2021-07-19T09:08:15.000Z
|
2022-03-21T10:19:08.000Z
|
setup.py
|
patel-zeel/CGLB-1
|
6afab3631704ae4233e93c2de289b4e351f61838
|
[
"Apache-2.0"
] | 5
|
2021-08-30T20:24:52.000Z
|
2021-11-29T07:24:51.000Z
|
setup.py
|
patel-zeel/CGLB-1
|
6afab3631704ae4233e93c2de289b4e351f61838
|
[
"Apache-2.0"
] | 1
|
2021-11-25T22:15:27.000Z
|
2021-11-25T22:15:27.000Z
|
# Copyright 2021 The CGLB Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
pkgs = find_packages()
setup(
name="cglb",
author="Artem Artemev, David Burt",
author_email="a.artemev20@imperial.ac.uk, drb62@cam.ac.uk",
version="0.0.1",
packages=pkgs,
install_requires=["numpy", "scipy"],
dependency_links=[],
)
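# Hedged usage note: from the repository root, "pip install -e ." consumes
# this file to install cglb (with numpy and scipy) in editable mode; this is
# standard setuptools behavior, not something specific to this project.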
| 32.428571
| 74
| 0.732379
| 134
| 908
| 4.925373
| 0.69403
| 0.090909
| 0.039394
| 0.048485
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019947
| 0.171806
| 908
| 27
| 75
| 33.62963
| 0.857713
| 0.63326
| 0
| 0
| 0
| 0
| 0.272727
| 0.08464
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbb379a767a5b56faec727f2a03bfb35f2d9f7df
| 5,361
|
py
|
Python
|
automation/aux_funcs.py
|
jaimiles23/hacker_rank
|
0580eac82e5d0989afabb5c2e66faf09713f891b
|
[
"Apache-2.0"
] | null | null | null |
automation/aux_funcs.py
|
jaimiles23/hacker_rank
|
0580eac82e5d0989afabb5c2e66faf09713f891b
|
[
"Apache-2.0"
] | null | null | null |
automation/aux_funcs.py
|
jaimiles23/hacker_rank
|
0580eac82e5d0989afabb5c2e66faf09713f891b
|
[
"Apache-2.0"
] | 3
|
2021-09-22T11:06:58.000Z
|
2022-01-25T09:29:24.000Z
|
"""Collection of functions related to navigating directories
"""
##########
# Imports
##########
import os
from typing import Union
from pathlib import Path
from logger.select_chall import logging
import constants
from domains import problem_domains
from git import Repo
##########
# Subdomain dir_name
##########
def get_subdomain_dirname(subdomain_num: int, total_subdomains: int, subdomain: str) -> str:
"""Returns directory name for subdirectory.
Args:
subdomain_num (int): Subdomain number
total_subdomains (int): Total number of subdomains
subdomain (str): Subdomain name.
Returns:
str: directory name
"""
logging.debug(f"Subdir info: {subdomain_num}, {total_subdomains}, {subdomain}")
subdomain_num, total_subdomains = str(subdomain_num), str(total_subdomains)
if total_subdomains == '1': # specific challenges, e.g., 10 days of stats
subdomain_num = ''
else:
while len(subdomain_num) < len(total_subdomains):
subdomain_num = '0' + subdomain_num
subdomain_num += '_'
subdir_name = subdomain_num + subdomain.strip().lower().replace(' ', '_')
logging.debug(f"subdir - {subdir_name}")
return subdir_name
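# Hedged worked example (values assumed, not from the source): numbers are
# zero-padded to the width of the total count, and a total of 1 drops the
# numeric prefix entirely.
#   get_subdomain_dirname(3, 12, "Warm Up")  ->  "03_warm_up"
#   get_subdomain_dirname(1, 1, "Warm Up")   ->  "warm_up"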
##########
# Change dir
##########
def change_dir(domain_dir: str):
"""Changes the current working directory.
Creates directory if it doesn't exist.
    Also creates a pre-README.md file.
Args:
domain_dir: directory to change to.
"""
    if not os.path.exists(domain_dir):
logging.info(f"DIR - creating {domain_dir}")
os.mkdir(domain_dir)
logging.info(f"DIR - changing to {domain_dir}")
os.chdir(domain_dir)
return
##########
# get dirname
##########
def get_dirname(dir_path: Path) -> str:
"""returns directory name from windows filepath
Args:
        dir_path (Path): path object
Returns:
str: directory name
"""
dirname = str(dir_path.resolve())
dirname = dirname[dirname.rfind('\\') + 1:]
logging.debug(f"Dirname {dirname} from {dir_path}")
return dirname
##########
# get_domain_dirs
##########
def get_domain_dirs(home_dir: object) -> list:
"""Returns list of domain directories.
Args:
home_dir (object): Home directory
Returns:
list: List of domain directories
"""
domain_dirs = []
for d in problem_domains:
domain_dir = home_dir / d.name
domain_dirs.append(domain_dir)
logging.debug("DIR - Domain dirs:" + '\n-'.join(
[str(d) for d in domain_dirs]))
return domain_dirs
##########
# get_subdomain_dirs
##########
def get_subdomain_dirs(domain_dir) -> list:
"""Returns list of subdomain dirs.
Args:
domain_dir ([type]): Domain directory.
Returns:
list: Returns list of subdomain directories.
"""
not_subdirs = (
'.ipynb_checkpoints'
)
p = Path(domain_dir)
subdirs = []
for f in p.glob('**/*'):
if f.is_dir():
dir_name = get_dirname(f)
logging.debug(f"Check dir - {dir_name}")
if dir_name not in not_subdirs:
subdirs.append(f)
logging.debug("DIR - Subdomain dirs:" + '\n-'.join(
[str(d) for d in subdirs]))
return subdirs
##########
# Challenge csv name
##########
def get_chall_csv_filename(sub_dir) -> str:
"""Returns csv name containing challenge informatin.
Args:
sub_dir ([type]): sub directory name
Returns:
str: csv filename
"""
p = Path(sub_dir)
for f in p.glob('**/*'):
filename = str(f)
if filename.endswith(".csv"):
return filename
raise Exception(f"No csv located in {sub_dir}")
##########
# pre readme
##########
def make_readme_setup(name: str, url: str):
"""Creates pre-readme in file."""
filename = constants.PRE_README_FILENAME
    if not os.path.exists(filename):
with open(filename, 'w') as outfile:
outfile.write(f"# {name}")
outfile.write(f"\nContains solutions to [{name}]({url}).")
return
##########
# Make file
##########
def make_file(filename: str, name: str, url: str) -> None:
"""Checks if file exists. If it doesn't exist, creates file."""
exists = os.path.exists(filename)
logging.debug(f"{filename} - {exists}")
if os.path.exists(filename):
return
logging.debug(f"FILE - Creating {filename}")
with open(filename, 'w') as outfile:
outfile.write(f"Solution to [{name}]({url})")
return
##########
# Update github
##########
def update_github(home_dir: object, commit_msg: str) -> None:
"""Updates github directory.
Args:
home_dir (object): home dir pathlib
commit_msg (str): Commit message
"""
repo = Repo(home_dir)
repo.git.add(update = True)
repo.index.commit(commit_msg)
logging.debug(f"Committing: {commit_msg}")
origin = repo.remote(name = 'origin')
origin.push()
logging.debug("Pushed to repo.")
def get_solution_commit_msg(domain: Path, subdomain: Path, chall_name: str) -> str:
"""Returns commit message for adding solution."""
domain_name = get_dirname(domain)
subdomain_name = get_dirname(subdomain)
return f"Solution to {domain_name} {subdomain_name} - {chall_name}"
| 23.933036
| 92
| 0.610707
| 656
| 5,361
| 4.83689
| 0.22561
| 0.034037
| 0.028679
| 0.016073
| 0.113457
| 0.056729
| 0.041601
| 0.041601
| 0.029625
| 0.029625
| 0
| 0.001228
| 0.240627
| 5,361
| 223
| 93
| 24.040359
| 0.778187
| 0.257042
| 0
| 0.089888
| 0
| 0
| 0.148709
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.11236
| false
| 0
| 0.078652
| 0
| 0.303371
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbb6e0ba861fbcb599f8a5421f34367fccb32fdd
| 1,158
|
py
|
Python
|
src/redcli/application/services/logger.py
|
Zhouhao12345/redcli
|
8a8260b0799e8524d0c339df8dfe6bcfb22f1841
|
[
"MIT"
] | 6
|
2019-12-02T02:38:40.000Z
|
2021-02-05T06:40:56.000Z
|
src/redcli/application/services/logger.py
|
Zhouhao12345/redcli
|
8a8260b0799e8524d0c339df8dfe6bcfb22f1841
|
[
"MIT"
] | null | null | null |
src/redcli/application/services/logger.py
|
Zhouhao12345/redcli
|
8a8260b0799e8524d0c339df8dfe6bcfb22f1841
|
[
"MIT"
] | 1
|
2019-12-02T04:19:08.000Z
|
2019-12-02T04:19:08.000Z
|
from ..constant import Service as Service_Key
from .base import Service
import logging
class LoggerService(Service):
def init(self, services):
config_service = services.get_service(Service_Key.CONFIG_LOCAL)
self.date_format = config_service.get_config_value(
"LOGGER", "DateFormat")
self.format_str = config_service.get_config_value(
"LOGGER", "FormatString"
)
level_str = config_service.get_config_value(
"LOGGER", "LEVEL"
)
self.level = getattr(logging, level_str)
self.file_path = config_service.get_config_value(
"LOGGER", "FilePath"
)
def start(self):
self._logging = logging
self._logging.basicConfig(
filename=self.file_path,
level=self.level,
filemode="w",
format=self.format_str,
datefmt=self.date_format,
)
def close(self):
del self._logging
del self.file_path
del self.level
del self.format_str
del self.date_format
@property
def logging(self):
return self._logging
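# Hedged usage sketch (the services registry comes from the surrounding
# application and is assumed here):
#   svc = LoggerService()
#   svc.init(services)   # reads DateFormat, FormatString, LEVEL and FilePath
#   svc.start()          # configures the root logger via logging.basicConfig
#   svc.logging.info("redcli started")
#   svc.close()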
| 27.571429
| 71
| 0.607945
| 128
| 1,158
| 5.25
| 0.304688
| 0.096726
| 0.095238
| 0.130952
| 0.205357
| 0.205357
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0.30829
| 1,158
| 41
| 72
| 28.243902
| 0.838951
| 0
| 0
| 0
| 0
| 0
| 0.051813
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.083333
| 0.027778
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbb7c9d675be21d26531a6e1f3de3d231a427a1e
| 1,224
|
py
|
Python
|
scale/scale/local_settings_TRAVIS-CI.py
|
stevevarner/scale
|
9623b261db4ddcf770f00df16afc91176142bb7c
|
[
"Apache-2.0"
] | null | null | null |
scale/scale/local_settings_TRAVIS-CI.py
|
stevevarner/scale
|
9623b261db4ddcf770f00df16afc91176142bb7c
|
[
"Apache-2.0"
] | null | null | null |
scale/scale/local_settings_TRAVIS-CI.py
|
stevevarner/scale
|
9623b261db4ddcf770f00df16afc91176142bb7c
|
[
"Apache-2.0"
] | null | null | null |
# Settings file for use with travis-ci
# Include all the default settings.
from settings import *
# Use the following lines to enable developer/debug mode.
DEBUG = False
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# Set the external URL context here
FORCE_SCRIPT_NAME = '/'
USE_X_FORWARDED_HOST = True
ALLOWED_HOSTS = ["*"]
STATIC_ROOT = 'static'
STATIC_URL = '/static/'
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# Not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC'
SECRET_KEY = "0fnk28edjh"
# The template database to use when creating your new database.
# By using your own template that already includes the postgis extension,
# you can avoid needing to run the unit tests as a PostgreSQL superuser.
#POSTGIS_TEMPLATE = 'scale'
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'scale',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
},
}
# Master settings
MESOS_MASTER = 'zk://localhost:2181/mesos'
# Metrics collection directory
METRICS_DIR = '/tmp'
| 26.042553
| 73
| 0.718137
| 171
| 1,224
| 5.035088
| 0.690058
| 0.027875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007937
| 0.176471
| 1,224
| 46
| 74
| 26.608696
| 0.84623
| 0.558007
| 0
| 0
| 0
| 0
| 0.309886
| 0.119772
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.047619
| 0.047619
| 0
| 0.047619
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbbaccac8596eecef7a731177661a6286ed440a5
| 1,316
|
py
|
Python
|
scoring/dictionary/YAQ93.py
|
majazeh/risloo-samples
|
aadf27912a5044663698fa14fa781c644ea3f548
|
[
"Unlicense"
] | null | null | null |
scoring/dictionary/YAQ93.py
|
majazeh/risloo-samples
|
aadf27912a5044663698fa14fa781c644ea3f548
|
[
"Unlicense"
] | null | null | null |
scoring/dictionary/YAQ93.py
|
majazeh/risloo-samples
|
aadf27912a5044663698fa14fa781c644ea3f548
|
[
"Unlicense"
] | 1
|
2021-03-07T09:15:55.000Z
|
2021-03-07T09:15:55.000Z
|
f1 = 'intentionally_not_thinking_about_upsetting_things'
f2 = 'substance_abuse'
f3 = 'denial_of_unhappiness'
f4 = 'excessive_rationality_and_control'
f5 = 'suppression_of_anger'
f6 = 'psychosomatic_symptoms'
f7 = 'denial_of_memories'
f8 = 'withdrawal_from_people'
f9 = 'avoidance_through_sleep_and_lack_of_energy'
f10 = 'distraction_through_activity'
f11 = 'self_soothing_like_eating_shopping_etc'
f12 = 'passive_blocking_of_upsetting_emotions'
f13 = 'passive_distraction_fantasy_daydreaming_television'
f14 = 'avoidance_of_upsetting_situations'
factors_names = (f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14)
factors = {
1 :(f1,)
, 2 :(f2,)
, 3 :(f3,)
, 4 :(f3,)
, 5 :(f4,)
, 6 :(f5,)
, 7:(f2,)
, 8 :(f7 ,)
, 9 :(f2,f11,)
, 10 :(f6 ,)
, 11 :(f12 ,)
, 12 :(f6 ,)
, 13 :(f5 , f8,)
, 14 :(f9 ,)
, 15 :(f6 ,)
, 16 :(f13 ,)
, 17 :(f4,)
, 18 :(f5 ,)
, 19 :(f4,)
, 20 :(f8 ,)
, 21 :(f7,)
, 22 :(f9 ,)
, 23 :(f10 ,)
, 24 :(f10 ,)
, 25 :(f13 ,)
, 26 :(f11 ,)
, 27 :(f7 ,)
, 28:(f10 ,)
, 29 :(f3,)
, 30 :(f8 ,)
, 31 :(f1 ,)
, 32 :(f1 ,)
, 33 :(f12 ,)
, 34 :(f3,)
, 35 :(f4,)
, 36 :(f11 ,)
, 37 :(f14 ,)
, 38 :(f6,)
, 39 :(f12 ,)
, 40 :(f4 , f12,) #
}
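# Hedged reading of the mapping above (interpretation assumed, not stated in
# the source): factors maps each questionnaire item number to the factor
# name(s) it loads on, e.g. item 9 scores both f2 (substance_abuse) and f11
# (self_soothing_like_eating_shopping_etc).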
| 21.225806
| 68
| 0.521277
| 171
| 1,316
| 3.77193
| 0.584795
| 0.024806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175605
| 0.277356
| 1,316
| 62
| 69
| 21.225806
| 0.502629
| 0
| 0
| 0
| 0
| 0
| 0.325988
| 0.278116
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.035088
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbbb396fd5ddce2bf132ab9fc786735f3c60216f
| 11,400
|
py
|
Python
|
scripts/lib/smart.py
|
lpenuelac/ImageAnalysis
|
a01b1278cca92e45fef6f5e41d1310cfbb041308
|
[
"MIT"
] | 93
|
2015-11-26T14:15:51.000Z
|
2022-03-10T13:54:21.000Z
|
scripts/lib/smart.py
|
lpenuelac/ImageAnalysis
|
a01b1278cca92e45fef6f5e41d1310cfbb041308
|
[
"MIT"
] | 19
|
2017-04-06T11:09:21.000Z
|
2022-03-05T20:12:39.000Z
|
scripts/lib/smart.py
|
lpenuelac/ImageAnalysis
|
a01b1278cca92e45fef6f5e41d1310cfbb041308
|
[
"MIT"
] | 30
|
2017-06-12T16:08:51.000Z
|
2022-01-28T17:34:04.000Z
|
# code to estimate world surface elevation and EKF yaw error from
# image direct pose information.
# - triangulate image features (in 3d) based on camera poses
# - use essential/fundamental matrix + camera pose to estimate yaw error
# - use affine transformation + camera pose to estimate yaw error
import cv2
import math
import numpy as np
import os
from props import getNode
import props_json
from . import camera
from . import image
from .logger import log, qlog
from . import project
from . import srtm
r2d = 180 / math.pi
d2r = math.pi / 180
smart_node = getNode("/smart", True)
# compute the 3d triangulation of the matches between a pair of images
def triangulate_features(i1, i2):
# quick sanity checks
if i1 == i2:
return None
if not i2.name in i1.match_list:
return None
if len(i1.match_list[i2.name]) == 0:
return None
if not i1.kp_list or not len(i1.kp_list):
i1.load_features()
if not i2.kp_list or not len(i2.kp_list):
i2.load_features()
# camera calibration
K = camera.get_K()
IK = np.linalg.inv(K)
# get poses
rvec1, tvec1 = i1.get_proj()
rvec2, tvec2 = i2.get_proj()
R1, jac = cv2.Rodrigues(rvec1)
PROJ1 = np.concatenate((R1, tvec1), axis=1)
R2, jac = cv2.Rodrigues(rvec2)
PROJ2 = np.concatenate((R2, tvec2), axis=1)
# setup data structures for cv2 call
uv1 = []; uv2 = []; indices = []
for pair in i1.match_list[i2.name]:
p1 = i1.kp_list[ pair[0] ].pt
p2 = i2.kp_list[ pair[1] ].pt
uv1.append( [p1[0], p1[1], 1.0] )
uv2.append( [p2[0], p2[1], 1.0] )
pts1 = IK.dot(np.array(uv1).T)
pts2 = IK.dot(np.array(uv2).T)
points = cv2.triangulatePoints(PROJ1, PROJ2, pts1[:2], pts2[:2])
points /= points[3]
return points
# find (forward) affine transformation between feature pairs
def find_affine(i1, i2):
# quick sanity checks
if i1 == i2:
return None
if not i2.name in i1.match_list:
return None
if len(i1.match_list[i2.name]) == 0:
return None
if not i1.kp_list or not len(i1.kp_list):
i1.load_features()
if not i2.kp_list or not len(i2.kp_list):
i2.load_features()
# affine transformation from i2 uv coordinate system to i1
uv1 = []; uv2 = []; indices = []
for pair in i1.match_list[i2.name]:
uv1.append( i1.kp_list[ pair[0] ].pt )
uv2.append( i2.kp_list[ pair[1] ].pt )
uv1 = np.float32([uv1])
uv2 = np.float32([uv2])
affine, status = \
cv2.estimateAffinePartial2D(uv2, uv1)
return affine
# return individual components of affine transform: rot, tx, ty, sx,
# sy (units are degrees and pixels)
def decompose_affine(affine):
tx = affine[0][2]
ty = affine[1][2]
a = affine[0][0]
b = affine[0][1]
c = affine[1][0]
d = affine[1][1]
sx = math.sqrt( a*a + b*b )
if a < 0.0:
sx = -sx
sy = math.sqrt( c*c + d*d )
if d < 0.0:
sy = -sy
angle_deg = math.atan2(-b,a) * 180.0/math.pi
if angle_deg < -180.0:
angle_deg += 360.0
if angle_deg > 180.0:
angle_deg -= 360.0
return (angle_deg, tx, ty, sx, sy)
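# Hedged sanity check (illustrative values, not from the source): a pure
# 30-degree rotation plus a (5, -2) pixel translation should decompose into
# approximately (30.0, 5.0, -2.0, 1.0, 1.0).
def _check_decompose_affine():
    c, s = math.cos(30 * d2r), math.sin(30 * d2r)
    rot, tx, ty, sx, sy = decompose_affine(np.array([[c, -s, 5.0],
                                                     [s, c, -2.0]]))
    assert abs(rot - 30.0) < 1e-6 and abs(sx - 1.0) < 1e-6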
# average of the triangulated points (converted to positive elevation)
def estimate_surface_elevation(i1, i2):
points = triangulate_features(i1, i2)
(ned1, ypr1, quat1) = i1.get_camera_pose()
(ned2, ypr2, quat2) = i2.get_camera_pose()
diff = np.array(ned2) - np.array(ned1)
dist_m = np.linalg.norm( diff )
# num_matches = points.shape[1]
if points is None:
return None, None, dist_m
else:
# points are are triangulated in the NED coordinates, so
# invert the vertical (down) average before returning the
# answer.
return -np.average(points[2]), np.std(points[2]), dist_m
# Estimate image pose yaw error (based on found pairs affine
# transform, original image pose, and gps positions; assumes a mostly
# nadir camera pose.)  After computing the affine transform, project
# image 2 center uv into image1 uv space and compute approximate
# course in local uv space, then add this to direct pose yaw estimate
# and compare to gps course.
def estimate_yaw_error(i1, i2):
affine = find_affine(i1, i2)
if affine is None:
return None, None, None, None
# fyi ...
# print(i1.name, 'vs', i2.name)
# print(" affine:\n", affine)
(rot, tx, ty, sx, sy) = decompose_affine(affine)
# print(" ", rot, tx, ty, sx, sy)
if abs(ty) > 0:
weight = abs(ty / tx)
else:
weight = abs(tx)
# ground course between camera poses
(ned1, ypr1, quat1) = i1.get_camera_pose()
(ned2, ypr2, quat2) = i2.get_camera_pose()
diff = np.array(ned2) - np.array(ned1)
dist = np.linalg.norm( diff )
dir = diff / dist
print(" dist:", dist, 'ned dir:', dir[0], dir[1], dir[2])
crs_gps = 90 - math.atan2(dir[0], dir[1]) * r2d
if crs_gps < 0: crs_gps += 360
if crs_gps > 360: crs_gps -= 360
# center pixel of i2 in i1's uv coordinate system
(w, h) = camera.get_image_params()
cx = int(w*0.5)
cy = int(h*0.5)
print("center:", [cx, cy])
newc = affine.dot(np.float32([cx, cy, 1.0]))[:2]
cdiff = [ newc[0] - cx, cy - newc[1] ]
#print("new center:", newc)
#print("center diff:", cdiff)
# estimated course based on i1 pose and [local uv coordinate
# system] affine transform
crs_aff = 90 - math.atan2(cdiff[1], cdiff[0]) * r2d
(_, air_ypr1, _) = i1.get_aircraft_pose()
#print(" aircraft yaw: %.1f" % air_ypr1[0])
#print(" affine course: %.1f" % crs_aff)
#print(" ground course: %.1f" % crs_gps)
crs_fit = air_ypr1[0] + crs_aff
yaw_error = crs_gps - crs_fit
if yaw_error < -180: yaw_error += 360
if yaw_error > 180: yaw_error -= 360
print(" estimated yaw error: %.1f" % yaw_error)
# aircraft yaw (est) + affine course + yaw error = ground course
return yaw_error, dist, crs_aff, weight
# compute the pairwise surface estimate and then update the property
# tree records
def update_surface_estimate(i1, i2):
avg, std, dist_m = estimate_surface_elevation(i1, i2)
if avg is None:
return None, None
i1_node = smart_node.getChild(i1.name, True)
i2_node = smart_node.getChild(i2.name, True)
tri1_node = i1_node.getChild("tri_surface_pairs", True)
tri2_node = i2_node.getChild("tri_surface_pairs", True)
# update pairwise info in the property tree
#weight = len(i1.match_list[i2.name])
weight = dist_m * dist_m
pair1_node = tri1_node.getChild(i2.name, True)
pair2_node = tri2_node.getChild(i1.name, True)
pair1_node.setFloat("surface_m", float("%.1f" % avg))
pair1_node.setInt("weight", weight)
pair1_node.setFloat("stddev", float("%.1f" % std))
pair1_node.setInt("dist_m", dist_m)
pair2_node.setFloat("surface_m", float("%.1f" % avg))
pair2_node.setInt("weight", weight)
pair2_node.setFloat("stddev", float("%.1f" % std))
pair2_node.setInt("dist_m", dist_m)
# update the average surface values
cutoff_std = 25 # more than this suggests a bad set of matches
sum1 = 0
count1 = 0
for child in tri1_node.getChildren():
pair_node = tri1_node.getChild(child)
surf = pair_node.getFloat("surface_m")
weight = pair_node.getInt("weight")
stddev = pair_node.getFloat("stddev")
if stddev < cutoff_std:
sum1 += surf * weight
count1 += weight
if count1 > 0:
i1_node.setFloat("tri_surface_m", float("%.1f" % (sum1 / count1)))
sum2 = 0
count2 = 0
for child in tri2_node.getChildren():
pair_node = tri2_node.getChild(child)
surf = pair_node.getFloat("surface_m")
weight = pair_node.getInt("weight")
stddev = pair_node.getFloat("stddev")
if stddev < cutoff_std:
sum2 += surf * weight
count2 += weight
if count2 > 0:
i2_node.setFloat("tri_surface_m", float("%.1f" % (sum2 / count2)))
return avg, std
# compute the pairwise surface estimate and then update the property
# tree records
def update_yaw_error_estimate(i1, i2):
yaw_error, dist, crs_affine, weight = estimate_yaw_error(i1, i2)
if yaw_error is None:
return 0
i1_node = smart_node.getChild(i1.name, True)
yaw_node = i1_node.getChild("yaw_pairs", True)
# update pairwise info in the property tree
pair_node = yaw_node.getChild(i2.name, True)
pair_node.setFloat("yaw_error", "%.1f" % yaw_error)
pair_node.setFloat("dist_m", "%.1f" % dist)
pair_node.setFloat("relative_crs", "%.1f" % crs_affine)
pair_node.setFloat("weight", "%.1f" % weight)
sum = 0
count = 0
for child in yaw_node.getChildren():
pair_node = yaw_node.getChild(child)
yaw_error = pair_node.getFloat("yaw_error")
weight = pair_node.getInt("weight")
dist_m = pair_node.getFloat("dist_m")
if dist_m >= 0.5 and abs(yaw_error) <= 30:
sum += yaw_error * weight
count += weight
#else:
# log("yaw error ignored:", i1.name, i2.name, "%.1fm" % dist_m,
# "%.1f(deg)" % yaw_error)
if count > 0:
i1_node.setFloat("yaw_error", float("%.1f" % (sum / count)))
return sum / count
else:
return 0
def get_yaw_error_estimate(i1):
i1_node = smart_node.getChild(i1.name, True)
if i1_node.hasChild("yaw_error"):
return i1_node.getFloat("yaw_error")
else:
return 0.0
# return the average of estimated surfaces below the image pair
def get_surface_estimate(i1, i2):
i1_node = smart_node.getChild(i1.name, True)
i2_node = smart_node.getChild(i2.name, True)
tri1_node = i1_node.getChild("tri_surface_pairs", True)
tri2_node = i2_node.getChild("tri_surface_pairs", True)
count = 0
sum = 0
if i1_node.hasChild("tri_surface_m"):
sum += i1_node.getFloat("tri_surface_m")
count += 1
if i2_node.hasChild("tri_surface_m"):
sum += i2_node.getFloat("tri_surface_m")
count += 1
if count > 0:
return sum / count
# no triangulation estimate yet, fall back to SRTM lookup
g1 = i1_node.getFloat("srtm_surface_m")
g2 = i2_node.getFloat("srtm_surface_m")
ground_m = (g1 + g2) * 0.5
qlog(" SRTM ground (no triangulation yet): %.1f" % ground_m)
return ground_m
# find srtm surface altitude under each camera pose
def update_srtm_elevations(proj):
for image in proj.image_list:
ned, ypr, quat = image.get_camera_pose()
surface = srtm.ned_interp([ned[0], ned[1]])
image_node = smart_node.getChild(image.name, True)
image_node.setFloat("srtm_surface_m", float("%.1f" % surface))
def set_yaw_error_estimates(proj):
for image in proj.image_list:
image_node = smart_node.getChild(image.name, True)
yaw_node = image_node.getChild("yaw_pairs", True)
yaw_error_deg = yaw_node.getFloat("yaw_error")
image.set_aircraft_yaw_error_estimate(yaw_error_deg)
def load(analysis_dir):
surface_file = os.path.join(analysis_dir, "smart.json")
props_json.load(surface_file, smart_node)
def save(analysis_dir):
surface_file = os.path.join(analysis_dir, "smart.json")
props_json.save(surface_file, smart_node)
| 33.236152
| 78
| 0.635351
| 1,703
| 11,400
| 4.093365
| 0.171462
| 0.040166
| 0.014919
| 0.0241
| 0.390331
| 0.3275
| 0.291063
| 0.254053
| 0.224502
| 0.205709
| 0
| 0.044324
| 0.242018
| 11,400
| 342
| 79
| 33.333333
| 0.762412
| 0.208684
| 0
| 0.295833
| 0
| 0
| 0.061907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054167
| false
| 0
| 0.045833
| 0
| 0.191667
| 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbbdb4df5c383e2a41743d8bacdff942a6c94c7d
| 3,229
|
py
|
Python
|
portal/migrations/versions/4b1e5b7b69eb_.py
|
ivan-c/truenth-portal
|
0b9d39ae43f42ea3413ed9634f295f5d856cbc77
|
[
"BSD-3-Clause"
] | 3
|
2017-01-15T10:11:57.000Z
|
2018-10-02T23:46:44.000Z
|
portal/migrations/versions/4b1e5b7b69eb_.py
|
pep8speaks/true_nth_usa_portal
|
31ff755b0cfe61ab908e2a399e3c41ef17ca8c16
|
[
"BSD-3-Clause"
] | 876
|
2016-04-04T20:45:11.000Z
|
2019-02-28T00:10:36.000Z
|
portal/migrations/versions/4b1e5b7b69eb_.py
|
pep8speaks/true_nth_usa_portal
|
31ff755b0cfe61ab908e2a399e3c41ef17ca8c16
|
[
"BSD-3-Clause"
] | 9
|
2016-04-13T01:18:55.000Z
|
2018-09-19T20:44:23.000Z
|
"""empty message
Revision ID: 4b1e5b7b69eb
Revises: 13d1c714823a
Create Date: 2017-01-19 12:36:55.339537
"""
# revision identifiers, used by Alembic.
revision = '4b1e5b7b69eb'
down_revision = '13d1c714823a'
import re
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from portal.models.audit import Audit
from portal.models.user import User
Session = sessionmaker()
def extract_context(comment):
contexts = [
('login', ['login', 'logout']),
('assessment', ['patient report', 'questionnaireresponse']),
('authentication', ['assuming identity', 'service',
'inadequate permission', 'identity challenge',
'access token']),
('intervention', ['intervention', r'client .* assuming role',
r'client .* releasing role', r'updated .* using']),
('account', ['register', 'merging', 'account', 'marking deleted',
'purging', 'registration']),
('user', ['time of death', 'deceased', 'demographics']),
('organization', ['organization', r'adding .* to']),
('consent', ['consent']),
('observation', ['observation', r'set codeableconcept .* on user']),
('group', ['group']),
('procedure', ['procedure']),
('relationship', ['relationship']),
('role', ['role']),
('tou', ['tou']),
('other', ['remote', 'test'])
]
for ct in contexts:
for searchterm in ct[1]:
if re.search(searchterm, comment):
return ct[0]
return 'other'
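# Hedged examples of the keyword matching above (comments invented for
# illustration; the first context whose search terms match wins):
#   extract_context("user login successful")       -> 'login'
#   extract_context("updated demographics on 42")  -> 'user'
#   extract_context("nothing recognizable here")   -> 'other'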
def upgrade():
op.add_column('audit', sa.Column('subject_id', sa.Integer()))
op.create_foreign_key('audit_subject_id_fkey', 'audit',
'users', ['subject_id'], ['id'])
op.add_column('audit', sa.Column('context', sa.Text(), nullable=True))
# copying user_id to subject_id for existing audit rows
bind = op.get_bind()
session = Session(bind=bind)
for audit in session.query(Audit):
# use user_id as subject_id by default
audit.subject_id = audit.user_id
# use 'other' as context by default
audit.context = "other"
if audit.comment:
# if comment references changed user, use that as subject_id
audit_comment_list = audit.comment.lower().split()
if ("user" in audit_comment_list and
len(audit_comment_list) > audit_comment_list.index("user") +
1):
subj_id = audit_comment_list[audit_comment_list.index(
"user") + 1]
if subj_id.isdigit() and session.query(User).filter_by(id=subj_id).first():
audit.subject_id = int(subj_id)
# if possible, use context extracted from comment
audit.context = extract_context(audit.comment.lower())
session.commit()
op.alter_column('audit', 'subject_id', nullable=False)
op.alter_column('audit', 'context', nullable=False)
def downgrade():
op.drop_column('audit', 'context')
op.drop_constraint('audit_subject_id_fkey', 'audit', type_='foreignkey')
op.drop_column('audit', 'subject_id')
| 33.28866
| 91
| 0.59585
| 355
| 3,229
| 5.28169
| 0.4
| 0.0528
| 0.0448
| 0.0336
| 0.112
| 0.087467
| 0.0448
| 0.0448
| 0.0448
| 0
| 0
| 0.022623
| 0.260762
| 3,229
| 96
| 92
| 33.635417
| 0.762882
| 0.115825
| 0
| 0
| 0
| 0
| 0.255454
| 0.022167
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.174603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbc06ee508b4f1069613557ce6ed45315a87cb10
| 639
|
py
|
Python
|
bisect/36003.py
|
simonjayhawkins/pandas
|
9f571c58d7796dac8fd1aa2301cf4aa30ad7143a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1
|
2022-02-22T17:13:16.000Z
|
2022-02-22T17:13:16.000Z
|
bisect/36003.py
|
simonjayhawkins/pandas
|
9f571c58d7796dac8fd1aa2301cf4aa30ad7143a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
bisect/36003.py
|
simonjayhawkins/pandas
|
9f571c58d7796dac8fd1aa2301cf4aa30ad7143a
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
import datetime
import pandas as pd
import pandas.testing as tm
print(pd.__version__)
df = pd.DataFrame(
{
"A": ["X", "Y"],
"B": [
datetime.datetime(2005, 1, 1, 10, 30, 23, 540000),
datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
],
}
)
print(df)
print(df.dtypes)
result = df.groupby("A").B.max()
print(result)
expected = pd.Series(
[
pd.Timestamp("2005-01-01 10:30:23.540000"),
datetime.datetime(3005, 1, 1, 10, 30, 23, 540000),
],
index=pd.Index(["X", "Y"], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
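# Hedged context note: datetime(3005, ...) lies outside the nanosecond
# Timestamp range, so column "B" stays object dtype; the expected Series above
# deliberately mixes a Timestamp and a plain datetime to pin down that
# behavior for the bisect.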
| 18.257143
| 62
| 0.56025
| 90
| 639
| 3.911111
| 0.4
| 0.045455
| 0.068182
| 0.136364
| 0.267045
| 0.267045
| 0.261364
| 0.261364
| 0.261364
| 0.261364
| 0
| 0.154812
| 0.251956
| 639
| 34
| 63
| 18.794118
| 0.58159
| 0
| 0
| 0.153846
| 0
| 0
| 0.064163
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0
| false
| 0
| 0.115385
| 0
| 0.115385
| 0.153846
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbc90b64134d3ab6cf219527bed05e8373ac58a3
| 717
|
py
|
Python
|
tests/core/test_lc.py
|
atsuki-kuwata/jaxsot
|
7de5dd964c951661892c79e4447e9f314885a0a9
|
[
"MIT"
] | 2
|
2022-03-01T23:50:08.000Z
|
2022-03-22T15:25:34.000Z
|
tests/core/test_lc.py
|
atsuki-kuwata/jaxsot
|
7de5dd964c951661892c79e4447e9f314885a0a9
|
[
"MIT"
] | 8
|
2022-02-19T00:06:34.000Z
|
2022-03-31T00:09:54.000Z
|
tests/core/test_lc.py
|
atsuki-kuwata/jaxsot
|
7de5dd964c951661892c79e4447e9f314885a0a9
|
[
"MIT"
] | 1
|
2022-03-01T22:39:00.000Z
|
2022-03-01T22:39:00.000Z
|
""" test for lc
"""
import pytest
import numpy as np
from jaxsot.core.weight import comp_weight, comp_omega
from jaxsot.core.lc import gen_lightcurve
from jaxsot.io.earth import binarymap
def test_lc():
mmap=binarymap(nside=16,show=False)
nside=16
inc=0.0
Thetaeq=np.pi
zeta=np.pi/3.0
Pspin=23.9344699/24.0
wspin=2*np.pi/Pspin
Porb=40.0
worb=2.*np.pi/Porb
N=1024
obst=np.linspace(0.0,Porb,N)
Thetav=worb*obst
Phiv=np.mod(wspin*obst,2*np.pi)
omega=comp_omega(nside)
WI,WV=comp_weight(nside,zeta,inc,Thetaeq,Thetav,Phiv,omega)
W=WI*WV
lc=gen_lightcurve(W,mmap,0.0)
assert np.abs(np.sum(lc)-63856.86)<0.1
if __name__=="__main__":
test_lc()
| 21.727273
| 63
| 0.679219
| 131
| 717
| 3.59542
| 0.450382
| 0.042463
| 0.031847
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073005
| 0.178522
| 717
| 32
| 64
| 22.40625
| 0.726655
| 0.015342
| 0
| 0
| 0
| 0
| 0.011478
| 0
| 0
| 0
| 0
| 0
| 0.038462
| 1
| 0.038462
| false
| 0
| 0.192308
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbca0d47c796ae1504de61caad61827265203834
| 4,057
|
py
|
Python
|
src/squad/kinematics/base.py
|
douglasdaly/spot-robot
|
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
|
[
"MIT"
] | null | null | null |
src/squad/kinematics/base.py
|
douglasdaly/spot-robot
|
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
|
[
"MIT"
] | null | null | null |
src/squad/kinematics/base.py
|
douglasdaly/spot-robot
|
7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
from squad.config import config
from squad.exceptions import FrozenError
class BodyParameters:
"""
Storage class for (static) body data/parameters.
"""
__slots__ = (
"_frozen",
"body_length_units",
"body_angle_units",
"l_body",
"w_body",
"h_body",
"l_hip",
"l_femur",
"l_leg",
"l_rod",
"l_rod_arm",
"l_rod_femur",
"h_rod_femur",
"l_rod_leg",
"cm_dx",
"cm_dy",
"cm_dz",
"leg_alpha_min",
"leg_alpha_max",
"leg_beta_min",
"leg_beta_max",
"leg_gamma_min",
"leg_gamma_max",
)
def __init__(self, **kwargs: float) -> None:
self._frozen = False
self.body_angle_units = kwargs.pop(
"body_angle_units",
config.body_angle_units,
)
self.body_length_units = kwargs.pop(
"body_length_units",
config.body_length_units,
)
self.l_body = kwargs.pop("l_body", config.l_body)
self.w_body = kwargs.pop("w_body", config.w_body)
self.h_body = kwargs.pop("h_body", config.h_body)
self.l_hip = kwargs.pop("l_hip", config.l_hip)
self.l_femur = kwargs.pop("l_femur", config.l_femur)
self.l_leg = kwargs.pop("l_leg", config.l_leg)
self.l_rod = kwargs.pop("l_rod", config.l_rod)
self.l_rod_arm = kwargs.pop("l_rod_arm", config.l_rod_arm)
self.l_rod_femur = kwargs.pop("l_rod_femur", config.l_rod_femur)
self.h_rod_femur = kwargs.pop("h_rod_femur", config.h_rod_femur)
self.l_rod_leg = kwargs.pop("l_rod_leg", config.l_rod_leg)
self.cm_dx = kwargs.pop("cm_dx", config.cm_dx)
self.cm_dy = kwargs.pop("cm_dy", config.cm_dy)
self.cm_dz = kwargs.pop("cm_dz", config.cm_dz)
self.leg_alpha_min = kwargs.pop("leg_alpha_min", config.leg_alpha_min)
self.leg_alpha_max = kwargs.pop("leg_alpha_max", config.leg_alpha_max)
self.leg_beta_min = kwargs.pop("leg_beta_min", config.leg_beta_min)
self.leg_beta_max = kwargs.pop("leg_beta_max", config.leg_beta_max)
self.leg_gamma_min = kwargs.pop("leg_gamma_min", config.leg_gamma_min)
self.leg_gamma_max = kwargs.pop("leg_gamma_max", config.leg_gamma_max)
self._frozen = True
def __repr__(self) -> str:
return repr(self.__getstate__())
def __setattr__(self, __name: str, __value: Any) -> None:
if hasattr(self, "_frozen") and self._frozen:
raise FrozenError(
"BodyParameters objects are frozen and cannot be modified"
)
return super().__setattr__(__name, __value)
def __getitem__(self, key: str) -> float:
try:
return getattr(self, key)
except AttributeError:
raise KeyError(key)
def __getstate__(self) -> Dict[str, Any]:
state = {}
for name in (x for x in self.__slots__ if x != "_frozen"):
state[name] = getattr(self, name)
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
object.__setattr__(self, "_frozen", False)
for k, v in state.items():
setattr(self, k, v)
object.__setattr__(self, "_frozen", True)
def to_dict(self) -> Dict[str, float]:
"""Gets the parameters for this body in dictionary form.
Returns
-------
dict
The data dictionary representation of this object's data.
"""
return self.__getstate__()
@classmethod
def from_dict(cls, data: Dict[str, float]) -> "BodyParameters":
"""Instantiates a new object from the given data.
Parameters
----------
data : dict
The data to use to create the new body parameters object.
Returns
-------
BodyParameters
The new instance of the body parameters from the `data`
given.
"""
return cls(**data)
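# Hedged usage sketch (values assumed; defaults come from squad.config):
#   params = BodyParameters(l_body=250.0)   # override a single parameter
#   params.l_body = 300.0                   # raises FrozenError: instances freeze
#   clone = BodyParameters.from_dict(params.to_dict())   # dict round-trip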
| 32.198413
| 78
| 0.590338
| 536
| 4,057
| 4.083955
| 0.184701
| 0.090452
| 0.036546
| 0.023755
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.291348
| 4,057
| 125
| 79
| 32.456
| 0.761391
| 0.106729
| 0
| 0.045455
| 0
| 0
| 0.147668
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.034091
| 0.011364
| 0.215909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbcac82869e4955029c31c4d5ba367911fd7fe18
| 2,833
|
py
|
Python
|
deep_image_compression/single_psnr.py
|
LichengXiao2017/deep-image-compression
|
cf6e5699bad4d7b4a0dd8db6da72aa0c56e3d1e4
|
[
"MIT"
] | 9
|
2020-01-09T21:15:17.000Z
|
2022-02-08T12:41:54.000Z
|
deep_image_compression/single_psnr.py
|
LichengXiao2017/deep-image-compression
|
cf6e5699bad4d7b4a0dd8db6da72aa0c56e3d1e4
|
[
"MIT"
] | 8
|
2019-10-15T23:50:03.000Z
|
2021-11-10T19:40:15.000Z
|
deep_image_compression/single_psnr.py
|
LichengXiao2017/enas-image-compression
|
cf6e5699bad4d7b4a0dd8db6da72aa0c56e3d1e4
|
[
"MIT"
] | 3
|
2019-10-16T06:06:49.000Z
|
2020-07-06T15:02:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2019 Licheng Xiao. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy
import math
import cv2
import os
import logging
from os import listdir
from os.path import isfile, join
from absl import flags
flags.DEFINE_string("original_img", default=None,
help="Path for original image file.")
flags.DEFINE_string("compressed_img", default=None,
help="Path for compressed image file.")
flags.DEFINE_string("reconstructed_img", default=None,
help="Path for reconstructed image file.")
FLAGS = flags.FLAGS
class SingleEvaluator:
def get_psnr_msssim_bpp(self, original_img, reconstructed_img, compressed_img):
psnr = 0
msssim = 0
bpp = 0
try:
sess = tf.Session()
original = cv2.imread(original_img)
contrast = cv2.imread(reconstructed_img)
original = numpy.expand_dims(original, axis=0)
contrast = numpy.expand_dims(contrast, axis=0)
original_tensor = tf.convert_to_tensor(original, dtype=tf.uint8)
contrast_tensor = tf.convert_to_tensor(contrast, dtype=tf.uint8)
msssim_tensor = tf.image.ssim_multiscale(
original_tensor, contrast_tensor, 255)
psnr_tensor = tf.image.psnr(original_tensor, contrast_tensor, 255)
msssim = sess.run(msssim_tensor)
psnr = sess.run(psnr_tensor)
            _, h, w, _ = numpy.shape(contrast)  # (batch, height, width, channels)
bpp = os.path.getsize(compressed_img) * 8 / (h * w)
except Exception as e:
logging.error(e)
if psnr == 0:
logging.error('Error occurs, please check log for details.')
else:
            logging.info('psnr: %s\nms_ssim: %s\nbpp: %s', psnr, msssim, bpp)
return psnr, msssim, bpp
def main(_):
single_evaluator = SingleEvaluator()
single_evaluator.get_psnr_msssim_bpp(FLAGS.original_img,
FLAGS.reconstructed_img,
FLAGS.compressed_img)
if __name__ == "__main__":
tf.app.run()
| 37.276316
| 83
| 0.614896
| 342
| 2,833
| 4.947368
| 0.415205
| 0.035461
| 0.030142
| 0.031915
| 0.138889
| 0.044326
| 0
| 0
| 0
| 0
| 0
| 0.013094
| 0.27215
| 2,833
| 75
| 84
| 37.773333
| 0.807468
| 0.237204
| 0
| 0
| 0
| 0
| 0.098835
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.173077
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbcbbaa39a696df212fa670accc00b3ebb116dbd
| 984
|
py
|
Python
|
message/tests/utils/creators.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
message/tests/utils/creators.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
message/tests/utils/creators.py
|
ThePokerFaCcCe/messenger
|
2db3d5c2ccd05ac40d2442a13d664ca9ad3cb14c
|
[
"MIT"
] | null | null | null |
from message.models import Message, DeletedMessage, TextContent
from user.tests.utils import create_active_user
from conversation.tests.utils import create_private_chat
def create_text_content(text='hello') -> TextContent:
return TextContent.objects.create(text=text)
def create_message(sender=None, chat=None, content=None,
content_type=Message.ContentTypeChoices.TEXT
) -> Message:
sender = sender if sender else create_active_user()
data = {
'sender': sender,
'chat': chat if chat else create_private_chat(sender),
'content': content if content else create_text_content(),
'content_type': content_type,
}
return Message.objects.create(**data)
def create_deleted_msg(msg=None, user=None) -> DeletedMessage:
user = create_active_user() if not user else user
return DeletedMessage.objects.create(
message=msg if msg else create_message(sender=user),
user=user)
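# Hedged usage sketch inside a test case (helper names are the ones defined
# above; the surrounding TestCase is assumed):
#   msg = create_message()                  # builds sender, chat and content
#   deleted = create_deleted_msg(msg=msg)   # marks it deleted for a new user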
| 31.741935
| 65
| 0.707317
| 121
| 984
| 5.578512
| 0.247934
| 0.059259
| 0.071111
| 0.065185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204268
| 984
| 30
| 66
| 32.8
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.034553
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.047619
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbcbbc848a277e8f627eea2957cd5be0baa2e598
| 350
|
py
|
Python
|
excercises/6-0001/finger_excercises/iteration-1.py
|
obsessedyouth/simulacra
|
530155664daf1aff06cb575c4c4073acbacdb32d
|
[
"MIT"
] | null | null | null |
excercises/6-0001/finger_excercises/iteration-1.py
|
obsessedyouth/simulacra
|
530155664daf1aff06cb575c4c4073acbacdb32d
|
[
"MIT"
] | null | null | null |
excercises/6-0001/finger_excercises/iteration-1.py
|
obsessedyouth/simulacra
|
530155664daf1aff06cb575c4c4073acbacdb32d
|
[
"MIT"
] | null | null | null |
"""
Replace the comment in the following code with a while loop.
numXs = int(input('How many times should I print the letter X? '))
toPrint = "
#concatenate X to toPrint numXs times
print(toPrint)
"""
numXs = int(input('How many times should I print the letter X? '))
toPrint = ""
while numXs > 0:
toPrint += "X"
numXs -= 1
print(toPrint)
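# A minimal equivalent (sketch): Python's `*` operator repeats a string, so the
# whole loop above collapses to a single expression:
#   print("X" * numXs)
# (numXs must be read before the loop decrements it to 0.)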
| 20.588235
| 66
| 0.677143
| 54
| 350
| 4.388889
| 0.481481
| 0.067511
| 0.109705
| 0.135021
| 0.455696
| 0.455696
| 0.455696
| 0.455696
| 0.455696
| 0.455696
| 0
| 0.007194
| 0.205714
| 350
| 16
| 67
| 21.875
| 0.845324
| 0.548571
| 0
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbd1b55c9769411d9a683ac06b192b84c0c94cde
| 2,479
|
py
|
Python
|
FlaskApp/flask_server.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
FlaskApp/flask_server.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
FlaskApp/flask_server.py
|
pjneelam/pjneelam.eportfolio2022
|
3f55c1da6214e3eabab949ff83b34c0553c52866
|
[
"CC-BY-3.0"
] | null | null | null |
#to create the flask page
#import flask
#flask library was installed in the command line/computer terminal first
#Source: PythonHow https://pythonhow.com/python-tutorial/flask/How-making-a-website-with-Python-works/
#Python assigns the name "__main__" to the script when the script is executed.
#The debug parameter is set to true, to trace Python errors.
# To note: in a production environment, it must be set to False to avoid any security issues.
#returning HTML in Flask, create a homepage.html in another folder
#add render_template method
from flask import Flask, render_template
#pip install flask-mysqldb in cmd
#from flask_mysqldb import MySQL
#from mysql.connector.connection import MySQLConnection
#from sql_connection import get_sql_connection
#connection with mysql not established
app = Flask(__name__)
@app.route('/')
#to go directly to the home page, add another route
@app.route('/homepage')
def homepage():
return render_template('homepage.html')
#add another page: market page
@app.route('/flask_server')
#this python file should have been called Market (like the webpage created!!!)
#add list / dictionaries
#Iteration will be necessary - access in html
def market():
items = [
{'product_id': 1, 'product_name': 'rice', 'unit_id': '2', 'product_price_unit': 1.65},
{'product_id': 2, 'product_name': 'toothpaste', 'unit_id': '1', 'product_price_unit': 1.40},
{'product_id': 3, 'product_name': 'soap', 'unit_id': '1', 'product_price_unit': 0.45},
{'product_id': 4, 'product_name': 'toothbrush', 'unit_id': '1', 'product_price_unit': 1.20},
{'product_id': 5, 'product_name': 'flour', 'unit_id': '2', 'product_price_unit': 0.90},
{'product_id': 6, 'product_name': 'facemask', 'unit_id': '1', 'product_price_unit': 2.95}
]
#send some random data from Python to market.html: add key name 'items'
return render_template('market.html', items=items)
if __name__ == '__main__':
app.run(debug=True)
#to style your web page, can use styling framework "Bootstrap" - https://getbootstrap.com/docs/4.5/getting-started/introduction/#starter-template
#copy and paste it into the html page created
#IP/page set up: http://127.0.0.1:5000/
#page created: http://127.0.0.1:5000/market
#to synchronise your updates between the code and the web page, RUN the program and check that Debug mode is on in the Terminal below
#to turn it on, run code: set FLASK_DEBUG=1
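# Sketch of the Jinja loop that market.html would need in order to render the
# `items` list passed above (assumed template code, not part of this file):
#   {% for item in items %}
#     <p>{{ item.product_name }}: {{ item.product_price_unit }}</p>
#   {% endfor %}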
| 42.741379
| 146
| 0.709964
| 376
| 2,479
| 4.531915
| 0.422872
| 0.03169
| 0.056338
| 0.032864
| 0.098592
| 0.098592
| 0.028169
| 0
| 0
| 0
| 0
| 0.025716
| 0.168616
| 2,479
| 58
| 147
| 42.741379
| 0.801067
| 0.582493
| 0
| 0
| 0
| 0
| 0.403361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0.052632
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbd4c608ff119a6e8725d951c6333bfee210d76b
| 9,913
|
py
|
Python
|
appsite/resolver/models.py
|
inchiresolver/inchiresolver
|
6b3f080a4364ebe7499298e5a1b3cd4ed165322d
|
[
"BSD-3-Clause"
] | 3
|
2020-10-22T06:18:17.000Z
|
2021-03-19T16:49:00.000Z
|
appsite/resolver/models.py
|
inchiresolver/inchiresolver
|
6b3f080a4364ebe7499298e5a1b3cd4ed165322d
|
[
"BSD-3-Clause"
] | 11
|
2019-11-01T23:04:31.000Z
|
2022-02-10T12:32:11.000Z
|
appsite/resolver/models.py
|
inchiresolver/inchiresolver
|
6b3f080a4364ebe7499298e5a1b3cd4ed165322d
|
[
"BSD-3-Clause"
] | null | null | null |
import uuid
from urllib.parse import urljoin
from django.core.exceptions import FieldError
from multiselectfield import MultiSelectField
from rdkit import Chem
from django.db import models
from resolver import defaults
from inchi.identifier import InChIKey, InChI
class Inchi(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
version = models.IntegerField(db_index=True, default=1)
block1 = models.CharField(db_index=True, max_length=14)
block2 = models.CharField(db_index=True, max_length=10)
block3 = models.CharField(db_index=True, max_length=1)
key = models.CharField(max_length=27, blank=True, null=True)
string = models.CharField(max_length=32768, blank=True, null=True)
is_standard = models.BooleanField(default=False)
safe_options = models.CharField(db_index=True, max_length=2, default=None, null=True)
entrypoints = models.ManyToManyField('EntryPoint', related_name='inchis')
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'inchis'
class Meta:
unique_together = ('block1', 'block2', 'block3', 'version', 'safe_options')
verbose_name = "InChI"
@classmethod
def create(cls, *args, **kwargs):
if 'url_prefix' in kwargs:
url_prefix = kwargs.pop('url_prefix')
inchi = cls(*args, **kwargs)
else:
url_prefix = None
inchi = cls(*args, **kwargs)
k = None
s = None
if 'key' in kwargs and kwargs['key']:
k = InChIKey(kwargs['key'])
if 'string' in kwargs and kwargs['string']:
s = InChI(kwargs['string'])
_k = InChIKey(Chem.InchiToInchiKey(kwargs['string']))
if k:
if not k.element['well_formatted'] == _k.element['well_formatted']:
raise FieldError("InChI key does not represent InChI string")
else:
k = _k
inchi.key = k.element['well_formatted_no_prefix']
inchi.version = k.element['version']
inchi.is_standard = k.element['is_standard']
inchi.block1 = k.element['block1']
inchi.block2 = k.element['block2']
inchi.block3 = k.element['block3']
if s:
inchi.string = s.element['well_formatted']
#if url_prefix:
# inchi.id = uuid.uuid5(uuid.NAMESPACE_URL, urljoin(url_prefix, inchi.key))
#else:
inchi.id = uuid.uuid5(uuid.NAMESPACE_URL, "/".join([
inchi.key,
str(kwargs.get('safe_options', None)),
]))
return inchi
def __str__(self):
return self.key
class Organization(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
parent = models.ForeignKey('self', related_name='children', on_delete=models.SET_NULL, blank=True, null=True)
name = models.CharField(max_length=32768)
abbreviation = models.CharField(max_length=32, blank=True, null=True)
category = models.CharField(max_length=16, choices=(
('regulatory', 'Regulatory'),
('government', 'Government'),
('academia', 'Academia'),
('company', 'Company'),
('vendor', 'Vendor'),
('research', 'Research'),
('publishing', 'Publishing'),
('provider', 'Provider'),
('public', 'Public'),
('society', "Society"),
('charity', "Charity"),
('other', 'Other'),
('none', 'None'),
), default='none')
href = models.URLField(max_length=4096, blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'organizations'
class Meta:
unique_together = ('parent', 'name')
@classmethod
def create(cls, *args, **kwargs):
organization = cls(*args, **kwargs)
organization.id = uuid.uuid5(uuid.NAMESPACE_URL, kwargs.get('name'))
return organization
def __str__(self):
return self.name
class Publisher(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
parent = models.ForeignKey('self', related_name='children', on_delete=models.SET_NULL, null=True)
organization = models.ForeignKey('Organization', related_name='publishers', on_delete=models.SET_NULL, null=True)
category = models.CharField(max_length=16, choices=(
('entity', 'Entity'),
('service', 'Service'),
('network', 'Network'),
('division', 'Division'),
('group', 'Group'),
('person', 'Person'),
('other', 'Other'),
('none', 'None'),
), default='none')
name = models.CharField(max_length=1024)
email = models.EmailField(max_length=254, blank=True, null=True)
address = models.CharField(max_length=8192, blank=True, null=True)
href = models.URLField(max_length=4096, blank=True, null=True)
orcid = models.URLField(max_length=4096, blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'publishers'
class Meta:
unique_together = ('organization', 'parent', 'name', 'href', 'orcid')
@classmethod
def create(cls, *args, **kwargs):
publisher = cls(*args, **kwargs)
publisher.id = uuid.uuid5(uuid.NAMESPACE_URL, "/".join([
str(kwargs.get('organization', None)),
str(kwargs.get('parent', None)),
str(kwargs.get('href', None)),
str(kwargs.get('orcid', None)),
kwargs.get('name')
]))
return publisher
def __str__(self):
return "%s[%s]" % (self.name, self.category)
class EntryPoint(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
parent = models.ForeignKey('self', on_delete=models.SET_NULL, related_name='children', null=True)
category = models.CharField(max_length=16, choices=(
('self', 'Self'),
('site', 'Site'),
('api', 'API'),
('resolver', 'Resolver'),
), default='site')
publisher = models.ForeignKey("Publisher", related_name="entrypoints", on_delete=models.SET_NULL, null=True)
href = models.URLField(max_length=4096)
entrypoint_href = models.URLField(max_length=4096, blank=True, null=True)
name = models.CharField(max_length=255, blank=True, null=True)
description = models.TextField(max_length=32768, blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'entrypoints'
class Meta:
unique_together = ('parent', 'publisher', 'href')
@classmethod
def create(cls, *args, **kwargs):
entrypoint = cls(*args, **kwargs)
entrypoint.id = uuid.uuid5(uuid.NAMESPACE_URL, "/".join([
str(kwargs.get('parent', None)),
str(kwargs.get('publisher')),
kwargs.get('href'),
]))
return entrypoint
def __str__(self):
return "%s [%s]" % (self.publisher, self.href)
class EndPoint(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
entrypoint = models.ForeignKey('EntryPoint', related_name='endpoints', on_delete=models.SET_NULL, null=True)
uri = models.CharField(max_length=32768)
accept_header_media_types = models.ManyToManyField('MediaType', related_name='accepting_endpoints')
content_media_types = models.ManyToManyField('MediaType', related_name='delivering_endpoints')
request_schema_endpoint = models.ForeignKey('EndPoint', related_name='schema_requesting_endpoints',
on_delete=models.SET_NULL, null=True)
response_schema_endpoint = models.ForeignKey('EndPoint', related_name='schema_responding_endpoints',
on_delete=models.SET_NULL, null=True)
category = models.CharField(max_length=16, choices=(
('schema', 'Schema'),
('uritemplate', 'URI Template (RFC6570)'),
('documentation', 'Documentation (HTML, PDF)'),
), default='uritemplate')
request_methods = MultiSelectField(choices=defaults.http_verbs, default=['GET'])
description = models.TextField(max_length=32768, blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'endpoints'
class Meta:
unique_together = ('entrypoint', 'uri')
def full_path_uri(self):
if self.entrypoint:
return self.entrypoint.href + "/" + self.uri
else:
return self.uri
@classmethod
def create(cls, *args, **kwargs):
endpoint = cls(*args, **kwargs)
endpoint.id = uuid.uuid5(uuid.NAMESPACE_URL, "/".join([
str(kwargs.get('entrypoint')),
kwargs.get('uri'),
]))
return endpoint
def __str__(self):
return "%s[%s]" % (self.entrypoint, self.uri)
class MediaType(models.Model):
id = models.UUIDField(primary_key=True, editable=False)
name = models.CharField(max_length=1024, blank=False, null=False, unique=True)
description = models.TextField(max_length=32768, blank=True, null=True)
added = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
class JSONAPIMeta:
resource_name = 'mediatypes'
@classmethod
def create(cls, *args, **kwargs):
mediatype = cls(*args, **kwargs)
mediatype.id = uuid.uuid5(uuid.NAMESPACE_URL, "/".join([
str(kwargs.get('name'))
]))
return mediatype
def __str__(self):
return "%s" % self.name
| 36.988806
| 117
| 0.635226
| 1,115
| 9,913
| 5.497758
| 0.161435
| 0.038173
| 0.02969
| 0.038825
| 0.526591
| 0.496411
| 0.439804
| 0.368679
| 0.320065
| 0.29217
| 0
| 0.01436
| 0.227277
| 9,913
| 267
| 118
| 37.127341
| 0.785901
| 0.009684
| 0
| 0.336364
| 0
| 0
| 0.118708
| 0.007948
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059091
| false
| 0
| 0.036364
| 0.027273
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbd76e1f33835fcc21edddd8477a6604c70dcdb3
| 5,143
|
py
|
Python
|
src/core.py
|
z62060037/ArtStationDownloader
|
f6e8a657dfd3584cbf870470f1b19dc4edf54e92
|
[
"MIT"
] | 1
|
2019-04-19T10:14:49.000Z
|
2019-04-19T10:14:49.000Z
|
src/core.py
|
z62060037/ArtStationDownloader
|
f6e8a657dfd3584cbf870470f1b19dc4edf54e92
|
[
"MIT"
] | null | null | null |
src/core.py
|
z62060037/ArtStationDownloader
|
f6e8a657dfd3584cbf870470f1b19dc4edf54e92
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""内核方法
Copyright 2018-2019 Sean Feng(sean@FantaBlade.com)
"""
import os
import re
from concurrent import futures
from multiprocessing import cpu_count
from urllib.parse import urlparse
import pafy
import requests
class Core:
def log(self, message):
print(message)
def __init__(self, log_print=None):
if log_print:
global print
print = log_print
max_workers = cpu_count()*4
self.executor = futures.ThreadPoolExecutor(max_workers)
self.executor_video = futures.ThreadPoolExecutor(1)
self.root_path = None
self.futures = []
def download_file(self, url, file_path, file_name):
file_full_path = os.path.join(file_path, file_name)
if os.path.exists(file_full_path):
self.log('[Exist][image][{}]'.format(file_full_path))
else:
r = requests.get(url)
os.makedirs(file_path, exist_ok=True)
with open(file_full_path, "wb") as code:
code.write(r.content)
self.log('[Finish][image][{}]'.format(file_full_path))
def download_video(self, id, file_path):
file_full_path = os.path.join(file_path, "{}.{}".format(id, 'mp4'))
if os.path.exists(file_full_path):
self.log('[Exist][video][{}]'.format(file_full_path))
else:
video = pafy.new(id)
best = video.getbest(preftype="mp4")
r = requests.get(best.url)
os.makedirs(file_path, exist_ok=True)
with open(file_full_path, "wb") as code:
code.write(r.content)
self.log('[Finish][video][{}]'.format(file_full_path))
def download_project(self, hash_id):
url = 'https://www.artstation.com/projects/{}.json'.format(hash_id)
r = requests.get(url)
j = r.json()
assets = j['assets']
title = j['slug'].strip()
# self.log('=========={}=========='.format(title))
username = j['user']['username']
for asset in assets:
assert(self.root_path)
user_path = os.path.join(self.root_path, username)
os.makedirs(user_path, exist_ok=True)
file_path = os.path.join(user_path, title)
if not self.no_image and asset['has_image']: # contains an image
url = asset['image_url']
file_name = urlparse(url).path.split('/')[-1]
try:
self.futures.append(self.executor.submit(self.download_file,
url, file_path, file_name))
except Exception as e:
print(e)
if not self.no_video and asset['has_embedded_player']: # contains an embedded video
player_embedded = asset['player_embedded']
id = re.search(
r'(?<=https://www\.youtube\.com/embed/)[\w_]+', player_embedded).group()
try:
self.futures.append(self.executor_video.submit(
self.download_video, id, file_path))
except Exception as e:
print(e)
def get_projects(self, username):
data = []
if username != '':
page = 0
while True:
page += 1
url = 'https://www.artstation.com/users/{}/projects.json?page={}'.format(
username, page)
r = requests.get(url)
if not r.ok:
self.log("[Error] Please input right username")
break
j = r.json()
total_count = int(j['total_count'])
if total_count == 0:
self.log("[Error] Please input right username")
break
if page == 1:
self.log('\n==========[{}] BEGIN=========='.format(username))
data_fragment = j['data']
data += data_fragment
self.log('\n==========Get page {}/{}=========='.format(page,
total_count // 50 + 1))
if page > total_count / 50:
break
return data
def download_by_username(self, username):
data = self.get_projects(username)
if len(data) != 0:
future_list = []
for project in data:
future = self.executor.submit(
self.download_project, project['hash_id'])
future_list.append(future)
futures.wait(future_list)
def download_by_usernames(self, usernames, type):
self.no_image = type == 'video'
self.no_video = type == 'image'
# de-duplicate usernames and strip any pasted profile URLs
username_set = set()
for username in usernames:
username = username.strip().split('/')[-1]
if username not in username_set:
username_set.add(username)
self.download_by_username(username)
futures.wait(self.futures)
self.log("\n========ALL DONE========")
| 37.816176
| 92
| 0.517986
| 577
| 5,143
| 4.448873
| 0.247834
| 0.029996
| 0.046747
| 0.021815
| 0.298403
| 0.22127
| 0.155045
| 0.155045
| 0.099727
| 0.099727
| 0
| 0.007503
| 0.352129
| 5,143
| 135
| 93
| 38.096296
| 0.762905
| 0.028194
| 0
| 0.224138
| 0
| 0
| 0.101063
| 0
| 0
| 0
| 0
| 0
| 0.008621
| 1
| 0.068966
| false
| 0
| 0.060345
| 0
| 0.146552
| 0.060345
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbdb59840ecfefbcddcb7e8ef4a69bf99648feb6
| 1,953
|
py
|
Python
|
notebooks/debug_monuseg.py
|
voreille/2d_bispectrum_cnn
|
ba8f26f6a557602bc3343c4562c83a3de914c67e
|
[
"MIT"
] | null | null | null |
notebooks/debug_monuseg.py
|
voreille/2d_bispectrum_cnn
|
ba8f26f6a557602bc3343c4562c83a3de914c67e
|
[
"MIT"
] | null | null | null |
notebooks/debug_monuseg.py
|
voreille/2d_bispectrum_cnn
|
ba8f26f6a557602bc3343c4562c83a3de914c67e
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import numpy as np
from PIL import Image, ImageSequence
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_io as tfio
from scipy.ndimage import rotate
from src.data.monuseg import get_dataset, tf_random_rotate, tf_random_crop
ds = get_dataset()
def random_crop(image, segmentation, size=(256, 256), rotation=False):
image_height, image_width, _ = image.shape
radius = np.sqrt(size[0]**2 + size[1]**2) / 2
if rotation:
angle = np.random.uniform(-180, 180)
dx = int((2 * radius - size[0]) // 2)
dy = int((2 * radius - size[1]) // 2)
else:
dx, dy = 0, 0
offset_height = np.random.randint(dx, high=image_height - size[0] - dx)
offset_width = np.random.randint(dy, high=image_width - size[1] - dy)
if rotation:
image_cropped = image[offset_height - dx:offset_height + dx + size[0],
offset_width - dy:offset_width + dy + size[1]]
seg_cropped = segmentation[offset_height - dx:offset_height + dx +
size[0], offset_width - dy:offset_width +
dy + size[1]]
image_rotated = rotate(image_cropped, angle, reshape=False, order=1)
seg_rotated = rotate(seg_cropped, angle, reshape=False, order=1)
seg_rotated = tf.where(seg_rotated > 0.5, x=1.0, y=0.0)
return (
image_rotated[dx:dx + size[0], dy:dy + size[1]],
seg_rotated[dx:dx + size[0], dy:dy + size[1]],
)
else:
return (image[offset_height:offset_height + size[0],
offset_width:offset_width + size[1]],
segmentation[offset_height:offset_height + size[0],
offset_width:offset_width + size[1]])
image, mask = next(ds.as_numpy_iterator())
image, mask = random_crop(image, mask, rotation=True)
print(f"yo la shape de liamg cest {image.shape}")
| 39.06
| 78
| 0.612391
| 273
| 1,953
| 4.21978
| 0.271062
| 0.039063
| 0.048611
| 0.055556
| 0.321181
| 0.321181
| 0.321181
| 0.321181
| 0.251736
| 0.208333
| 0
| 0.032982
| 0.270353
| 1,953
| 50
| 79
| 39.06
| 0.775439
| 0
| 0
| 0.097561
| 0
| 0
| 0.019959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0.195122
| 0
| 0.268293
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbddc18cfd9ad9a4f3ecf0a646125463caad5351
| 870
|
py
|
Python
|
investmap/migrations/0004_investmapdescriptiontabs.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
investmap/migrations/0004_investmapdescriptiontabs.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
investmap/migrations/0004_investmapdescriptiontabs.py
|
30Meridian/RozumneMistoSnapshot
|
67a83b3908674d01992561dfb37596e395b4d482
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import ckeditor_uploader.fields
class Migration(migrations.Migration):
dependencies = [
('weunion', '0001_initial'),
('investmap', '0003_auto_20161205_1926'),
]
operations = [
migrations.CreateModel(
name='InvestMapDescriptionTabs',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
('slug', models.CharField(unique=True, max_length=32)),
('description', ckeditor_uploader.fields.RichTextUploadingField()),
('town', models.ForeignKey(to='weunion.Town')),
],
options={
'db_table': 'investmap_descriptions',
},
),
]
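# To apply this migration (standard Django workflow, shown as a sketch):
#   python manage.py migrate investmap 0004_investmapdescriptiontabs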
| 30
| 114
| 0.589655
| 75
| 870
| 6.613333
| 0.706667
| 0.064516
| 0.08871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036741
| 0.28046
| 870
| 28
| 115
| 31.071429
| 0.755591
| 0.024138
| 0
| 0
| 0
| 0
| 0.165289
| 0.081464
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.136364
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe19a963ebbfb67a87214650a7ef4c055ac8952
| 2,071
|
py
|
Python
|
render/model.py
|
kennyngod/portfoliov2
|
3b931c35c342bfeea18fd2d97eadc65ed57c56a7
|
[
"CC-BY-3.0"
] | 1
|
2022-02-22T07:19:16.000Z
|
2022-02-22T07:19:16.000Z
|
render/model.py
|
kennyngod/portfoliov2
|
3b931c35c342bfeea18fd2d97eadc65ed57c56a7
|
[
"CC-BY-3.0"
] | null | null | null |
render/model.py
|
kennyngod/portfoliov2
|
3b931c35c342bfeea18fd2d97eadc65ed57c56a7
|
[
"CC-BY-3.0"
] | null | null | null |
"""Mostly helper functions to help with the driver."""
import json
import os
import pathlib
import sqlite3
import arrow # type: ignore
def sql_db():
"""Open a SQL connection and perform a query."""
db_path = pathlib.Path(os.getcwd())
db_path = pathlib.Path(db_path/'sql'/'portfolio.sqlite3')
con = sqlite3.connect(str(db_path))
cur = con.cursor()
cur.execute(
"SELECT * "
"FROM skills "
"ORDER BY meter DESC"
)
skills = cur.fetchall()
con.close()
return skills
def create_json():
"""Create a context JSON file to render to jinja."""
skills_db = sql_db()
skills = []
for skill_db in skills_db:
skill = {}
skill['language'] = skill_db[0]
skill['time'] = get_time(skill_db[1])
skill['proficiency'] = skill_db[2]
skill['meter'] = skill_db[3]
skill['description'] = skill_db[4]
skill['filelink'] = skill_db[5]
if skill_db[6]:
skill['framework'] = skill_db[6]
skills.append(skill)
context = {"skills": skills}
data = {"template": "index.html", "context": context}
data = json.dumps(data)
# write to json file
path = pathlib.Path(os.getcwd())
path = str(path/'render/context.json')
with open(path, 'w+', encoding='utf-8') as outfile:
outfile.write(data)
def get_time(db_time):
"""Calculate the time difference from now to start time in database."""
now = arrow.now().format("YYYY-MM-DD")
arr_now = now.split('-')
arr_time = db_time.split('-')
time_diff = []
for time_now, time_time in zip(arr_now, arr_time):
time_now = int(time_now)
time_time = int(time_time)
diff = abs(time_now - time_time)
time_diff.append(round(diff))
# don't care about the day
# check year
if time_diff[0] != 0:
if time_diff[0] == 1:
return f'{time_diff[0]} year'
return f'{time_diff[0]} years'
if time_diff[1] == 1:
return f'{time_diff[1]} month'
return f'{time_diff[1]} months'
| 29.169014
| 75
| 0.598262
| 291
| 2,071
| 4.109966
| 0.364261
| 0.06689
| 0.0301
| 0.050167
| 0.093645
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014342
| 0.259295
| 2,071
| 70
| 76
| 29.585714
| 0.765319
| 0.129406
| 0
| 0
| 0
| 0
| 0.14896
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053571
| false
| 0
| 0.089286
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe3022d59c8c55da8fc827a792032bc2f9f1ed9
| 1,053
|
py
|
Python
|
codes/exact/matStuff/lambdaFlucSparseSysRep.py
|
joshuahellier/PhDStuff
|
6fbe9e507c40e9017cde9312b0cfcc6ceefa284e
|
[
"MIT"
] | null | null | null |
codes/exact/matStuff/lambdaFlucSparseSysRep.py
|
joshuahellier/PhDStuff
|
6fbe9e507c40e9017cde9312b0cfcc6ceefa284e
|
[
"MIT"
] | null | null | null |
codes/exact/matStuff/lambdaFlucSparseSysRep.py
|
joshuahellier/PhDStuff
|
6fbe9e507c40e9017cde9312b0cfcc6ceefa284e
|
[
"MIT"
] | null | null | null |
import subprocess
import sys
import os
import math
# This code is meant to manage running multiple instances of my KMCLib codes at the same time,
# in the name of time efficiency
numLambda = 256
sysSize = 5
numVecs = 1
dataLocation = "exactSolns/thesisCorrections/low"
lambdaMin = 10.0**(-4)
lambdaMax = 10.0**(4)
rateStepSize = (lambdaMax-lambdaMin)/float(numLambda-1)
jobIndex = 513
botConc = 0.3
topConc = 0.1
boundMult = 1000.0
tolerance = 10.0**(-18)
runningJobs = []
for rateIndex in range(0, numLambda):
tempRate = lambdaMin + rateStepSize*rateIndex
# currentRate = tempRate
currentRate = math.exp(((tempRate-lambdaMin)*math.log(lambdaMax)+(lambdaMax-tempRate)*math.log(lambdaMin))/(lambdaMax-lambdaMin))
jobInput = "simpleGroundStateFinder.py "+str(botConc)+" "+str(topConc)+" "+str(currentRate)+" "+str(sysSize)+" "+str(numVecs)+" "+str(boundMult)+" "+str(tolerance)+" "+str(1)+" "+dataLocation+str(rateIndex)+"\n"
with open("jobInputs/testInput."+str(jobIndex), 'w') as f:
f.write(jobInput)
jobIndex += 1
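# Sanity check of the log-space interpolation above (sketch): the exponent is a
# linear blend of log(lambdaMin) and log(lambdaMax), so the endpoints map to
# themselves exactly:
#   tempRate = lambdaMin -> exp(log(lambdaMin)) = lambdaMin
#   tempRate = lambdaMax -> exp(log(lambdaMax)) = lambdaMax
# and intermediate rateIndex values are spaced geometrically between 1e-4 and 1e4.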
| 33.967742
| 215
| 0.706553
| 135
| 1,053
| 5.511111
| 0.533333
| 0.012097
| 0.010753
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037569
| 0.140551
| 1,053
| 30
| 216
| 35.1
| 0.78453
| 0.1415
| 0
| 0
| 0
| 0
| 0.1
| 0.064444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe58ee718d54a29743fdde56951e945cc81bce6
| 378
|
py
|
Python
|
examples/chart-types/pie_chart.py
|
tcbegley/dash-google-charts
|
b8b22e5b6bac533167f218e3610697dec0c3e4ca
|
[
"Apache-2.0"
] | 6
|
2019-01-23T17:37:09.000Z
|
2020-11-17T16:12:27.000Z
|
examples/chart-types/pie_chart.py
|
tcbegley/dash-google-charts
|
b8b22e5b6bac533167f218e3610697dec0c3e4ca
|
[
"Apache-2.0"
] | 9
|
2019-01-25T11:09:17.000Z
|
2022-02-26T09:10:04.000Z
|
examples/chart-types/pie_chart.py
|
tcbegley/dash-google-charts
|
b8b22e5b6bac533167f218e3610697dec0c3e4ca
|
[
"Apache-2.0"
] | 1
|
2019-01-23T17:37:12.000Z
|
2019-01-23T17:37:12.000Z
|
import dash
from dash_google_charts import PieChart
app = dash.Dash()
app.layout = PieChart(
height="500px",
data=[
["Task", "Hours per Day"],
["Work", 11],
["Eat", 2],
["Commute", 2],
["Watch TV", 2],
["Sleep", 7],
],
options={"title": "My Daily Activities"},
)
if __name__ == "__main__":
app.run_server()
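# Usage note (sketch): running this script starts the Dash development server,
# which by default listens on http://127.0.0.1:8050/ where the pie chart renders.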
| 18
| 45
| 0.518519
| 43
| 378
| 4.302326
| 0.790698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033708
| 0.293651
| 378
| 20
| 46
| 18.9
| 0.659176
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.117647
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe8dbb33350754634ad5a39bc45f35bec1cec43
| 4,183
|
py
|
Python
|
threats-monitoring/modules/thehive.py
|
filippostz/McAfee-MVISION-EDR-Integrations
|
0fbe1af15f844b796337ccd2ff219a0c4e625846
|
[
"Apache-2.0"
] | null | null | null |
threats-monitoring/modules/thehive.py
|
filippostz/McAfee-MVISION-EDR-Integrations
|
0fbe1af15f844b796337ccd2ff219a0c4e625846
|
[
"Apache-2.0"
] | null | null | null |
threats-monitoring/modules/thehive.py
|
filippostz/McAfee-MVISION-EDR-Integrations
|
0fbe1af15f844b796337ccd2ff219a0c4e625846
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Written by mohlcyber v.0.1 (15.04.2020)
# Edited by filippostz v.0.2 (24.09.2021)
import random
import sys
import socket
import requests
import json
import re
import smtplib
from datetime import datetime
from urllib.parse import urljoin
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
#Used for comments in Cases and Email
EDR_URL = 'https://ui.soc.eu-central-1.mcafee.com/monitoring/'
class TheHive():
def __init__(self, kwargs):
self.base_url = kwargs.get('url')
self.port = kwargs.get('port')
self.session = requests.Session()
self.verify = False
token = kwargs.get('token')
self.headers = {'Authorization': 'Bearer {0}'.format(token),
'Content-Type': 'application/json'}
self.artifacts = []
def create(self, event, eventType = "case"):
if eventType != "case" and eventType != "alert":
return 1
else:
try:
name = str(event['name'])
edr_severity = str(event['severity'])
if edr_severity == 's4' or edr_severity == 's5':
severity = 3
elif edr_severity == 's2' or edr_severity == 's3':
severity = 2
else:
severity = 1
self.artifacts.append(event['hashes']['md5'])
self.artifacts.append(event['hashes']['sha1'])
self.artifacts.append(event['hashes']['sha256'])
payload = {
'title': 'MVISION EDR Threat Detection - {0}'.format(name),
'description': 'This case has been created by MVISION EDR',
'severity': severity,
'type':'Detection',
'source':'edr',
'sourceRef':'ref-' + str(random.randint(10000, 99000)),
'tlp': 3,
'tags': ['edr', 'threat']
}
print('{0}:{1}/thehive/api/{2}'.format(self.base_url, self.port, eventType))
res = self.session.post('{0}:{1}/thehive/api/{2}'.format(self.base_url, self.port, eventType),
headers=self.headers, data=json.dumps(payload), verify=self.verify)
if res.ok:
print('SUCCESS: Successfully created case in TheHive - {0}.'.format(str(self.base_url)))
eventId = res.json()['id']
for artifact in self.artifacts:
self.add_observable(eventId, eventType, artifact)
else:
print('ERROR: HTTP {0} - {1}'.format(str(res.status_code), res.content))
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("ERROR: SNOW Error in {location}.{funct_name}() - line {line_no} : {error}"
.format(location=__name__, funct_name=sys._getframe().f_code.co_name, line_no=exc_tb.tb_lineno,
error=str(e)))
def add_observable(self, eventId, eventType, artifact):
try:
payload = {
'dataType': 'hash',
'data': artifact,
'ioc': True,
'tlp': 3,
'tags': ['edr', 'threat'],
'message': 'MVISION EDR Threat Detection'
}
print('{0}:{1}/thehive/api/{2}/{3}/artifact'.format(self.base_url, self.port, eventType, str(eventId)))
self.session.post('{0}:{1}/thehive/api/{2}/{3}/artifact'.format(self.base_url, self.port, eventType, str(eventId)),
headers=self.headers, data=json.dumps(payload), verify=self.verify)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("ERROR: SNOW Error in {location}.{funct_name}() - line {line_no} : {error}"
.format(location=__name__, funct_name=sys._getframe().f_code.co_name, line_no=exc_tb.tb_lineno,
error=str(e)))
def run(self, event):
self.create(event)
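# Hypothetical usage sketch (all values are placeholders, not from the
# original integration):
#   hive = TheHive({'url': 'https://thehive.example', 'port': 9000,
#                   'token': '<api-token>'})
#   hive.run({'name': 'Malicious PowerShell', 'severity': 's4',
#             'hashes': {'md5': '...', 'sha1': '...', 'sha256': '...'}})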
| 41.83
| 127
| 0.531198
| 469
| 4,183
| 4.624733
| 0.336887
| 0.005533
| 0.030429
| 0.02213
| 0.394652
| 0.337483
| 0.332872
| 0.332872
| 0.319041
| 0.319041
| 0
| 0.023656
| 0.333015
| 4,183
| 99
| 128
| 42.252525
| 0.753763
| 0.032752
| 0
| 0.253012
| 0
| 0
| 0.182133
| 0.041574
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048193
| false
| 0
| 0.13253
| 0
| 0.204819
| 0.072289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe9079acec9fc7d47c390d5c89d9f262c9f1f50
| 518
|
py
|
Python
|
src/config.py
|
psu-os-rss/Rock-Paper-and-Scissors
|
05e9f51978cae1f05c9f06a71d9822ccfedbc5e1
|
[
"MIT"
] | null | null | null |
src/config.py
|
psu-os-rss/Rock-Paper-and-Scissors
|
05e9f51978cae1f05c9f06a71d9822ccfedbc5e1
|
[
"MIT"
] | 6
|
2020-08-03T20:55:44.000Z
|
2020-08-13T22:03:13.000Z
|
src/config.py
|
psu-os-rss/Rock-Paper-and-Scissors
|
05e9f51978cae1f05c9f06a71d9822ccfedbc5e1
|
[
"MIT"
] | null | null | null |
#parameters
accumulated_weight = 0.5
detector_u = 50
detector_b = 350
detector_r = 300
detector_l = 600
message_x = 10
message_y = 400
date_x = 0
date_y = 450
threshold_min=22
rate = 0.8
RGB_INT_MAX = 255
RGB_INT_MIN = 0
RGB_FLT_MAX = 255.0
RGB_FLT_MIN = 0.0
Blur_value = 7
text_color = (200,50,150)
rectangle_color = (0,0,255)
rectangle_thickness = 5
processing_frame = 35
font_scale = 0.7
thickness = 2
cv2adaptive_block = 11
cv2adaptive_param = 2
erodtime = 1
dilatetime = 2
circle_thickness = 10
circle_rate = 0.25
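# Hypothetical usage sketch (not part of the original file): the detector_*
# values above describe the hand-detection ROI, e.g. drawn with OpenCV as
#   cv2.rectangle(frame, (detector_r, detector_u), (detector_l, detector_b),
#                 rectangle_color, rectangle_thickness)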
| 17.862069
| 27
| 0.758687
| 95
| 518
| 3.842105
| 0.578947
| 0.027397
| 0.038356
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155963
| 0.158301
| 518
| 29
| 28
| 17.862069
| 0.681193
| 0.019305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbe94404de84755169d02669d387f24583e7d3f0
| 1,309
|
py
|
Python
|
ejercicios/arreglos/perimetro.py
|
leugimkm/Soluciones
|
d71601c8d9b5e86e926f48d9e49462af8a956b6d
|
[
"MIT"
] | 1
|
2022-02-02T04:44:56.000Z
|
2022-02-02T04:44:56.000Z
|
ejercicios/arreglos/perimetro.py
|
leugimkm/Soluciones
|
d71601c8d9b5e86e926f48d9e49462af8a956b6d
|
[
"MIT"
] | null | null | null |
ejercicios/arreglos/perimetro.py
|
leugimkm/Soluciones
|
d71601c8d9b5e86e926f48d9e49462af8a956b6d
|
[
"MIT"
] | null | null | null |
"""AyudaEnPython: https://www.facebook.com/groups/ayudapython
Generate a 25 x 40 matrix with random decimal numbers between 0 and 1.
Display the numbers on the perimeter and compute their sum.
"""
from random import random
from prototools import show_matrix
def solver_a():
"""
>>> solver_a()
[1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 6, 11, 10, 15]
147
"""
#arr = [[random() for _ in range(40)] for _ in range(25)]
arr = [
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
]
r_arr = list(map(list, zip(*arr)))
perimetro = [*arr[0], *arr[-1], *r_arr[0][1:-1], *r_arr[-1][1:-1]]
print(perimetro)
print(sum(perimetro))
def solver_b():
arr = [
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
]
r_arr = list(map(list, zip(*arr)))
perimetro = [*arr[0], *arr[-1], *r_arr[0][1:-1], *r_arr[-1][1:-1]]
t = [[0 for _ in range(5)] for _ in range(4)]
t[0] = arr[0]
t[-1] = arr[-1]
for i in range(1, len(t[0]) - 1):
t[i][0] = r_arr[0][i]
t[i][-1] = r_arr[-1][i]
show_matrix(t)
print(sum(perimetro))
if __name__ == "__main__":
import doctest
doctest.testmod()
# solver_a()
# solver_b()
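# A compact alternative (sketch, assuming the same rectangular matrix): the
# perimeter sum is the total of all elements minus the interior elements.
def perimetro_alt(arr):
    interior = sum(sum(row[1:-1]) for row in arr[1:-1])
    return sum(map(sum, arr)) - interior
# For the 4 x 5 example above: 210 total - 63 interior == 147, matching the doctest.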
| 23.375
| 71
| 0.50573
| 221
| 1,309
| 2.873303
| 0.348416
| 0.050394
| 0.03937
| 0.018898
| 0.294488
| 0.270866
| 0.270866
| 0.270866
| 0.270866
| 0.270866
| 0
| 0.135974
| 0.286478
| 1,309
| 55
| 72
| 23.8
| 0.543897
| 0.250573
| 0
| 0.484848
| 0
| 0
| 0.00843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.090909
| 0
| 0.151515
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbec39b874803a9ced574ab89af24276b12b55c2
| 4,698
|
py
|
Python
|
process.py
|
bisi-dev/wa-analytics
|
a657fd793a59fa551d5755877c4e6c814bc3d17c
|
[
"Apache-2.0"
] | 1
|
2022-01-09T21:57:56.000Z
|
2022-01-09T21:57:56.000Z
|
process.py
|
bisi-dev/wa-analytics
|
a657fd793a59fa551d5755877c4e6c814bc3d17c
|
[
"Apache-2.0"
] | null | null | null |
process.py
|
bisi-dev/wa-analytics
|
a657fd793a59fa551d5755877c4e6c814bc3d17c
|
[
"Apache-2.0"
] | null | null | null |
# import modules
import os
import re
import glob
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from wordcloud import WordCloud
class Analyse:
# Data Cleaning Function
def raw_to_df(self, file, key):
global df
# Time formatting
split_formats = {
"12hr": "\d{1,2}/\d{1,2}/\d{2,4},\s\d{1,2}:\d{2}\s[APap][mM]\s-\s",
"24hr": "\d{1,2}/\d{1,2}/\d{2,4},\s\d{1,2}:\d{2}\s-\s",
"custom": "",
}
datetime_formats = {
"12hr": "%m/%d/%y, %I:%M %p - ",
"24hr": "%m/%d/%y, %H:%M - ",
"custom": "",
}
with open(file, "r", encoding="utf8") as raw_data:
# Converting the list split by newline char as one whole string
# As there can be multi-line messages
raw_string = " ".join(raw_data.read().split("\n"))
# Splits at all the date-time pattern,
# resulting in list of all the messages with user names
user_msg = re.split(split_formats[key], raw_string)[1:]
# Finds all the date-time patterns
date_time = re.findall(split_formats[key], raw_string)
# Export it to a df
df = pd.DataFrame({"date_time": date_time, "user_msg": user_msg})
# Converting date-time pattern which is of type String to datetime,
# Format is to be specified for the whole string
# where the placeholders are extracted by the method
df["date_time"] = pd.to_datetime(
df["date_time"], format=datetime_formats[key]
)
# Split user and msg
usernames = []
msgs = []
for i in df["user_msg"]:
# Lazy pattern match to first {user_name}
# pattern and splitting each msg from a user
a = re.split("([\w\W]+?):\s", i)
# User typed messages
if a[1:]:
usernames.append(a[1])
msgs.append(a[2])
# Other notifications in the group(someone was added, some left...)
else:
usernames.append("grp_notif")
msgs.append(a[0])
# Creating new columns
df["user"] = usernames
df["msg"] = msgs
# Dropping the old user_msg col.
df.drop("user_msg", axis=1, inplace=True)
# Group Notifications
grp_notif = df[df["user"] == "grp_notif"]
# Media
# no. of images, images are represented by <media omitted>
media = df[df["msg"] == "<Media omitted> "]
# removing images
df.drop(media.index, inplace=True)
# removing grp_notif
df.drop(grp_notif.index, inplace=True)
# Reset Index
df.reset_index(inplace=True, drop=True)
return df
# Function to get the total number of messages in the chat.
def messages_count(self):
return df.shape[0] - 1
# Function to get the number of chat participants together with each user's message frequency.
def users_count(self):
msgs_per_user = df["user"].value_counts(sort=True)
df2 = msgs_per_user.to_frame()
df2.rename({"user": "FREQUENCY"}, axis=1, inplace=True)
return (df2.shape[0], df2)
# Function uses Wordcloud lib to create infographics on words used in chat.
def infographics(self):
# Version Control - Keep Directory Clean for repeated usage in server
# Check for Last Image file
list_of_files = glob.glob("./static/data/*.png")
latest_file = max(list_of_files, key=os.path.getctime)
# Get Filename without extension
basename, fileext = os.path.splitext(latest_file)
# Increase count
current = basename[14:]
v = re.findall("[0-9]+", current)
version = int(v[0])
version += 1
version = str(version)
# Rename it
current = re.sub("\d+", "", current)
current_file = current + version + fileext
# Delete Previous File
os.remove(latest_file)
# Comment out all previous code and switch to remove V.C
# current_file = "test.png"
comment_words = " "
for val in df.msg.values:
val = str(val)
tokens = val.split()
for i in range(len(tokens)):
tokens[i] = tokens[i].lower()
for words in tokens:
comment_words = comment_words + words + " "
wordcloud = WordCloud(
width=800, height=800, background_color="black", min_font_size=10
).generate(comment_words)
wordcloud.to_file("./static/data/" + current_file)
return current_file
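# Hypothetical usage sketch (file name and time-format key are placeholders):
#   a = Analyse()
#   df = a.raw_to_df('chat_export.txt', '12hr')
#   print(a.messages_count(), 'messages from', a.users_count()[0], 'users')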
| 29.923567
| 79
| 0.561303
| 616
| 4,698
| 4.186688
| 0.37013
| 0.024816
| 0.006979
| 0.009306
| 0.048856
| 0.030244
| 0.012408
| 0.012408
| 0.012408
| 0.012408
| 0
| 0.017263
| 0.321839
| 4,698
| 156
| 80
| 30.115385
| 0.792216
| 0.269902
| 0
| 0.025
| 0
| 0.025
| 0.104038
| 0.029472
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.1
| 0.0125
| 0.2125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbece8b4d9743d75c14096162d201bda457080e8
| 2,857
|
py
|
Python
|
server/rdp.py
|
husmen/Trajectory-Mapping
|
215d5a2c58482b7ddb168a50dd02c59ba285c8bd
|
[
"MIT"
] | 2
|
2019-08-06T07:28:45.000Z
|
2020-05-31T14:41:40.000Z
|
server/rdp.py
|
husmen/Trajectory-Mapping
|
215d5a2c58482b7ddb168a50dd02c59ba285c8bd
|
[
"MIT"
] | null | null | null |
server/rdp.py
|
husmen/Trajectory-Mapping
|
215d5a2c58482b7ddb168a50dd02c59ba285c8bd
|
[
"MIT"
] | 1
|
2019-01-07T10:14:50.000Z
|
2019-01-07T10:14:50.000Z
|
#!/usr/bin/python3
"""
rdp
Python implementation of the Ramer-Douglas-Peucker algorithm.
"""
import sys
import numpy as np
#from math import sqrt
#from functools import partial
from math import radians, cos, sin, asin, sqrt
if sys.version_info[0] >= 3:
xrange = range
def pl_dist(point, start, end):
"""
Calculates the distance from ``point`` to the line given
by the points ``start`` and ``end``.
:param point: a point
:type point: numpy array
:param start: a point of the line
:type start: numpy array
:param end: another point of the line
:type end: numpy array
"""
if np.all(np.equal(start, end)):
return np.linalg.norm(point - start)
return np.divide(
np.abs(np.linalg.norm(np.cross(end - start, start - point))),
np.linalg.norm(end - start))
def rdp_rec(M, epsilon, dist=pl_dist):
"""
Simplifies a given array of points.
Recursive version.
:param M: an array
:type M: numpy array
:param epsilon: epsilon in the rdp algorithm
:type epsilon: float
:param dist: distance function
:type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pl_dist`
"""
dmax = 0.0
index = -1
for i in xrange(1, M.shape[0]):
d = dist(M[i], M[0], M[-1])
if d > dmax:
index = i
dmax = d
if dmax > epsilon:
r_1 = rdp_rec(M[:index + 1], epsilon, dist)
r_2 = rdp_rec(M[index:], epsilon, dist)
return np.vstack((r_1[:-1], r_2))
else:
return np.vstack((M[0], M[-1]))
def rdp(M, epsilon=0, dist=pl_dist):
"""
Simplifies a given array of points using the Ramer-Douglas-Peucker
algorithm.
Example:
>>> from rdp import rdp
>>> rdp([[1, 1], [2, 2], [3, 3], [4, 4]])
[[1, 1], [4, 4]]
This is a convenience wrapper around :func:`rdp.rdp_rec`
that detects if the input is a numpy array
in order to adapt the output accordingly. This means that
when it is called using a Python list as argument, a Python
list is returned, and in case of an invocation using a numpy
array, a NumPy array is returned.
Example:
>>> from rdp import rdp
>>> import numpy as np
>>> arr = np.array([1, 1, 2, 2, 3, 3, 4, 4]).reshape(4, 2)
>>> arr
array([[1, 1],
[2, 2],
[3, 3],
[4, 4]])
:param M: a series of points
:type M: numpy array with shape (n,d) where n is the number of points and d their dimension
:param epsilon: epsilon in the rdp algorithm
:type epsilon: float
:param dist: distance function
:type dist: function with signature ``f(point, start, end)`` -- see :func:`rdp.pl_dist`
"""
if "numpy" in str(type(M)):
return rdp_rec(M, epsilon, dist)
return rdp_rec(np.array(M), epsilon, dist).tolist()
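# Minimal usage sketch: with epsilon=1.0 the near-collinear middle points are
# dropped and only the "corner" points of the polyline survive.
if __name__ == '__main__':
    line = [[0, 0], [1, 0.1], [2, -0.1], [3, 5], [4, 6],
            [5, 7], [6, 8.1], [7, 9], [8, 9], [9, 9]]
    print(rdp(line, epsilon=1.0))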
| 26.453704
| 95
| 0.60098
| 445
| 2,857
| 3.822472
| 0.267416
| 0.047031
| 0.016461
| 0.007055
| 0.329218
| 0.22575
| 0.22575
| 0.22575
| 0.221046
| 0.159906
| 0
| 0.023501
| 0.270214
| 2,857
| 107
| 96
| 26.700935
| 0.792326
| 0.579979
| 0
| 0
| 0
| 0
| 0.005076
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.413793
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbee0d1262c642ad50187e5394e6ab5c37bd528f
| 5,560
|
py
|
Python
|
tests/algorithms/test_gail.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 75
|
2021-06-14T02:35:19.000Z
|
2022-03-23T04:30:24.000Z
|
tests/algorithms/test_gail.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 2
|
2021-12-17T08:46:54.000Z
|
2022-03-15T02:04:53.000Z
|
tests/algorithms/test_gail.py
|
sony/nnabla-rl
|
6a9a91ac5363b8611e0c9f736590729952a8d460
|
[
"Apache-2.0"
] | 3
|
2021-06-15T13:32:57.000Z
|
2022-03-25T16:53:14.000Z
|
# Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import nnabla as nn
import nnabla_rl.algorithms as A
import nnabla_rl.environments as E
from nnabla_rl.replay_buffer import ReplayBuffer
class TestGAIL():
def setup_method(self):
nn.clear_parameters()
def _create_dummy_buffer(self, env, batch_size=5):
experiences = generate_dummy_experiences(env, batch_size)
dummy_buffer = ReplayBuffer()
dummy_buffer.append_all(experiences)
return dummy_buffer
def test_algorithm_name(self):
dummy_env = E.DummyContinuous()
dummy_buffer = self._create_dummy_buffer(dummy_env)
gail = A.GAIL(dummy_env, dummy_buffer)
assert gail.__name__ == 'GAIL'
def test_discrete_action_env_unsupported(self):
'''
Check that an error occurs when training on a discrete action env
'''
dummy_env = E.DummyDiscrete()
dummy_env = EpisodicEnv(dummy_env, min_episode_length=3)
dummy_buffer = self._create_dummy_buffer(dummy_env, batch_size=15)
config = A.GAILConfig()
with pytest.raises(Exception):
A.GAIL(dummy_env, dummy_buffer, config=config)
def test_run_online_training(self):
'''
Check that no error occurs when calling online training
'''
dummy_env = E.DummyContinuous()
dummy_env = EpisodicEnv(dummy_env, min_episode_length=3)
dummy_buffer = self._create_dummy_buffer(dummy_env, batch_size=15)
config = A.GAILConfig(num_steps_per_iteration=5,
pi_batch_size=5,
vf_batch_size=2,
discriminator_batch_size=2,
sigma_kl_divergence_constraint=10.0,
maximum_backtrack_numbers=50)
gail = A.GAIL(dummy_env, dummy_buffer, config=config)
gail.train_online(dummy_env, total_iterations=5)
def test_run_offline_training(self):
'''
Check that an error is raised when calling offline training
'''
dummy_env = E.DummyContinuous()
dummy_buffer = self._create_dummy_buffer(dummy_env)
gail = A.GAIL(dummy_env, dummy_buffer)
with pytest.raises(NotImplementedError):
gail.train_offline([], total_iterations=10)
def test_compute_eval_action(self):
dummy_env = E.DummyContinuous()
dummy_buffer = self._create_dummy_buffer(dummy_env)
gail = A.GAIL(dummy_env, dummy_buffer)
state = dummy_env.reset()
state = np.float32(state)
action = gail.compute_eval_action(state)
assert action.shape == dummy_env.action_space.shape
def test_parameter_range(self):
with pytest.raises(ValueError):
A.GAILConfig(gamma=-0.1)
with pytest.raises(ValueError):
A.GAILConfig(num_steps_per_iteration=-1)
with pytest.raises(ValueError):
A.GAILConfig(sigma_kl_divergence_constraint=-0.1)
with pytest.raises(ValueError):
A.GAILConfig(maximum_backtrack_numbers=-0.1)
with pytest.raises(ValueError):
A.GAILConfig(conjugate_gradient_damping=-0.1)
with pytest.raises(ValueError):
A.GAILConfig(conjugate_gradient_iterations=-5)
with pytest.raises(ValueError):
A.GAILConfig(vf_epochs=-5)
with pytest.raises(ValueError):
A.GAILConfig(vf_batch_size=-5)
with pytest.raises(ValueError):
A.GAILConfig(vf_learning_rate=-0.5)
with pytest.raises(ValueError):
A.GAILConfig(discriminator_learning_rate=-0.5)
with pytest.raises(ValueError):
A.GAILConfig(discriminator_batch_size=-5)
with pytest.raises(ValueError):
A.GAILConfig(policy_update_frequency=-5)
with pytest.raises(ValueError):
A.GAILConfig(discriminator_update_frequency=-5)
with pytest.raises(ValueError):
A.GAILConfig(adversary_entropy_coef=-0.5)
def test_latest_iteration_state(self):
'''
Check that latest iteration state has the keys and values we expected
'''
dummy_env = E.DummyContinuous()
dummy_buffer = self._create_dummy_buffer(dummy_env)
gail = A.GAIL(dummy_env, dummy_buffer)
gail._v_function_trainer_state = {'v_loss': 0.}
gail._discriminator_trainer_state = {'reward_loss': 1.}
latest_iteration_state = gail.latest_iteration_state
assert 'v_loss' in latest_iteration_state['scalar']
assert 'reward_loss' in latest_iteration_state['scalar']
assert latest_iteration_state['scalar']['v_loss'] == 0.
assert latest_iteration_state['scalar']['reward_loss'] == 1.
if __name__ == "__main__":
from testing_utils import EpisodicEnv, generate_dummy_experiences
pytest.main()
else:
from ..testing_utils import EpisodicEnv, generate_dummy_experiences
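# Usage note (sketch): the dual import above lets this file work both as part
# of the test package and as a standalone script, e.g.
#   python -m pytest tests/algorithms/test_gail.py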
| 37.823129
| 77
| 0.673561
| 684
| 5,560
| 5.19883
| 0.277778
| 0.056243
| 0.071991
| 0.102362
| 0.475816
| 0.459505
| 0.426603
| 0.39342
| 0.292463
| 0.23622
| 0
| 0.013767
| 0.242266
| 5,560
| 146
| 78
| 38.082192
| 0.830287
| 0.15036
| 0
| 0.319588
| 0
| 0
| 0.018839
| 0
| 0
| 0
| 0
| 0
| 0.061856
| 1
| 0.092784
| false
| 0
| 0.082474
| 0
| 0.195876
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbefdf91c1e6ecf066af1879e3918f12b778aa84
| 11,398
|
py
|
Python
|
options_chain_pull.py
|
anupamsharma01/python
|
f415aa663c9e83ff8ab615da93a5a71ec877834b
|
[
"blessing"
] | 2
|
2020-12-25T22:30:52.000Z
|
2021-11-26T14:08:12.000Z
|
options_chain_pull.py
|
anupamsharma01/python_options_trade
|
f415aa663c9e83ff8ab615da93a5a71ec877834b
|
[
"blessing"
] | null | null | null |
options_chain_pull.py
|
anupamsharma01/python_options_trade
|
f415aa663c9e83ff8ab615da93a5a71ec877834b
|
[
"blessing"
] | 3
|
2020-04-10T15:00:10.000Z
|
2021-08-19T21:20:19.000Z
|
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import parse_qs
import requests
import ssl
import sys
import tdameritrade.auth #added as40183
import urllib
import urllib3 #as40183
from sys import argv
import pymysql.cursors
import datetime
import dateutil.relativedelta
import calendar
import time
import json
import ast
import pandas
import sqlite3
import string
import xlwt
import openpyxl
KEY = 'STOCKTIPS'
# Arguments
in_file = r'C:\Anupam\market\stock_options_api-master\trading_api\tdameritrade\my_programs\data\program_in.txt'
out_file=r'C:\Anupam\market\stock_options_api-master\trading_api\tdameritrade\my_programs\data\program_out.txt'
script='C:/Anupam/market/stock_options_api-master/trading_api/tdameritrade/my_programs/options_chain_pull.py'
debug = 'true'
f_in = open(in_file)
equity_list = f_in.readlines()
equity_list = [l.replace('\n','') for l in equity_list]
f_out = open(out_file,'w')
print ('EQUITY | CMP | 52WkRange', file=f_out)
#sqlite3 connection
connection = sqlite3.connect(r'C:\Anupam\Technical\sqlite\db\mydb.db')
cursor = connection.cursor()
create_sql = """CREATE TABLE IF NOT EXISTS chain (
equity text NOT NULL,
symbol text NOT NULL,
cmp real NOT NULL, --added from stocks
_52WkRange text NOT NULL, --added from stocks
strikePrice real NOT NULL,
last real NOT NULL,
bid real NOT NULL,
ask real NOT NULL,
bidSize real NOT NULL,
askSize real NOT NULL,
totalVolume real NOT NULL,
volatility real NOT NULL,
putCall text NOT NULL,
inTheMoney text NOT NULL,
daysToExpiration int NOT NULL,
timeValue real NOT NULL,
theoreticalVolatility real NOT NULL
);"""
drop_sql = "DROP TABLE CHAIN"
select_sql = "SELECT * FROM CHAIN"
delete_sql = "DELETE FROM CHAIN"
if (debug == 'true'):
print ('create_sql==',create_sql)
print ('delete_sql==',delete_sql)
#cursor.execute(drop_sql)
cursor.execute(create_sql)
cursor.execute(delete_sql)
connection.commit()
cursor.execute(select_sql)
row=cursor.fetchall()
print (row)
# Declare
#start = datetime.now()
args_list = []
count = str(250)
myFormat = "%Y-%m-%d"
today = datetime.date.today()
rd = dateutil.relativedelta.relativedelta(days=1, weekday=dateutil.relativedelta.FR)
next_friday = today + rd
if (debug == 'true'):
print ('today=',today)
print('next_friday=',str(next_friday))
#debug: Remove comment to use expiration of a future date
next_friday=today+datetime.timedelta(days=17)
print('next_friday=', str(next_friday))
#debug starts
#equity='AAPL'
count=40
start_date=next_friday
#active_day variables start - syncup from excel_pull
#CUSTOMIZATION BLOCK starts
debug='false'
skip_days=0 #set to 0 if placing the order today; update to 1 if needed for tomorrow + the day after tomorrow
#CUSTOMIZATION BLOCK ends
curr_date = datetime.date.today() + datetime.timedelta(days=skip_days)
if (curr_date.isoweekday() == 6):
curr_date = curr_date + datetime.timedelta(days=2)
elif (curr_date.isoweekday() == 7):
curr_date = curr_date + datetime.timedelta(days=1)
if curr_date.isoweekday() in set((5, 6)):
next_date = curr_date + datetime.timedelta(days=8 - curr_date.isoweekday())
else:
next_date = curr_date + datetime.timedelta(days=1)
print (curr_date, calendar.day_name[curr_date.weekday()], curr_date.isoweekday())
print (next_date, calendar.day_name[next_date.weekday()], next_date.isoweekday())
active_day_today = calendar.day_name[curr_date.weekday()]
active_day_tomorrow = calendar.day_name[next_date.weekday()]
print (active_day_today, active_day_tomorrow)
#active_day variables end
# for NEXT WEEK FRIDAY DEBUG only
#next_friday = next_friday + datetime.timedelta(days=7)
#start_date = start_date + datetime.timedelta(days=7)
#print("NEXT WEEK next_friday-start_date", next_friday, start_date)
# END OF NEXT WEEK DEBUG
for equity in equity_list:
#EQUITY STOCK CODE
time.sleep(1.01)
equity, mkt_time = equity.split(",")
equity = equity.strip()
print('equity=', equity)
start_equity = datetime.datetime.now()
url = 'https://api.tdameritrade.com/v1/marketdata/'+equity+'/quotes?apikey='+KEY
#url1 = 'https://api.tdameritrade.com/v1/marketdata/AAPL/quotes?apikey=STOCKTIPS'
r = requests.get(url)
payload = r.json()
if (debug=='true'):
print(url)
print ('r=',r)
print ('r.text=',r.text)
print ('payload=',payload)
equity = payload[equity]['symbol']
cmp = payload[equity]['regularMarketLastPrice'] #lastPrice
_52WkLow = round(payload[equity]['52WkLow'])
_52WkHigh = round(payload[equity]['52WkHigh'])
if (debug=='true'):
print ('equity=',equity)
print ('cmp=',cmp)
print ('EQUITY | CMP | 52WkRange', file=f_out)
print (equity, '|', cmp, '|', _52WkLow, '-', _52WkHigh, file=f_out)
#OPTION CHAIN CODE
url = 'https://api.tdameritrade.com/v1/marketdata/chains?apikey=' + KEY + \
'&symbol=' + equity + '&contractType=' + 'PUT' + '&range=OTM' + '&fromDate=' + \
str(start_date) + '&toDate=' + str(next_friday) + '&strikeCount=' + str(count) # + '&strike<170.0'
r = requests.get(url) # <Response [200]>
payload = r.json()
if (debug == 'true'):
print('URL==', url)
print(r.text)
print(payload)
symbol = payload['symbol']
# Get Puts
for keyy, valuee in payload["putExpDateMap"].items():
d = datetime.datetime.strptime(keyy, "%Y-%m-%d:%f")
ex_date = d.strftime(myFormat)
for key, value in valuee.items():
for v in value:
args = [ v['symbol'], payload["symbol"], v['strikePrice'], v['last'], v['bid'], v['ask'], v['bidSize'], v['askSize'], v['totalVolume'], v['volatility'], v['putCall'], ex_date, v['inTheMoney'], v['daysToExpiration'], v['timeValue'], v['theoreticalVolatility'] ]
if (debug == 'true'):
print (v['strikePrice'] ,'CMP=', float(cmp))
if (v['strikePrice'] < float(cmp)):
args_list.append(args)
if (debug == 'true'):
print ('args_list=',args_list)
insert_sql = "INSERT INTO CHAIN (" \
+ " equity, symbol, cmp, _52WkRange, strikePrice, last, bid, ask, bidSize, askSize, totalVolume, volatility, putCall, inTheMoney, daysToExpiration, timeValue, theoreticalVolatility " \
+ ") values ('" \
+ payload['symbol'] + "','" \
+ v['symbol'] + "'," \
+ str(cmp) + "," \
+ str("'" + str(_52WkLow) + "-" + str(_52WkHigh)) + "'" + "," \
+ str(v['strikePrice']) + "," \
+ str(v['last']) + "," \
+ str(v['bid']) + "," \
+ str(v['ask']) + "," \
+ str(v['bidSize']) + "," \
+ str(v['askSize']) + "," \
+ str(v['totalVolume']) + "," \
+ str(v['volatility']) + ",'" \
+ str(v['putCall']) + "','" \
+ str(v['inTheMoney']) + "'," \
+ str(v['daysToExpiration']) + "," \
+ str(v['timeValue']) + "," \
+ str(v['theoreticalVolatility']) \
+ ")"
if (debug == 'true'):
print ('insert_sql==',insert_sql)
cursor.execute(insert_sql)
connection.commit()
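# Editor's note: the string-built INSERT above breaks on values containing
# quotes and is open to SQL injection. A minimal parameterized sketch,
# assuming a DB-API 2.0 driver with qmark placeholders (e.g. sqlite3);
# drivers such as MySQLdb use %s instead:
#   insert_sql = ("INSERT INTO CHAIN (equity, symbol, cmp, _52WkRange, strikePrice, last, bid, ask, "
#                 "bidSize, askSize, totalVolume, volatility, putCall, inTheMoney, daysToExpiration, "
#                 "timeValue, theoreticalVolatility) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)")
#   params = (payload['symbol'], v['symbol'], cmp, '%s-%s' % (_52WkLow, _52WkHigh),
#             v['strikePrice'], v['last'], v['bid'], v['ask'], v['bidSize'], v['askSize'],
#             v['totalVolume'], v['volatility'], v['putCall'], str(v['inTheMoney']),
#             v['daysToExpiration'], v['timeValue'], v['theoreticalVolatility'])
#   cursor.execute(insert_sql, params)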
# FINAL RESULT SQLs
wbkName_out = r'C:\Anupam\market\consolidated_excel_data.xlsx'
wbk_out = openpyxl.load_workbook(wbkName_out)
wks_out = wbk_out[active_day_today+'-'+active_day_tomorrow]
#WRITE OUTPUT TO EXCEL
select_sql1 = "select distinct equity, market_time from chain order by equity;"  # note: defined and printed but never executed below
print ('select_sql1=',select_sql1)
print ("-----------------", file=f_out)
select_sql2 = "select distinct equity, cmp, _52WkRange from chain order by equity;"
print ('select_sql2=',select_sql2)
cursor.execute(select_sql2)
rows = cursor.fetchall()
idx=2
#wks_out.cell(row=1, column=3).value = " ".join(["EQUITY" , " | " , "CMP" , "|" , "52WkRange"])
wks_out.cell(row=1, column=2).value = "52WkRange"
wks_out.cell(row=1, column=5).value = "CMP"
for row in rows:
if (debug == 'true'):
print('select_sql2:', row[0], "|" ,row[1], "|", row[2])
#wks_out.cell(row=idx, column=3).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2])])
wks_out.cell(row=idx, column=2).value = str(row[2])
wks_out.cell(row=idx, column=5).value = row[1]
idx+= 1
if (debug == 'true'):
print ('select_sql2:idx=',idx)
print ("-----------------", file=f_out)
select_sql3 = "select equity, strikeprice, bid, round(bid*100/strikeprice,2) prem_per from chain " + \
"where equity||strikeprice in (select equity||max(strikeprice) from chain group by equity) order by equity;"
print ('select_sql3=',select_sql3)
cursor.execute(select_sql3)
rows = cursor.fetchall()
idx=2
#wks_out.cell(row=1, column=4).value = " ".join(["EQUITY" , " | " , "STRIKEPRICE" , "|" , "BID", "|", "PREM_PCT"])
wks_out.cell(row=1, column=3).value = "EQUITY"
wks_out.cell(row=1, column=6).value = "STRIKEPRICE"
wks_out.cell(row=1, column=7).value = "BID"
wks_out.cell(row=1, column=8).value = "PREM_PCT"
for row in rows:
print(row[0], "|" ,row[1], "|", row[2], "|", row[3])
#wks_out.cell(row=idx, column=4).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2]), "|" , str(row[3])])
wks_out.cell(row=idx, column=3).value = str(row[0])
wks_out.cell(row=idx, column=6).value = row[1]
wks_out.cell(row=idx, column=7).value = row[2]
wks_out.cell(row=idx, column=8).value = row[3]
idx+= 1
if (debug == 'true'):
print ('select_sql3:idx=',idx)
print ("-----------------", file=f_out)
select_sql4 = "select equity, strikeprice, round(((cmp-strikeprice)*-100/cmp),1) prc_diff, bid, round(bid*100/strikeprice,1) prem_per from chain a " + \
"where bid>=0.05 and (round(((cmp-strikeprice)*-100/cmp),1) between -12 and -5 or round(((cmp-strikeprice)*-100/cmp),1) between -20 and -14) " + \
"order by equity, prc_diff;"  # repeats the prc_diff expression because most engines reject column aliases in WHERE, and parenthesizes the OR so the bid filter applies to both ranges
print ('select_sql4=',select_sql4)
cursor.execute(select_sql4)
rows = cursor.fetchall()
idx=2
wks_out.cell(row=1, column=9).value = " ".join(["EQUITY" , " | " , "STRIKEPRICE" , "|", "PCT_DIFF", "|" , "BID", "|", "PREM_PCT"])
prev_eq = None
for row in rows:
new_eq = row[0]
if prev_eq is not None and prev_eq != new_eq:
print ("---", file=f_out)  # equity changed; the original compared prev_eq to new_eq after both were set to row[0], so this separator never printed
if (debug == 'true'):
print(row[0], "|" ,row[1], "|", row[2], "|", row[3], "|", row[4], file=f_out)
wks_out.cell(row=idx, column=9).value = " ".join([str(row[0]) , "|" , str(row[1]) , "|" , str(row[2]), "|" , str(row[3]), "|" , str(row[4])])
idx += 1
if (debug == 'true'):
print('idx=', idx)
prev_eq = new_eq
wbk_out.save(wbkName_out)
wbk_out.close()  # the original `wbk_out.close` (no parentheses) was a no-op attribute access
| 37.993333
| 277
| 0.5887
| 1,439
| 11,398
| 4.528839
| 0.186935
| 0.017493
| 0.02762
| 0.035906
| 0.304128
| 0.283106
| 0.204082
| 0.115084
| 0.084855
| 0.070585
| 0
| 0.021368
| 0.244517
| 11,398
| 299
| 278
| 38.120401
| 0.735455
| 0.11037
| 0
| 0.176991
| 0
| 0.026549
| 0.311684
| 0.061378
| 0.004425
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.09292
| 0
| 0.09292
| 0.172566
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf00ed1d2c63a8cbd6917e7f62b070f2c550c40
| 4,492
|
py
|
Python
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 53
|
2018-10-28T20:07:16.000Z
|
2021-12-17T02:25:57.000Z
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 3
|
2019-01-07T14:01:39.000Z
|
2019-05-07T12:01:44.000Z
|
src/main.py
|
Naman-ntc/3D-HourGlass-Network
|
e58b7b6a78d35bc14fe4c0bc611f80022b2f409b
|
[
"MIT"
] | 9
|
2018-10-28T22:31:29.000Z
|
2021-10-14T02:54:27.000Z
|
import os
import time
import datetime
import ref
import torch
import torch.utils.data
from opts import opts
from model.Pose3D import Pose3D
from datahelpers.dataloaders.fusedDataLoader import FusionDataset
from datahelpers.dataloaders.h36mLoader import h36m
from datahelpers.dataloaders.mpiiLoader import mpii
from datahelpers.dataloaders.posetrackLoader import posetrack
from utils.utils import adjust_learning_rate
from utils.logger import Logger
from train import train,val
from inflateScript import *
def main():
opt = opts().parse()
torch.cuda.set_device(opt.gpu_id)
print('Using GPU ID: ' ,str(torch.cuda.current_device()))
now = datetime.datetime.now()
logger = Logger(opt.saveDir + '/logs_{}'.format(now.isoformat()))
if opt.loadModel == 'none':
model = inflate(opt).cuda()
elif opt.loadModel == 'scratch':
model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints, ref.temporal).cuda()
else :
if opt.isStateDict:
model = Pose3D(opt.nChannels, opt.nStack, opt.nModules, opt.numReductions, opt.nRegModules, opt.nRegFrames, ref.nJoints, ref.temporal).cuda()
model.load_state_dict(torch.load(opt.loadModel))
model = model.cuda()
print("yaya")
else:
model = torch.load(opt.loadModel).cuda()
val_loader = torch.utils.data.DataLoader(
h36m('val', opt),
batch_size = 1,
shuffle = False,
num_workers = int(ref.nThreads)
)
if opt.completeTest:
mp = 0.
cnt = 0.
for i in range(6000//opt.nVal):
opt.startVal = 120*i
opt.nVal = opt.nVal  # no-op kept from the original; nVal is unchanged between rounds
a,b = val(i, opt, val_loader, model)
mp += a*b
cnt += b
print("This Round " + str(a) + " MPJPE in " + str(b) + " frames!!")
print("Average MPJPE so far " + str(mp/cnt))
print("")
print("------Finally--------")
print("Final MPJPE ==> :" + str(mp/cnt))
return
if (opt.test):
val(0, opt, val_loader, model)
return
train_loader = torch.utils.data.DataLoader(
FusionDataset('train',opt) if opt.loadMpii else h36m('train',opt),
batch_size = opt.dataloaderSize,
shuffle = True,
num_workers = int(ref.nThreads)
)
optimizer = torch.optim.RMSprop(
[{'params': model.hg.parameters(), 'lr': opt.LRhg},
{'params': model.dr.parameters(), 'lr': opt.LRdr}],
alpha = ref.alpha,
eps = ref.epsilon,
weight_decay = ref.weightDecay,
momentum = ref.momentum
)
def hookdef(grad):
newgrad = grad.clone()
if (grad.shape[2]==1):
newgrad = grad*opt.freezefac
else:
newgrad[:,:,1,:,:] = grad[:,:,1,:,:]*opt.freezefac
return newgrad
def hookdef1(grad):
newgrad = grad.clone()
newgrad[:,4096:8192] = newgrad[:,4096:8192]*opt.freezefac
return newgrad
for i in (model.parameters()):
if len(i.shape)==5:
_ = i.register_hook(hookdef)
if len(i.shape)==2:
_ = i.register_hook(hookdef1)
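# Editor's note: register_hook attaches a function that receives a parameter's
# gradient during backward and may return a modified tensor, which is how the
# hooks above damp selected weights by opt.freezefac. A tiny self-contained
# illustration (assumes PyTorch):
#   import torch
#   p = torch.ones(3, requires_grad=True)
#   p.register_hook(lambda g: g * 0.5)  # halve the incoming gradient
#   p.sum().backward()
#   print(p.grad)  # tensor([0.5000, 0.5000, 0.5000])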
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor = opt.dropMag, patience = opt.patience, verbose = True, threshold = opt.threshold)
for epoch in range(1, opt.nEpochs + 1):
loss_train, loss3d_train, mpjpe_train, acc_train = train(epoch, opt, train_loader, model, optimizer)
logger.scalar_summary('loss_train', loss_train, epoch)
#logger.scalar_summary('acc_train', acc_train, epoch)
logger.scalar_summary('mpjpe_train', mpjpe_train, epoch)
logger.scalar_summary('loss3d_train', loss3d_train, epoch)
if epoch % opt.valIntervals == 0:
loss_val, loss3d_val, mpjpe_val, acc_val = val(epoch, opt, val_loader, model)
logger.scalar_summary('loss_val', loss_val, epoch)
# logger.scalar_summary('acc_val', acc_val, epoch)
logger.scalar_summary('mpjpe_val', mpjpe_val, epoch)
logger.scalar_summary('loss3d_val', loss3d_val, epoch)
torch.save(model.state_dict(), os.path.join(opt.saveDir, 'model_{}.pth'.format(epoch)))
logger.write('{:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} {:8f} \n'.format(loss_train, mpjpe_train, loss3d_train, acc_val, loss_val, mpjpe_val, loss3d_val, acc_train))  # eight fields for eight values; the original six-slot format silently dropped the last two
else:
logger.write('{:8f} {:8f} {:8f} {:8f} \n'.format(loss_train, mpjpe_train, loss3d_train, acc_train))  # four fields for four values; the original three-slot format silently dropped acc_train
#adjust_learning_rate(optimizer, epoch, opt.dropLR, opt.LR)
if opt.scheduler == 1:
scheduler.step(int(loss_train))
elif opt.scheduler == 2:
scheduler.step(int(loss3d_train))
elif opt.scheduler == 3:
scheduler.step(int(loss_train + loss3d_train))
elif opt.scheduler == 4:
scheduler.step(int(mpjpe_train))
logger.close()
if __name__ == '__main__':
#torch.set_default_tensor_type('torch.DoubleTensor')
main()
| 31.412587
| 163
| 0.70236
| 628
| 4,492
| 4.883758
| 0.270701
| 0.031301
| 0.04956
| 0.046951
| 0.244539
| 0.10238
| 0.10238
| 0.10238
| 0.10238
| 0.10238
| 0
| 0.019306
| 0.146705
| 4,492
| 142
| 164
| 31.633803
| 0.780851
| 0.046972
| 0
| 0.122807
| 0
| 0
| 0.069207
| 0.00491
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0
| 0.140351
| 0
| 0.201754
| 0.061404
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf0f0dbbea749b29ef7a61b2ac5e680c12f1409
| 1,053
|
py
|
Python
|
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
basic-part-1/07-print-file-extension.py
|
inderpal2406/python-practice-2022
|
59e280a5babefc96b1a9c773a79fb5176e876f7a
|
[
"MIT"
] | null | null | null |
# This script accepts a filename from the user and prints the extension of that file.
# If the script doesn't find a period in the filename, it reports that instead.
# The "in" and "not in" membership operators work with strings as well as lists and tuples.
# Need to check where else they can be used.
# Import modules.
import platform
import os
# Detect the OS and clear the screen.
os_name = platform.system()
if os_name == "Windows":
os.system("cls")
elif os_name == "Linux":
os.system("clear")
# Display purpose of the script.
print(f"This script will accept filename from the user and print its extension.\n")
# Accept user input.
filename = input("Enter the filename: ")
# Check if the filename has a period "." in it. If it contains a period, then extract the extension and display it.
if "." not in filename:
print(f"\nThe filename doesn't contain . in it. It seems to be a file without extension.\n")
else:
our_list = filename.split(".")
print(f"\nFile extension: {our_list[-1]}\n")
| 30.970588
| 115
| 0.716999
| 174
| 1,053
| 4.310345
| 0.454023
| 0.028
| 0.037333
| 0.053333
| 0.072
| 0.072
| 0
| 0
| 0
| 0
| 0
| 0.001168
| 0.187085
| 1,053
| 33
| 116
| 31.909091
| 0.875
| 0.512821
| 0
| 0
| 0
| 0.071429
| 0.459245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.214286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf2ae61952632fab35bb3d4da6625e30a6cc5d4
| 1,279
|
py
|
Python
|
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | null | null | null |
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | null | null | null |
src/Xtb/Python/__init__.py
|
qcscine/xtb_wrapper
|
5295244771ed5efe3d9e1582e07ed9d26545d387
|
[
"BSD-3-Clause"
] | 1
|
2022-02-04T13:40:00.000Z
|
2022-02-04T13:40:00.000Z
|
__copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import os
import scine_utilities as utils
from distutils import ccompiler
manager = utils.core.ModuleManager()
if not manager.module_loaded('Xtb'):
shlib_suffix = ccompiler.new_compiler().shared_lib_extension
module_filename = "xtb.module" + shlib_suffix
# Look in the Python module directory (the module is here in the case of
# Python packages) and in the lib folder that contains the site-packages
current_path = os.path.dirname(os.path.realpath(__file__))
lib_path = os.path.dirname(os.path.dirname(os.path.dirname(current_path)))
test_paths = [current_path, lib_path]
def exists_and_could_load(path):
full_path = os.path.join(path, module_filename)
if os.path.exists(full_path):
try:
manager.load(full_path)
except RuntimeError as err:
print("Could not load {}: {}".format(full_path, err))
return False
return True
return False
if not any(map(exists_and_could_load, test_paths)):
raise ImportError('{} could not be located.'.format(module_filename))
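# Editor's note: any(map(exists_and_could_load, test_paths)) short-circuits
# because map() is lazy, so loading stops at the first path that succeeds.
# An equivalent explicit loop (sketch):
#   for path in test_paths:
#       if exists_and_could_load(path):
#           break
#   else:
#       raise ImportError('{} could not be located.'.format(module_filename))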
| 36.542857
| 78
| 0.693511
| 174
| 1,279
| 4.896552
| 0.505747
| 0.049296
| 0.061033
| 0.052817
| 0.077465
| 0.077465
| 0
| 0
| 0
| 0
| 0
| 0.001
| 0.218139
| 1,279
| 34
| 79
| 37.617647
| 0.851
| 0.102424
| 0
| 0.076923
| 0
| 0
| 0.184279
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.153846
| 0
| 0.307692
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf40515dd7d835260533fe653dd331f52016415
| 5,062
|
py
|
Python
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 3
|
2016-05-03T20:07:25.000Z
|
2020-12-22T07:16:11.000Z
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 17
|
2016-04-26T09:35:42.000Z
|
2016-08-18T10:07:40.000Z
|
perch/validators.py
|
OpenPermissions/perch
|
36d78994133918f3c52c187f19e50132960a0156
|
[
"Apache-2.0"
] | 1
|
2019-05-20T01:40:56.000Z
|
2019-05-20T01:40:56.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2016 Open Permissions Platform Coalition
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""Voluptuous validotor functions"""
import re
from urlparse import urlsplit
from voluptuous import AllInvalid, Invalid, Schema, ALLOW_EXTRA
from .model import State
class MetaSchema(object):
"""
Schema must pass all validators. Useful for cases where a field depends on
the value of another field
Similar to using All with a schema and validator function, e.g.
All(Schema({'x': int, 'y': int}), x_greater_than_y)
>>> validate = MetaSchema({'x': '10'}, Coerce(int))
>>> validate('10')
10
"""
def __init__(self, schema, *validators, **kwargs):
self.validators = validators
self.msg = kwargs.pop('msg', None)
self._schema = Schema(schema)
self._schemas = [Schema(val, **kwargs) for val in validators]
@property
def schema(self):
return self._schema.schema
def __call__(self, v):
try:
v = self._schema(v)
for schema in self._schemas:
v = schema(v)
except Invalid as e:
raise e if self.msg is None else AllInvalid(self.msg)
return v
def partial_schema(schema, filtered_fields):
"""
Validator for part of a schema, ignoring some fields
:param schema: the Schema
:param filtered_fields: fields to filter out
"""
return Schema({
k: v for k, v in schema.schema.items()
if getattr(k, 'schema', k) not in filtered_fields
}, extra=ALLOW_EXTRA)
def valid_email(email):
"""Validate email."""
if "@" not in email:
raise Invalid('This email is invalid.')
return email
def validate_hex(color):
"""
Validate string is a hex color code
"""
hex_re = '^#(?:[0-9a-fA-F]{3}){1,2}$'
if not re.match(hex_re, color):
raise Invalid('Invalid Hex Color')
return color
def validate_url(url):
"""Validate URL is valid
NOTE: only support http & https
"""
schemes = ['http', 'https']
netloc_re = re.compile(
r'^'
r'(?:\S+(?::\S*)?@)?' # user:pass auth
r'(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])'
r'(?:\.(?:[a-z0-9]|[a-z0-9][a-z0-9\-]{0,61}[a-z0-9]))*' # host
r'(?::[0-9]{2,5})?' # port
r'$', re.IGNORECASE
)
try:
scheme, netloc, path, query, fragment = urlsplit(url)
except ValueError:
raise Invalid('Invalid URL')
if scheme not in schemes:
raise Invalid('Missing URL scheme')
if not netloc_re.search(netloc):
raise Invalid('Invalid URL')
return url
def validate_reference_links(reference_links):
"""
Validate the reference links data structure
Expected data structure:
{
"links": {
id_type1: url1,
id_type2: url2
},
"redirect_id_type": id_type1 | id1_type2
}
where links is an optional key but must be a dictionary with id types to
URLs if it exists, and redirect_id_type is optional but if it exists,
it must point to one of the existing id types in the links object. It is
used to set a default redirect URL that is used by the resolution service.
"""
allowed_keys = ['links', 'redirect_id_type']
if not isinstance(reference_links, dict):
raise Invalid('Expected reference_links to be an object')
if 'links' in reference_links and not isinstance(reference_links['links'], dict):
raise Invalid('Expected links in reference_links to be an object')
links = reference_links.get('links', {})
redirect_id_type = reference_links.get('redirect_id_type')
for key in reference_links:
if key not in allowed_keys:
raise Invalid('Key {} is not allowed'.format(key))
if redirect_id_type and redirect_id_type not in links:
raise Invalid('Redirect ID type must point to one of the links\' ID types')
[validate_url(url) for url in links.values()]
return reference_links
VALID_STATES = {x.name for x in State}
VALID_USER_STATES = {x.name for x in [State.approved, State.deactivated]}
def validate_state(state):
return _validate_state(state, VALID_STATES)
def validate_user_state(state):
return _validate_state(state, VALID_USER_STATES)
def _validate_state(state, valid_states):
"""Validate a state string"""
if state in State:
return state.name
elif state in valid_states:
return state
else:
raise Invalid('Invalid state')
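# Editor's note: usage sketch for the validators above (assumes voluptuous is
# installed; inputs are hypothetical):
#   validate_url('https://example.com')   # returns the URL unchanged
#   validate_url('ftp://example.com')     # raises Invalid('Missing URL scheme')
#   valid_email('user@example.com')       # returns the email
#   validate_hex('#1a2b3c')               # returns the color
#   validate_hex('blue')                  # raises Invalid('Invalid Hex Color')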
| 29.260116
| 107
| 0.644212
| 713
| 5,062
| 4.462833
| 0.31136
| 0.052797
| 0.010057
| 0.006285
| 0.108422
| 0.079195
| 0.050911
| 0.012571
| 0.012571
| 0.012571
| 0
| 0.013925
| 0.248123
| 5,062
| 172
| 108
| 29.430233
| 0.822123
| 0.335836
| 0
| 0.04878
| 0
| 0.02439
| 0.150849
| 0.038655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134146
| false
| 0
| 0.04878
| 0.036585
| 0.329268
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf6bf0479cef19ff010cf6f671d185104dd03d3
| 9,060
|
py
|
Python
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 4
|
2019-04-26T15:47:57.000Z
|
2021-04-20T22:53:58.000Z
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 8
|
2017-11-22T19:20:20.000Z
|
2022-02-14T01:49:58.000Z
|
glycan_profiling/tandem/evaluation_dispatch/task.py
|
mstim/glycresoft
|
1d305c42c7e6cba60326d8246e4a485596a53513
|
[
"Apache-2.0"
] | 3
|
2017-11-21T18:05:28.000Z
|
2021-09-23T18:38:33.000Z
|
import os
from collections import deque
from glycan_profiling.task import TaskBase
debug_mode = bool(os.environ.get("GLYCRESOFTDEBUG"))
class StructureSpectrumSpecificationBuilder(object):
"""Base class for building structure hit by spectrum specification
"""
def build_work_order(self, hit_id, hit_map, scan_hit_type_map, hit_to_scan):
"""Packs several task-defining data structures into a simple to unpack payload for
sending over IPC to worker processes.
Parameters
----------
hit_id : int
The id number of a hit structure
hit_map : dict
Maps hit_id to hit structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
Returns
-------
tuple
Packaged message payload
"""
return (hit_map[hit_id],
[(s, scan_hit_type_map[s, hit_id])
for s in hit_to_scan[hit_id]])
class TaskSourceBase(StructureSpectrumSpecificationBuilder, TaskBase):
"""A base class for building a stream of work items through
:class:`StructureSpectrumSpecificationBuilder`.
"""
batch_size = 10000
def add(self, item):
"""Add ``item`` to the work stream
Parameters
----------
item : object
The work item to deal
"""
raise NotImplementedError()
def join(self):
"""Checkpoint that may halt the stream generation.
"""
return
def feed(self, hit_map, hit_to_scan, scan_hit_type_map):
"""Push tasks onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
"""
i = 0
n = len(hit_to_scan)
seen = dict()
for hit_id, scan_ids in hit_to_scan.items():
i += 1
hit = hit_map[hit_id]
# This sanity checking is likely unnecessary, and is a hold-over from
# debugging redundancy in the result queue. For the moment, it is retained
# to catch "new" bugs.
# If a hit structure's id doesn't match the id it was looked up with, something
# may be wrong with the upstream process. Log this event.
if hit.id != hit_id:
self.log("Hit %r doesn't match its id %r" % (hit, hit_id))
if hit_to_scan[hit.id] != scan_ids:
self.log("Mismatch leads to different scans! (%d, %d)" % (
len(scan_ids), len(hit_to_scan[hit.id])))
# If a hit structure has been seen multiple times independent of whether or
# not the expected hit id matches, something may be wrong in the upstream process.
# Log this event.
if hit.id in seen:
self.log("Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
raise ValueError(
"Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
seen[hit.id] = hit_id
if i % self.batch_size == 0 and i:
self.join()
try:
work_order = self.build_work_order(hit_id, hit_map, scan_hit_type_map, hit_to_scan)
# if debug_mode:
# self.log("...... Matching %s against %r" % work_order)
self.add(work_order)
# Set a long progress update interval because the feeding step is less
# important than the processing step. Additionally, as the two threads
# run concurrently, the feeding thread can log a short interval before
# the entire process has formally logged that it has started.
if i % 10000 == 0:
self.log("...... Dealt %d work items (%0.2f%% Complete)" % (i, i * 100.0 / n))
except Exception as e:
self.log("An exception occurred while feeding %r and %d scan ids: %r" % (hit_id, len(scan_ids), e))
self.log("...... Finished dealing %d work items" % (i,))
self.join()
return
def feed_groups(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group):
"""Push task groups onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
hit_to_group: dict
Maps group id to the set of hit ids which are members of that group
"""
i = 0
j = 0
n = len(hit_to_group)
seen = dict()
for group_key, hit_keys in hit_to_group.items():
hit_group = {
"work_orders": {}
}
i += 1
for hit_id in hit_keys:
j += 1
scan_ids = hit_to_scan[hit_id]
hit = hit_map[hit_id]
# This sanity checking is likely unnecessary, and is a hold-over from
# debugging redundancy in the result queue. For the moment, it is retained
# to catch "new" bugs.
# If a hit structure's id doesn't match the id it was looked up with, something
# may be wrong with the upstream process. Log this event.
if hit.id != hit_id:
self.log("Hit %r doesn't match its id %r" % (hit, hit_id))
if hit_to_scan[hit.id] != scan_ids:
self.log("Mismatch leads to different scans! (%d, %d)" % (
len(scan_ids), len(hit_to_scan[hit.id])))
# If a hit structure has been seen multiple times independent of whether or
# not the expected hit id matches, something may be wrong in the upstream process.
# Log this event.
if hit.id in seen:
self.log("Hit %r already dealt under hit_id %r, now again at %r in group %r" % (
hit, seen[hit.id], hit_id, group_key))
raise ValueError(
"Hit %r already dealt under hit_id %r, now again at %r" % (
hit, seen[hit.id], hit_id))
seen[hit.id] = (hit_id, group_key)
work_order = self.build_work_order(
hit_id, hit_map, scan_hit_type_map, hit_to_scan)
hit_group['work_orders'][hit_id] = work_order
self.add(hit_group)
if i % self.batch_size == 0 and i:
self.join()
self.log("...... Finished dealing %d work items" % (i,))
self.join()
return
def __call__(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group=None):
if not hit_to_group:
return self.feed(hit_map, hit_to_scan, scan_hit_type_map)
else:
return self.feed_groups(hit_map, hit_to_scan, scan_hit_type_map, hit_to_group)
class TaskDeque(TaskSourceBase):
"""Generate an on-memory buffer of work items
Attributes
----------
queue : :class:`~.deque`
The in-memory work queue
"""
def __init__(self):
self.queue = deque()
def add(self, item):
self.queue.append(item)
def pop(self):
return self.queue.popleft()
def __iter__(self):
return iter(self.queue)
class TaskQueueFeeder(TaskSourceBase):
def __init__(self, input_queue, done_event):
self.input_queue = input_queue
self.done_event = done_event
def add(self, item):
self.input_queue.put(item)
def join(self):
return self.input_queue.join()
def feed(self, hit_map, hit_to_scan, scan_hit_type_map):
"""Push tasks onto the input queue feeding the worker
processes.
Parameters
----------
hit_map : dict
Maps hit id to structure
hit_to_scan : dict
Maps hit id to list of scan ids
scan_hit_type_map : dict
Maps (hit id, scan id) to the type of mass shift
applied for this match
"""
super(TaskQueueFeeder, self).feed(hit_map, hit_to_scan, scan_hit_type_map)
self.done_event.set()
return
def feed_groups(self, hit_map, hit_to_scan, scan_hit_type_map, hit_to_group):
super(TaskQueueFeeder, self).feed_groups(hit_map, hit_to_scan, scan_hit_type_map, hit_to_group)
self.done_event.set()
return
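# Editor's note: usage sketch for the in-memory source (toy data; real hit
# objects come from the search pipeline and expose an `id` attribute):
#   class Hit(object):
#       def __init__(self, id_):
#           self.id = id_
#   hit = Hit(1)
#   source = TaskDeque()
#   source({1: hit}, {1: ['scan1']}, {('scan1', 1): 'mass_shift'})
#   source.pop()  # -> (hit, [('scan1', 'mass_shift')])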
| 37.438017
| 115
| 0.562252
| 1,221
| 9,060
| 3.984439
| 0.181818
| 0.057554
| 0.044399
| 0.048921
| 0.608222
| 0.583145
| 0.583145
| 0.57184
| 0.57184
| 0.57184
| 0
| 0.004263
| 0.352759
| 9,060
| 241
| 116
| 37.593361
| 0.825375
| 0.343709
| 0
| 0.459459
| 0
| 0
| 0.108389
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.027027
| 0.027027
| 0.306306
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf803380db0ef251842437e33a2f97c28f09e88
| 795
|
py
|
Python
|
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
core/render.py
|
ayyuriss/EigenFunctions
|
8cb6c22871fcddb633392c0a12691e960dad5143
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 16 09:32:56 2018
@author: gamer
"""
import pygame as pg
import numpy as np
import skimage.transform as transform
class Render(object):
def __init__(self, window_size=(360,480)):
pg.init()
self.h,self.w = window_size
self.display = pg.display.set_mode((self.w,self.h))
pg.display.set_caption("My Game")
def update(self,vect):
arr = transform.resize(vect,(self.h,self.w),mode='edge',clip=True
).transpose((1,0,2))
arr = (255*arr/np.max(arr)).astype('uint8')
img = pg.surfarray.make_surface(arr[:,:,:])
self.display.blit(img, (0,0))
pg.display.flip()
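# Editor's note: pygame.surfarray expects arrays indexed (width, height,
# channel), while skimage returns (height, width, channel); the
# transpose((1, 0, 2)) in update() performs that axis swap before blitting.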
def quit(self):
pg.quit()
| 24.090909
| 73
| 0.548428
| 109
| 795
| 3.917431
| 0.568807
| 0.035129
| 0.042155
| 0.046838
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049911
| 0.29434
| 795
| 33
| 74
| 24.090909
| 0.71123
| 0.093082
| 0
| 0
| 0
| 0
| 0.022409
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbf98d99386d0154fceea52ba139487cd08f628c
| 660
|
py
|
Python
|
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
scripts/branching_recursion.py
|
ithasnext/python_fractals
|
1eea4e464d2073ddd0f9dd2000af101cad23c0f8
|
[
"MIT"
] | null | null | null |
import pygame
import sys
def setup(w,h,r):
surf = pygame.Surface((w,h))
fract_circle(w/2, h/2, r, surf)
pygame.image.save(surf, str(r)+"_radius.png")
# branching recursion
def fract_circle(x,y, radius, surface):
if radius > 1:
pygame.draw.circle(surface, (0,0,255), (int(x),int(y)), int(radius), 1)
if radius > 8:
fract_circle(x+radius/2,y,radius/2,surface)
fract_circle(x-radius/2,y,radius/2,surface)
fract_circle(x,y+radius/2,radius/2,surface)
fract_circle(x,y-radius/2,radius/2,surface)
width = input("Enter a width: ")
height = input("Enter a height: ")
radius = input("Enter a radius: ")
setup(int(width), int(height), int(radius))
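# Editor's note: each call above radius 8 spawns four children with half the
# radius, so the recursion depth grows as log2(radius) and the circle count
# as roughly 4**depth; e.g. an initial radius of 64 draws up to
# 1 + 4 + 16 + 64 = 85 circles across four levels.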
| 27.5
| 73
| 0.689394
| 117
| 660
| 3.82906
| 0.290598
| 0.125
| 0.133929
| 0.087054
| 0.345982
| 0.303571
| 0.303571
| 0.303571
| 0.303571
| 0.303571
| 0
| 0.031196
| 0.125758
| 660
| 24
| 74
| 27.5
| 0.745234
| 0.028788
| 0
| 0
| 0
| 0
| 0.090625
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.111111
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbfa57bb471088a16fc1c6466ecf225acd101941
| 684
|
py
|
Python
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 8
|
2021-03-21T21:34:59.000Z
|
2022-03-25T20:51:47.000Z
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 7
|
2021-04-07T07:40:23.000Z
|
2022-01-22T21:05:40.000Z
|
WorkInProgress/MagnetoMeter/callibrate.py
|
SpudGunMan/LMS-uart-esp
|
95c905cc3dc99349b6b9e7bf0296a6fe0969d2b4
|
[
"BSD-3-Clause"
] | 5
|
2022-01-21T18:37:20.000Z
|
2022-02-17T00:35:28.000Z
|
from hmc5883l import HMC5883L
sensor = HMC5883L(scl=5, sda=4)
valmin=[0,0,0]
valmax=[0,0,0]
valscaled=[0,0,0]
def convert(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min
f=open("cal.csv",'w')
for count in range(3000):
valread = sensor.read()
# for i in range(3):
# if valread[i]<valmin[i]: valmin[i]=valread[i]
# if valread[i]>valmax[i]: valmax[i]=valread[i]
# valscaled[i]=convert(valread[i],valmin[i],valmax[i],-100,100)
#degrees, minutes = sensor.heading(valscaled[0], valscaled[1])
print("%04d"%count,valmin,valmax,valread)
f.write("%f,%f,%f\n"%valread)
f.close()
| 27.36
| 75
| 0.631579
| 117
| 684
| 3.606838
| 0.376068
| 0.028436
| 0.021327
| 0.07109
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066901
| 0.169591
| 684
| 24
| 76
| 28.5
| 0.676056
| 0.358187
| 0
| 0
| 0
| 0
| 0.050808
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.076923
| 0.076923
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbfba00ada95ca4b323dab1489addc7b7c3e9bf4
| 13,774
|
py
|
Python
|
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | 1
|
2021-09-30T01:18:51.000Z
|
2021-09-30T01:18:51.000Z
|
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | null | null | null |
pyriemann/utils/mean.py
|
qbarthelemy/pyRiemann
|
b35873b0a6cf9d81a1db09bbedb72a2fefe7d0c3
|
[
"BSD-3-Clause"
] | null | null | null |
"""Mean covariance estimation."""
from copy import deepcopy
import numpy as np
from .base import sqrtm, invsqrtm, logm, expm
from .ajd import ajd_pham
from .distance import distance_riemann
from .geodesic import geodesic_riemann
def _get_sample_weight(sample_weight, data):
"""Get the sample weights.
If none provided, weights are initialized to 1; otherwise, weights are normalized.
"""
if sample_weight is None:
sample_weight = np.ones(data.shape[0])
if len(sample_weight) != data.shape[0]:
raise ValueError("len of sample_weight must be equal to len of data.")
sample_weight /= np.sum(sample_weight)
return sample_weight
def mean_riemann(covmats, tol=10e-9, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Riemannian metric.
The procedure is similar to a gradient descent minimizing the sum of
squared Riemannian distances to the mean.
.. math::
\mathbf{C} = \arg\min{(\sum_i \delta_R ( \mathbf{C} , \mathbf{C}_i)^2)}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the gradient descent. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
# init
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
nu = 1.0
tau = np.finfo(np.float64).max
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter) and (nu > tol):
k = k + 1
C12 = sqrtm(C)
Cm12 = invsqrtm(C)
J = np.zeros((n_channels, n_channels))
for index in range(n_trials):
tmp = np.dot(np.dot(Cm12, covmats[index, :, :]), Cm12)
J += sample_weight[index] * logm(tmp)
crit = np.linalg.norm(J, ord='fro')
h = nu * crit
C = np.dot(np.dot(C12, expm(nu * J)), C12)
if h < tau:
nu = 0.95 * nu
tau = h
else:
nu = 0.5 * nu
return C
def mean_logeuclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the log-Euclidean
metric.
.. math::
\mathbf{C} = \exp{(\frac{1}{N} \sum_i \log{\mathbf{C}_i})}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * logm(covmats[index, :, :])
C = expm(T)
return C
def mean_kullback_sym(covmats, sample_weight=None):
"""Return the mean covariance matrix according to KL divergence.
This mean is the geometric mean between the Arithmetic and the Harmonic
mean, as shown in [1]_.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Moakher, Maher, and Philipp G. Batchelor. "Symmetric
positive-definite matrices: From geometry to applications and
visualization." In Visualization and Processing of Tensor Fields, pp.
285-298. Springer Berlin Heidelberg, 2006.
"""
C_Arithmetic = mean_euclid(covmats, sample_weight)
C_Harmonic = mean_harmonic(covmats, sample_weight)
C = geodesic_riemann(C_Arithmetic, C_Harmonic, 0.5)
return C
def mean_harmonic(covmats, sample_weight=None):
r"""Return the harmonic mean of a set of covariance matrices.
.. math::
\mathbf{C} = \left(\frac{1}{N} \sum_i {\mathbf{C}_i}^{-1}\right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
T = np.zeros((n_channels, n_channels))
for index in range(n_trials):
T += sample_weight[index] * np.linalg.inv(covmats[index, :, :])
C = np.linalg.inv(T)
return C
def mean_logdet(covmats, tol=10e-5, maxiter=50, init=None, sample_weight=None):
r"""Return the mean covariance matrix according to the logdet metric.
This is an iterative procedure where the update is:
.. math::
\mathbf{C} = \left(\sum_i \left( 0.5 \mathbf{C} + 0.5 \mathbf{C}_i \right)^{-1} \right)^{-1}
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
J += sample_weight[index] * np.linalg.inv(0.5 * Ci + 0.5 * C)
Cnew = np.linalg.inv(J)
crit = np.linalg.norm(Cnew - C, ord='fro')
C = Cnew
return C
def mean_wasserstein(covmats, tol=10e-4, maxiter=50, init=None,
sample_weight=None):
r"""Return the mean covariance matrix according to the Wasserstein metric.
This is an iterative procedure where the update is [1]_:
.. math::
\mathbf{K} = \left(\sum_i \left( \mathbf{K} \mathbf{C}_i \mathbf{K} \right)^{1/2} \right)^{1/2}
with :math:`\mathbf{K} = \mathbf{C}^{1/2}`.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param init: A covariance matrix used to initialize the iterative procedure. If None the Arithmetic mean is used
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
References
----------
.. [1] Barbaresco, F. "Geometric Radar Processing based on Frechet distance:
Information geometry versus Optimal Transport Theory", Radar Symposium
(IRS), 2011 Proceedings International.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
if init is None:
C = np.mean(covmats, axis=0)
else:
C = init
k = 0
K = sqrtm(C)
crit = np.finfo(np.float64).max
# stop when J<10^-9 or max iteration = 50
while (crit > tol) and (k < maxiter):
k = k + 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = np.dot(np.dot(K, Ci), K)
J += sample_weight[index] * sqrtm(tmp)
Knew = sqrtm(J)
crit = np.linalg.norm(Knew - K, ord='fro')
K = Knew
if k == maxiter:
print('Max iter reach')
C = np.dot(K, K)
return C
def mean_euclid(covmats, sample_weight=None):
r"""Return the mean covariance matrix according to the Euclidean metric :
.. math::
\mathbf{C} = \frac{1}{N} \sum_i \mathbf{C}_i
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
"""
return np.average(covmats, axis=0, weights=sample_weight)
def mean_ale(covmats, tol=10e-7, maxiter=50, sample_weight=None):
"""Return the mean covariance matrix according using the AJD-based
log-Euclidean Mean (ALE). See [1].
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop the gradient descent
:param maxiter: The maximum number of iteration, default 50
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.4
References
----------
[1] M. Congedo, B. Afsari, A. Barachant, M. Moakher, 'Approximate Joint
Diagonalization and Geometric Mean of Symmetric Positive Definite
Matrices', PLoS ONE, 2015
"""
sample_weight = _get_sample_weight(sample_weight, covmats)
n_trials, n_channels, _ = covmats.shape
crit = np.inf
k = 0
# init with AJD
B, _ = ajd_pham(covmats)
while (crit > tol) and (k < maxiter):
k += 1
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
update = np.diag(np.diag(expm(J)))
B = np.dot(B, invsqrtm(update))
crit = distance_riemann(np.eye(n_channels), update)
A = np.linalg.inv(B)
J = np.zeros((n_channels, n_channels))
for index, Ci in enumerate(covmats):
tmp = logm(np.dot(np.dot(B.T, Ci), B))
J += sample_weight[index] * tmp
C = np.dot(np.dot(A.T, expm(J)), A)
return C
def mean_alm(covmats, tol=1e-14, maxiter=100,
verbose=False, sample_weight=None):
r"""Return Ando-Li-Mathias (ALM) mean
Find the geometric mean recursively [1]_, generalizing from:
.. math::
\mathbf{C} = A^{\frac{1}{2}}(A^{-\frac{1}{2}}B^{\frac{1}{2}}A^{-\frac{1}{2}})^{\frac{1}{2}}A^{\frac{1}{2}}
This recursion can require a high number of iterations.
This is the adaptation of the Matlab code proposed by Dario Bini and
Bruno Iannazzo, http://bezout.dm.unipi.it/software/mmtoolbox/
Extremely slow, due to the recursive formulation.
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param tol: the tolerance to stop iterations
:param maxiter: maximum number of iteration, default 100
:param verbose: indicate when reaching maxiter
:param sample_weight: the weight of each sample
:returns: the mean covariance matrix
Notes
-----
.. versionadded:: 0.2.8.dev
References
----------
.. [1] T. Ando, C.-K. Li and R. Mathias, "Geometric Means", Linear Algebra
Appl. 385 (2004), 305-334.
""" # noqa
sample_weight = _get_sample_weight(sample_weight, covmats)
C = covmats
C_iter = np.zeros_like(C)
n_trials = covmats.shape[0]
if n_trials == 2:
alpha = sample_weight[1] / sample_weight[0] / 2
X = geodesic_riemann(covmats[0], covmats[1], alpha=alpha)
return X
else:
for k in range(maxiter):
for h in range(n_trials):
s = np.mod(np.arange(h, h + n_trials - 1) + 1, n_trials)
C_iter[h] = mean_alm(C[s], sample_weight=sample_weight[s])
norm_iter = np.linalg.norm(C_iter[0] - C[0], 2)
norm_c = np.linalg.norm(C[0], 2)
if (norm_iter / norm_c) < tol:
break
C = deepcopy(C_iter)
else:
if verbose:
print('Max number of iterations reached')
return C_iter.mean(axis=0)
def mean_identity(covmats, sample_weight=None):
r"""Return the identity matrix corresponding to the covmats sit size
.. math::
\mathbf{C} = \mathbf{I}_d
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:returns: the identity matrix of size n_channels
"""
C = np.eye(covmats.shape[1])
return C
def mean_covariance(covmats, metric='riemann', sample_weight=None, *args):
"""Return the mean covariance matrix according to the metric
:param covmats: Covariance matrices set, (n_trials, n_channels, n_channels)
:param metric: the metric (default 'riemann'), can be : 'riemann',
'logeuclid', 'euclid', 'logdet', 'identity', 'wasserstein', 'ale',
'alm', 'harmonic', 'kullback_sym' or a callable function
:param sample_weight: the weight of each sample
:param args: the argument passed to the sub function
:returns: the mean covariance matrix
"""
if callable(metric):
C = metric(covmats, sample_weight=sample_weight, *args)
else:
C = mean_methods[metric](covmats, sample_weight=sample_weight, *args)
return C
mean_methods = {'riemann': mean_riemann,
'logeuclid': mean_logeuclid,
'euclid': mean_euclid,
'identity': mean_identity,
'logdet': mean_logdet,
'wasserstein': mean_wasserstein,
'ale': mean_ale,
'harmonic': mean_harmonic,
'kullback_sym': mean_kullback_sym,
'alm': mean_alm}
def _check_mean_method(method):
"""checks methods """
if isinstance(method, str):
if method not in mean_methods.keys():
raise ValueError('Unknown mean method')
else:
method = mean_methods[method]
elif not hasattr(method, '__call__'):
raise ValueError('mean method must be a function or a string.')
return method
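# Editor's note: usage sketch for the dispatcher above (hypothetical random
# SPD matrices; assumes numpy):
#   import numpy as np
#   rng = np.random.RandomState(42)
#   trials = rng.randn(10, 4, 4)
#   covmats = np.array([t @ t.T + 4 * np.eye(4) for t in trials])  # make SPD
#   C = mean_covariance(covmats, metric='riemann')
#   C.shape  # (4, 4)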
| 32.795238
| 116
| 0.635908
| 1,934
| 13,774
| 4.405895
| 0.153568
| 0.097172
| 0.035911
| 0.048586
| 0.546884
| 0.501936
| 0.497359
| 0.47248
| 0.459688
| 0.441028
| 0
| 0.018517
| 0.251125
| 13,774
| 419
| 117
| 32.873508
| 0.807562
| 0.449107
| 0
| 0.380952
| 0
| 0
| 0.036361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068783
| false
| 0
| 0.031746
| 0
| 0.174603
| 0.010582
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbfebfa3a6e07ffb390ccc9c51bbfd1c5eb387b7
| 2,531
|
py
|
Python
|
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | null | null | null |
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | 4
|
2019-08-25T13:16:03.000Z
|
2021-01-07T23:20:24.000Z
|
img-xlsx.py
|
jherskovic/img-xlsx
|
ba301b43c8a3df2282622e70904fcb2d55bad2a3
|
[
"CNRI-Python"
] | null | null | null |
from PIL import Image
from openpyxl import Workbook
from openpyxl.styles import PatternFill
from openpyxl.utils import get_column_letter
from functools import partial
import sys
import argparse
def rgb_to_xls_hex(rgb_tuple, image_mode='RGB'):
if image_mode == 'RGB':
r, g, b = rgb_tuple
elif image_mode == 'RGBA':
# Ignore alpha channel in images that have one.
r, g, b, _ = rgb_tuple
else:
raise ValueError('Unsupported image mode: %s' % image_mode)  # the original fell through with r, g, b unbound
return f'{r:02x}{g:02x}{b:02x}'
def handle_arguments():
parser = argparse.ArgumentParser(description='Convert an image file to an Excel spreadsheet. I\'m sorry.')
parser.add_argument('--size', dest='size', type=int, default=64,
help='The number of cells for the largest dimension of the image. '
'Defaults to 64. Up to 512 works well for landscape images, up to 256 '
'for portrait images.')
parser.add_argument('--quantize', dest='quantize', metavar='NUM_COLORS', type=int, default=0,
help='Quantize the image (i.e. set an upper bound on the number of colors). '
'Max 255.')
parser.add_argument('image', metavar='FILENAME', type=str,
help='The image file to turn into an Excel spreadsheet. JPGs and PNGs work well.')
parser.add_argument('xlsx', metavar='FILENAME', type=str,
help='The output filename. Should end in .xlsx')
args = parser.parse_args()
return args
def convert(args):
im = Image.open(args.image)
maxsize = (args.size, args.size)
im.thumbnail(maxsize)
if args.quantize > 0 and args.quantize < 256:
quantized = im.quantize(colors=args.quantize)
im = quantized
if im.mode in ['P', 'L']:
image = im.convert("RGB")
else:
image = im
pixels=image.load()
pixel_converter = partial(rgb_to_xls_hex, image_mode=image.mode)
# Get the final image size
size_x, size_y = image.size
out_wb = Workbook()
out = out_wb.active
for y in range(size_y):
for x in range(size_x):
cell = out.cell(y+1, x+1)
rgb = pixels[x, y]
cell.fill = PatternFill("solid", fgColor=pixel_converter(rgb))
for col in range(1, size_x+1):
out.column_dimensions[get_column_letter(col)].width = 3
out_wb.save(args.xlsx)
if __name__ == "__main__":
args = handle_arguments()
convert(args)
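# Editor's note: command-line usage sketch (hypothetical filenames), matching
# the argparse definition above:
#   python img-xlsx.py --size 128 --quantize 64 photo.jpg photo.xlsx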
| 33.746667
| 111
| 0.600948
| 342
| 2,531
| 4.315789
| 0.391813
| 0.030488
| 0.04607
| 0.014905
| 0.054201
| 0.039295
| 0
| 0
| 0
| 0
| 0
| 0.016219
| 0.29356
| 2,531
| 74
| 112
| 34.202703
| 0.809284
| 0.027657
| 0
| 0
| 0
| 0
| 0.21057
| 0.008809
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.127273
| 0
| 0.218182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
bbff69aa5097c6b5253948d0d9595188ebebf3c2
| 7,502
|
py
|
Python
|
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
tests/test_multithread_access.py
|
TimChild/dat_analysis
|
2902e5cb2f2823a1c7a26faf6b3b6dfeb7633c73
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from dat_analysis.dat_object.dat_hdf import DatHDF
from dat_analysis.hdf_file_handler import HDFFileHandler
from dat_analysis.dat_object.make_dat import get_dat, get_dats, DatHandler
from tests.helpers import get_testing_Exp2HDF
from dat_analysis.data_standardize.exp_specific.Feb21 import Feb21Exp2HDF
import concurrent.futures
import os
import h5py
import numpy as np
import shutil
import time
from tests import helpers
dat_dir = os.path.abspath('fixtures/dats/2021Feb')
# Where to put outputs (i.e. DatHDFs)
output_dir = os.path.abspath('Outputs/test_multithread_access')
hdf_folder_path = os.path.join(output_dir, 'Dat_HDFs')
Testing_Exp2HDF = get_testing_Exp2HDF(dat_dir, output_dir, base_class=Feb21Exp2HDF)
def read(datnum: DatHDF):
dat = get_dat(datnum, exp2hdf=Testing_Exp2HDF)
val = dat._threaded_read_test()
return val
def write(datnum: DatHDF, value):
dat = get_dat(datnum, exp2hdf=Testing_Exp2HDF)
val = dat._threaded_write_test(value)
return val
def multithread_read(datnums):
with concurrent.futures.ThreadPoolExecutor(max_workers=len(datnums) + 3) as executor:
same_dat_results = [executor.submit(read, datnums[0]) for i in range(3)]
diff_dat_results = [executor.submit(read, num) for num in datnums]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
return same_dat_results, diff_dat_results
class TestMultiAccess(TestCase):
def setUp(self):
"""
Note: This actually requires quite a lot of things to be working to run (get_dats does quite a lot of work)
Returns:
"""
print('running setup')
# SetUp before tests
helpers.clear_outputs(output_dir)
self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=True)
# if __name__ == '__main__':
# helpers.clear_outputs(output_dir)
# self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=True)
# else:
# self.dats = get_dats([717, 719, 720, 723, 724, 725], exp2hdf=Testing_Exp2HDF, overwrite=False)
def tearDown(self) -> None:
DatHandler().clear_dats()
def set_test_attrs(self, dats, values):
for dat, value in zip(dats, values):
with HDFFileHandler(dat.hdf.hdf_path, 'r+') as f:
# with h5py.File(dat.hdf.hdf_path, 'r+') as f:
f.attrs['threading_test_var'] = value
def test_threaded_read(self):
"""Check multiple read threads can run at the same time"""
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)+10) as executor:
same_dat_results = [executor.submit(read, dats[0].datnum) for i in range(10)]
diff_dat_results = [executor.submit(read, dat.datnum) for dat in dats]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
self.assertEqual(same_dat_results, [dats[0].datnum]*10)
self.assertEqual(diff_dat_results, [dat.datnum for dat in dats])
def test_threaded_write(self):
"""Check multiple threads trying to write at same time don't clash"""
dats = self.dats
values = ['not set' for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
same_dat_writes = [executor.submit(write, dats[0].datnum, i) for i in range(10)]
value = read(dats[0].datnum)
self.assertTrue(value in [r.result() for r in same_dat_writes]) # Check that the final value was one of the writes at least
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_writes = executor.map(lambda args: write(*args), [(dat.datnum, dat.datnum) for dat in dats])
with concurrent.futures.ThreadPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_reads = executor.map(read, [dat.datnum for dat in dats])
diff_dat_writes = [r for r in diff_dat_writes]
diff_dat_reads = [r for r in diff_dat_reads]
self.assertEqual(diff_dat_reads, diff_dat_writes)
def test_multiprocess_read(self):
"""Check multiple read threads can run at the same time"""
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)+3) as executor:
same_dat_results = [executor.submit(read, dats[0].datnum) for i in range(3)]
diff_dat_results = [executor.submit(read, dat.datnum) for dat in dats]
same_dat_results = [r.result() for r in same_dat_results]
diff_dat_results = [r.result() for r in diff_dat_results]
self.assertEqual(same_dat_results, [dats[0].datnum]*3)
self.assertEqual(diff_dat_results, [dat.datnum for dat in dats])
def test_multiprocess_write_same_dat(self):
"""Check multiple threads trying to write at same time don't clash"""
dat = self.dats[0]
values = ['not set']
self.set_test_attrs([dat], values)
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
same_dat_writes = [executor.submit(write, dat.datnum, i) for i in range(3)]
value = read(dat.datnum)
self.assertTrue(value in [r.result() for r in same_dat_writes]) # Check that the final value was one of the writes at least
def test_multiprocess_write_multiple_dats(self):
"""Check multiple threads trying to write at same time don't clash"""
dats = self.dats
values = ['not set' for dat in dats]
self.set_test_attrs(dats, values)
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_writes = [executor.submit(write, dat.datnum, dat.datnum) for dat in dats]
with concurrent.futures.ProcessPoolExecutor(max_workers=len(self.dats)) as executor:
diff_dat_reads = [executor.submit(read, dat.datnum) for dat in dats]
diff_dat_writes = [r.result() for r in diff_dat_writes]
diff_dat_reads = [r.result() for r in diff_dat_reads]
self.assertEqual(diff_dat_reads, diff_dat_writes)
def test_hdf_write_inside_read(self):
dat = self.dats[0]
before, after = dat._write_inside_read_test()
print(before, after)
self.assertEqual(after, before + 1)
def test_hdf_read_inside_write(self):
dat = self.dats[0]
before, after = dat._read_inside_write_test()
print(before, after)
self.assertEqual(after, before + 1)
def test_multiprocess_multithread_read(self):
dats = self.dats
values = [dat.datnum for dat in dats]
self.set_test_attrs(dats, values)
datnums = [dat.datnum for dat in dats]
with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
results = [executor.submit(multithread_read, datnums) for i in range(3)]
for r in results:
result = r.result()
same_nums, diff_nums = result
self.assertEqual(same_nums, [datnums[0]]*3)
self.assertEqual(diff_nums, datnums)
| 40.551351
| 132
| 0.681818
| 1,070
| 7,502
| 4.579439
| 0.147664
| 0.04
| 0.022857
| 0.034286
| 0.696122
| 0.682041
| 0.670204
| 0.644898
| 0.617551
| 0.590408
| 0
| 0.019504
| 0.220874
| 7,502
| 184
| 133
| 40.771739
| 0.81882
| 0.119835
| 0
| 0.380165
| 0
| 0
| 0.017447
| 0.007958
| 0
| 0
| 0
| 0
| 0.099174
| 1
| 0.115702
| false
| 0
| 0.107438
| 0
| 0.256198
| 0.024793
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a51f8b0d486e0ae6fcf2e60b6ae5a88312c39cab
| 2,721
|
py
|
Python
|
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
early_projects/theater.py
|
JSBCCA/pythoncode
|
b7f2af8b0efc2d01d3e4568265eb3a5038a8679f
|
[
"MIT"
] | null | null | null |
import myshop
def movie(name):
two = round((9.99 * 1.07), 2)
print("Here is your Ticket and movie receipt.\n[Ticket for", name,
" - $" + str(two) + "]\nEnjoy the film!")
def concession():
print(" Refreshments:\n"
"Popcorn - $5.05\n"
"Coke - $2.19\n"
"Cookies - $1.50\n"
"Alright, you want to buy-\n")
a = int(input("How many Popcorn buckets? ").strip())
b = int(input("How many Cokes? ").strip())
c = int(input("How many Cookies? ").strip())
myshop.myshop(a, b, c)
def theater():
name = input("Hello! What is your name?").strip().capitalize()
film = input("Thank you for coming, " + name + "! " + "Welcome to "
"the Malco Theater!\n"
"What film would you like to go see today?\n"
" Films:\n"
"The Avengers: 8:00\n"
"Frozen: 7:00\n"
"Star Wars: 7:30\n"
"Harry Potter: 5:00\n"
"Shrek: 4:30\n"
"\n"
" Tickets: $9.99").strip().lower()
if film == "the avengers":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "frozen":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "star wars":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "harry potter":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
elif film == "shrek":
would = input("Would you like to buy some concessions?").strip().lower(
)
if would == "yes":
concession()
movie(film.title())
else:
print("Just the movie then? Alright.")
movie(film.title())
else:
print("Oh, did you change your mind...? Well then, have a nice day!")
theater()
| 32.011765
| 79
| 0.484748
| 317
| 2,721
| 4.160883
| 0.29653
| 0.068234
| 0.106141
| 0.063685
| 0.531463
| 0.52464
| 0.52464
| 0.52464
| 0.52464
| 0.52464
| 0
| 0.019733
| 0.366777
| 2,721
| 84
| 80
| 32.392857
| 0.745792
| 0
| 0
| 0.473684
| 0
| 0
| 0.346196
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039474
| false
| 0
| 0.013158
| 0
| 0.052632
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a51f8b0f6e2a6c5f1924803b2a7a2c961da769d4
| 43,469
|
py
|
Python
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 1
|
2021-04-02T14:36:12.000Z
|
2021-04-02T14:36:12.000Z
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 3
|
2018-02-23T19:47:31.000Z
|
2019-07-15T16:58:54.000Z
|
TSScall-master/TSScall.py
|
AdelmanLab/GetGeneAnnotation_GGA
|
ae8c8328640892a4e50408ba566dd95e70f18d52
|
[
"MIT"
] | 1
|
2017-01-06T20:16:07.000Z
|
2017-01-06T20:16:07.000Z
|
#!/usr/bin/env python
# CREATED BY CHRISTOPHER LAVENDER
# BASED ON WORK BY ADAM BURKHOLDER
# INTEGRATIVE BIOINFORMATICS, NIEHS
# WORKING OBJECT ORIENTED VERSION
import os
import math
import argparse
import sys
from operator import itemgetter
def writeBedHeader(file_name, description, OUTPUT):
OUTPUT.write('track name="{}" description="{}"\n'.format(
file_name,
description,
))
# STRAND_STATUS IS USED TO DETERMINE IF STRAND IS USED IN SORT
def sortList(input_list, strand_status):
if strand_status == 'sort_by_strand':
return sorted(input_list, key=lambda k: (
k['strand'],
k['chromosome'],
k['start']
))
elif strand_status == 'ignore_strand':
return sorted(input_list, key=lambda k: (
k['chromosome'],
k['start']
))
# ENTRY 1 IS LESS THAN ENTRY 2?
def isLessThan(entry_1, entry_2):
for val in ['strand', 'chromosome', 'start']:
if entry_1[val] < entry_2[val]:
return True
elif entry_1[val] > entry_2[val]:
return False
return False
# ENTRY 1 IS WITHIN ENTRY 2?
def isWithin(entry_1, entry_2):
if entry_1['strand'] == entry_2['strand'] and\
entry_1['chromosome'] == entry_2['chromosome']:
if entry_1['start'] >= entry_2['start'] and\
entry_1['end'] <= entry_2['end']:
return True
return False
def getID(base_name, count):
max_entries = 999999
feature_name = base_name + '_'
for i in range(len(str(count)), len(str(max_entries))):
feature_name += '0'
feature_name += str(count)
return feature_name
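# EXAMPLE: getID('obsTSS', 42) -> 'obsTSS_000042' (zero-padded to the six
# digits of max_entries)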
def readInReferenceAnnotation(annotation_file):
reference_annotation = dict()
all_gtf_keys = []
with open(annotation_file) as f:
for line in f:
if not line.startswith('#'): # Check for headers
chromosome, source, feature, start, end, score, strand, \
frame, attributes = line.strip().split('\t')
if feature == 'transcript' or feature == 'exon':
keys = []
values = []
gtf_fields = dict()
for entry in attributes.split(';')[:-1]:
# Check for key-value pair
if len(entry.split('\"')) > 1:
keys.append(entry.split('\"')[0].strip())
values.append(entry.split('\"')[1].strip())
for key, value in zip(keys, values):
gtf_fields[key] = [value]
for key in keys:
if key not in all_gtf_keys:
all_gtf_keys.append(key)
tr_id = gtf_fields.pop('transcript_id')[0]
gene_id = gtf_fields.pop('gene_id')[0]
for val in ('transcript_id', 'gene_id'):
all_gtf_keys.remove(val)
if feature == 'exon':
ref_id = (tr_id, chromosome)
if ref_id not in reference_annotation:
reference_annotation[ref_id] = {
'chromosome': chromosome,
'strand': strand,
'exons': [],
'gene_id': gene_id,
'gtf_fields': gtf_fields,
}
reference_annotation[ref_id]['exons'].append(
[int(start), int(end)]
)
for ref_id in reference_annotation:
t = reference_annotation[ref_id]
# TAKE ADDITIONAL INFORMATION FROM EXON LISTS
t['exons'].sort(key=lambda x: x[0])
t['tr_start'] = t['exons'][0][0]
t['tr_end'] = t['exons'][len(t['exons'])-1][1]
if t['strand'] == '+':
t['tss'] = t['tr_start']
if t['strand'] == '-':
t['tss'] = t['tr_end']
t['gene_length'] = t['tr_end'] - t['tr_start']
# POPULATE MISSING GTF FIELD ENTRIES
for key in all_gtf_keys:
if key not in t['gtf_fields']:
t['gtf_fields'][key] = [None]
return reference_annotation, all_gtf_keys
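# EXAMPLE: an attributes field such as
#   gene_id "G1"; transcript_id "T1"; gene_name "Foo";
# is parsed so that 'transcript_id' and 'gene_id' are popped out (tr_id 'T1',
# gene_id 'G1'), leaving gtf_fields == {'gene_name': ['Foo']} with 'gene_name'
# tracked in all_gtf_keys.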
class TSSCalling(object):
def __init__(self, **kwargs):
self.forward_bedgraph = kwargs['forward_bedgraph']
self.reverse_bedgraph = kwargs['reverse_bedgraph']
self.chrom_sizes = kwargs['chrom_sizes']
self.annotation_file = kwargs['annotation_file']
self.output_bed = kwargs['output_bed']
assert os.path.exists(self.forward_bedgraph)
assert os.path.exists(self.reverse_bedgraph)
assert os.path.exists(self.chrom_sizes)
if self.annotation_file:
assert os.path.exists(self.annotation_file)
self.fdr_threshold = kwargs['fdr']
self.false_positives = kwargs['false_positives']
self.utss_filter_size = kwargs['utss_filter_size']
self.utss_search_window = kwargs['utss_search_window']
self.bidirectional_threshold = kwargs['bidirectional_threshold']
self.cluster_threshold = kwargs['cluster_threshold']
self.detail_file = kwargs['detail_file']
self.cluster_bed = kwargs['cluster_bed']
self.call_method = kwargs['call_method']
self.annotation_join_distance = kwargs['annotation_join_distance']
self.annotation_search_window = kwargs['annotation_search_window']
self.bin_winner_size = kwargs['bin_winner_size']
self.set_read_threshold = kwargs['set_read_threshold']
try:
int(self.set_read_threshold)
        except (TypeError, ValueError):
pass
else:
self.set_read_threshold = int(self.set_read_threshold)
# EVALUATE THRESHOLD METHOD ARGUMENTS; IF NONE, SET FDR_THRESHOLD
# AT 0.001
implied_threshold_methods = 0
for val in [
self.fdr_threshold,
self.false_positives,
self.set_read_threshold]:
implied_threshold_methods += int(bool(val))
if implied_threshold_methods == 1:
pass
elif implied_threshold_methods > 1:
raise ValueError('More than 1 read threshold method implied!!')
elif implied_threshold_methods == 0:
self.fdr_threshold = 0.001
self.tss_list = []
self.reference_annotation = None
self.gtf_attribute_fields = []
self.annotated_tss_count = 0
self.unannotated_tss_count = 0
self.tss_cluster_count = 0
self.unobserved_ref_count = 0
self.execute()
def createSearchWindowsFromAnnotation(self):
# VALUE USED TO MERGE SEARCH WINDOWS BY PROXIMITY
join_window = self.annotation_join_distance
window_size = self.annotation_search_window
current_entry = sorted(self.reference_annotation, key=lambda k: (
self.reference_annotation[k]['strand'],
self.reference_annotation[k]['chromosome'],
self.reference_annotation[k]['tss'],
# self.reference_annotation[k]['gene'],
k,
))
# POPULATE TRANSCRIPT LIST FROM SORTED LIST;
# ADD SEARCH WINDOW EDGES TO ENTRIES
transcript_list = []
for ref in current_entry:
transcript_list.append({
'transcript_id': [ref[0]],
'chromosome':
self.reference_annotation[ref]['chromosome'],
'tss': [self.reference_annotation[ref]['tss']],
'strand': self.reference_annotation[ref]['strand'],
'gene_id': [self.reference_annotation[ref]['gene_id']],
'hits': [],
'gtf_fields': self.reference_annotation[ref]['gtf_fields'],
})
if self.reference_annotation[ref]['strand'] == '+':
transcript_list[-1]['start'] = \
transcript_list[-1]['tss'][0] - window_size
# MAKE SURE WINDOW END DOES NOT GO PAST TRANSCRIPT END
end = transcript_list[-1]['tss'][0] + window_size
if end > self.reference_annotation[ref]['tr_end']:
transcript_list[-1]['end'] = \
self.reference_annotation[ref]['tr_end']
else:
transcript_list[-1]['end'] = end
elif self.reference_annotation[ref]['strand'] == '-':
# MAKE SURE WINDOW START DOES NOT GO PAST TRANSCRIPT START
start = transcript_list[-1]['tss'][0] - window_size
if start < self.reference_annotation[ref]['tr_start']:
                    transcript_list[-1]['start'] = \
                        self.reference_annotation[ref]['tr_start']
else:
transcript_list[-1]['start'] = start
transcript_list[-1]['end'] = \
transcript_list[-1]['tss'][0] + window_size
merged_windows = []
# MERGE WINDOWS BASED PROXIMITY;
# IF WINDOWS ARE WITHIN JOIN THRESHOLD, THEY ARE MERGED;
# IF NOT, BUT STILL OVERLAPPING, MIDPOINT BECOMES BOUNDARY
working_entry = transcript_list.pop(0)
while len(transcript_list) != 0:
next_entry = transcript_list.pop(0)
if (working_entry['strand'] == next_entry['strand']) and \
(working_entry['chromosome'] == next_entry['chromosome']):
if working_entry['tss'][-1] + join_window >= \
next_entry['tss'][0]:
working_entry['transcript_id'].append(
next_entry['transcript_id'][0]
)
working_entry['gene_id'].append(
next_entry['gene_id'][0]
)
for key in working_entry['gtf_fields']:
working_entry['gtf_fields'][key].append(
next_entry['gtf_fields'][key][0]
)
# working_entry['genes'].append(next_entry['genes'][0])
working_entry['end'] = next_entry['end']
working_entry['tss'].append(next_entry['tss'][0])
elif working_entry['end'] >= next_entry['start']:
working_entry['end'] = int(math.floor(
(working_entry['end']+next_entry['start'])/2
))
next_entry['start'] = working_entry['end'] + 1
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
merged_windows.append(working_entry)
return merged_windows
def combineAndSortBedGraphs(self, forward_bedgraph, reverse_bedgraph):
def readBedGraph(bedgraph_list, bedgraph_fn, strand):
with open(bedgraph_fn) as f:
for line in f:
if not ('track' in line or line == '\n'):
chromosome, start, end, reads = line.strip().split()
for i in range(int(start)+1, int(end)+1):
bedgraph_list.append({
'chromosome': chromosome,
'start': i,
'end': i,
'reads': int(reads),
'strand': strand
})
combined_list = []
readBedGraph(combined_list, forward_bedgraph, '+')
readBedGraph(combined_list, reverse_bedgraph, '-')
return sortList(combined_list, 'sort_by_strand')
# CONSIDERS TAB-DELIMITED CHROM_SIZES FILE (UCSC)
def findGenomeSize(self, chrom_sizes):
genome_size = 0
with open(chrom_sizes) as f:
for line in f:
genome_size += int(line.strip().split()[1])
return genome_size
# FIND THRESHOLD FOR TSS CALLING, BASED ON
# JOTHI ET AL. (2008) NUCLEIC ACIDS RES 36: 5221-5231.
def findReadThreshold(self, bedgraph_list, genome_size):
def countLoci(bedgraph_list, value):
loci = 0
for entry in bedgraph_list:
if entry['reads'] >= value:
loci += 1
return loci
if self.fdr_threshold or self.false_positives:
self.false_positives = 1
mappable_size = 0.8 * 2 * float(genome_size)
read_count = 0
for entry in bedgraph_list:
read_count += entry['reads']
expected_count = float(read_count)/mappable_size
cume_probability = ((expected_count**0)/math.factorial(0)) * \
math.exp(-expected_count)
threshold = 1
while True:
probability = 1 - cume_probability
expected_loci = probability * mappable_size
if self.fdr_threshold:
observed_loci = countLoci(bedgraph_list, threshold)
fdr = float(expected_loci)/float(observed_loci)
if fdr < self.fdr_threshold:
return threshold
else:
if expected_loci < self.false_positives:
return threshold
cume_probability += \
((expected_count**threshold)/math.factorial(threshold)) * \
math.exp(-expected_count)
threshold += 1
else:
return self.set_read_threshold
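    # SKETCH OF THE MATH ABOVE: per-position reads are modeled as Poisson with
    # rate expected_count = read_count / mappable_size. At threshold t,
    # cume_probability is sum_{k=0..t-1} P(X = k), so probability = P(X >= t);
    # expected_loci = P(X >= t) * mappable_size is the loci count expected
    # from noise alone, and FDR = expected_loci / observed_loci. Equivalently,
    # with scipy (an assumption, not used here):
    #   expected_loci = (1 - poisson.cdf(t - 1, expected_count)) * mappable_size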
# FIND INTERSECTION WITH SEARCH_WINDOWS, BEDGRAPH_LIST;
# HITS ARE ADDED TO WINDOW_LIST, REQUIRES SORTED LIST
def findIntersectionWithBedGraph(self, search_windows, bedgraph_list):
search_index = 0
bedgraph_index = 0
while (search_index < len(search_windows)) and \
(bedgraph_index < len(bedgraph_list)):
if isWithin(bedgraph_list[bedgraph_index],
search_windows[search_index]):
search_windows[search_index]['hits'].append([
bedgraph_list[bedgraph_index]['start'],
bedgraph_list[bedgraph_index]['reads']
])
bedgraph_index += 1
else:
if isLessThan(bedgraph_list[bedgraph_index],
search_windows[search_index]):
bedgraph_index += 1
else:
search_index += 1
# CREATE WINDOWS ABOUT KNOWN TSS FOR UNANNOTATED TSSs CALLING;
# CONSIDERS ANNOTATED AND CALLED TSSs IN INSTANCE LISTS
def createFilterWindowsFromAnnotationAndCalledTSSs(self):
filter_windows = []
if self.reference_annotation:
for transcript in self.reference_annotation:
filter_windows.append({
'strand': self.reference_annotation[transcript]['strand'],
'chromosome':
self.reference_annotation[transcript]['chromosome'],
'start': self.reference_annotation[transcript]['tss'] -
self.utss_filter_size,
'end': self.reference_annotation[transcript]['tss'] +
self.utss_filter_size
})
if self.tss_list != []:
for tss in self.tss_list:
filter_windows.append({
'strand': tss['strand'],
'chromosome': tss['chromosome'],
'start': tss['start'] - self.utss_filter_size,
'end': tss['start'] + self.utss_filter_size
})
return sortList(filter_windows, 'sort_by_strand')
def filterBedGraphListByWindows(self, bedgraph_list, filter_windows):
# FILTER BY OVERLAP WITH FILTER WINDOWS
if filter_windows != []:
filter_index = 0
bedgraph_index = 0
working_list = []
while (filter_index < len(filter_windows)) and \
(bedgraph_index < len(bedgraph_list)):
if isWithin(bedgraph_list[bedgraph_index],
filter_windows[filter_index]):
bedgraph_index += 1
else:
if isLessThan(bedgraph_list[bedgraph_index],
filter_windows[filter_index]):
working_list.append(bedgraph_list[bedgraph_index])
bedgraph_index += 1
else:
filter_index += 1
bedgraph_list = working_list
return bedgraph_list
# CREATES WINDOWS FOR UNANNOTATED TSS CALLING
def createUnannotatedSearchWindowsFromBedgraph(self,
bedgraph_list,
read_threshold):
windows = []
for entry in bedgraph_list:
if entry['reads'] > read_threshold:
windows.append({
'strand': entry['strand'],
'chromosome': entry['chromosome'],
'start': entry['start'] - self.utss_search_window,
'end': entry['end'] + self.utss_search_window,
'hits': []
})
# MERGE OVERLAPPING WINDOWS
merged_windows = []
working_entry = windows.pop(0)
while len(windows) != 0:
next_entry = windows.pop(0)
if (working_entry['strand'] == next_entry['strand']) and\
(working_entry['chromosome'] == next_entry['chromosome']):
if working_entry['end'] >= next_entry['start']:
working_entry['end'] = next_entry['end']
else:
merged_windows.append(working_entry)
working_entry = next_entry
else:
merged_windows.append(working_entry)
working_entry = next_entry
return merged_windows
# SORT CALLED TSSs AND ASSOCIATE INTO BIDIRECTIONAL PAIRS
def associateBidirectionalTSSs(self):
self.tss_list = sortList(self.tss_list, 'ignore_strand')
for i in range(len(self.tss_list)-1):
if self.tss_list[i]['chromosome'] == \
self.tss_list[i+1]['chromosome']:
if self.tss_list[i]['strand'] == '-' and \
self.tss_list[i+1]['strand'] == '+':
if self.tss_list[i]['start'] + \
self.bidirectional_threshold >= \
self.tss_list[i+1]['start']:
distance = abs(self.tss_list[i]['start'] -
self.tss_list[i+1]['start'])
self.tss_list[i]['divergent partner'] = \
self.tss_list[i+1]['id']
self.tss_list[i+1]['divergent partner'] = \
self.tss_list[i]['id']
self.tss_list[i]['divergent distance'] = distance
self.tss_list[i+1]['divergent distance'] = distance
if self.tss_list[i]['strand'] == '+' and \
self.tss_list[i+1]['strand'] == '-':
if self.tss_list[i]['start'] + \
self.bidirectional_threshold >= \
self.tss_list[i+1]['start']:
distance = abs(self.tss_list[i]['start'] -
self.tss_list[i+1]['start'])
self.tss_list[i]['convergent partner'] = \
self.tss_list[i+1]['id']
self.tss_list[i+1]['convergent partner'] = \
self.tss_list[i]['id']
self.tss_list[i]['convergent distance'] = distance
self.tss_list[i+1]['convergent distance'] = distance
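    # EXAMPLE: on one chromosome, a '-' TSS followed in coordinate order by a
    # '+' TSS within bidirectional_threshold is recorded as a divergent pair;
    # a '+' TSS followed by a '-' TSS is recorded as convergent. Each partner
    # stores the other's id and the distance between them.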
def findTSSExonIntronOverlap(self):
exons = []
introns = []
if self.reference_annotation:
for transcript in self.reference_annotation:
for i in range(len(
self.reference_annotation[transcript]['exons'])):
strand = self.reference_annotation[transcript]['strand']
chromosome =\
self.reference_annotation[transcript]['chromosome']
start =\
self.reference_annotation[transcript]['exons'][i][0]
end = self.reference_annotation[transcript]['exons'][i][1]
exons.append({
'strand': strand,
'chromosome': chromosome,
'start': start,
'end': end
})
for i in range(
len(self.reference_annotation[transcript]['exons'])-1):
strand = self.reference_annotation[transcript]['strand']
chromosome =\
self.reference_annotation[transcript]['chromosome']
start = \
self.reference_annotation[transcript]['exons'][i][1]+1
end = \
self.reference_annotation[transcript]['exons'][i+1][0]\
- 1
introns.append({
'strand': strand,
'chromosome': chromosome,
'start': start,
'end': end
})
exons = sortList(exons, 'sort_by_strand')
introns = sortList(introns, 'sort_by_strand')
self.tss_list = sortList(self.tss_list, 'sort_by_strand')
def findFeatureOverlap(tss_list, feature_list, feature_key):
if feature_list == []:
for tss in tss_list:
tss[feature_key] = False
else:
feature_index = 0
tss_index = 0
while (feature_index < len(feature_list)) and\
(tss_index < len(tss_list)):
if isWithin(tss_list[tss_index],
feature_list[feature_index]):
tss_list[tss_index][feature_key] = True
tss_index += 1
else:
if isLessThan(tss_list[tss_index],
feature_list[feature_index]):
tss_list[tss_index][feature_key] = False
tss_index += 1
else:
feature_index += 1
findFeatureOverlap(self.tss_list, exons, 'exon_overlap')
findFeatureOverlap(self.tss_list, introns, 'intron_overlap')
# ASSOCIATE TSSs INTO CLUSTERS BY PROXIMITY;
# ADD TSS CLUSTER AND NUMBER OF TSSs IN ASSOCIATED CLUSTER IN TSS ENTRY
def associateTSSsIntoClusters(self):
cluster_count = dict()
self.tss_list = sortList(self.tss_list, 'ignore_strand')
current_cluster = getID('cluster', self.tss_cluster_count)
self.tss_cluster_count += 1
self.tss_list[0]['cluster'] = current_cluster
cluster_count[current_cluster] = 1
for i in range(1, len(self.tss_list)):
if not (self.tss_list[i-1]['chromosome'] ==
self.tss_list[i]['chromosome'] and
self.tss_list[i-1]['start'] + self.cluster_threshold >=
self.tss_list[i]['start']):
current_cluster = getID('cluster', self.tss_cluster_count)
self.tss_cluster_count += 1
self.tss_list[i]['cluster'] = current_cluster
if current_cluster not in cluster_count:
cluster_count[current_cluster] = 1
else:
cluster_count[current_cluster] += 1
for tss in self.tss_list:
tss['cluster_count'] = cluster_count[tss['cluster']]
def createDetailFile(self):
def checkHits(window):
for hit in window['hits']:
if hit[1] >= self.read_threshold:
return True
return False
def writeUnobservedEntry(OUTPUT, tss, tr_ids, gene_ids, window):
tss_id = getID('annoTSS', self.unobserved_ref_count)
self.unobserved_ref_count += 1
transcripts = tr_ids[0]
genes = gene_ids[0]
for i in range(1, len(tr_ids)):
transcripts += ';' + tr_ids[i]
genes += ';' + gene_ids[i]
reads = 0
for hit in window['hits']:
if int(tss) == int(hit[0]):
reads = hit[1]
OUTPUT.write(('{}' + '\t{}' * 15)
.format(
tss_id,
'unobserved reference TSS',
transcripts,
genes,
window['strand'],
window['chromosome'],
str(tss),
str(reads),
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
'NA',
))
for key in self.gtf_attribute_fields:
# OUTPUT.write('\t' + ';'.join(window['gtf_fields'][key]))
OUTPUT.write('\t' + ';'.join(['None' if v is None else v for
v in window['gtf_fields'][key]]))
OUTPUT.write('\n')
# self.findTSSExonIntronOverlap()
# self.associateTSSsIntoClusters()
# Remove GTF fields 'exon_number' and 'exon_id' if present
skip_fields = ['exon_number', 'exon_id']
for entry in skip_fields:
if entry in self.gtf_attribute_fields:
self.gtf_attribute_fields.remove(entry)
with open(self.detail_file, 'w') as OUTPUT:
OUTPUT.write(
('{}' + '\t{}' * 15)
.format(
'TSS ID',
'Type',
'Transcripts',
'Gene ID',
'Strand',
'Chromosome',
'Position',
'Reads',
'Divergent?',
'Divergent partner',
'Divergent distance',
'Convergent?',
'Convergent partner',
'Convergent distance',
'TSS cluster',
'TSSs in associated cluster',
))
for field in self.gtf_attribute_fields:
OUTPUT.write('\t' + field)
OUTPUT.write('\n')
for tss in self.tss_list:
OUTPUT.write(tss['id'])
OUTPUT.write('\t' + tss['type'])
for key in ('transcript_id', 'gene_id'):
if key in tss:
OUTPUT.write('\t' + ';'.join(tss[key]))
else:
OUTPUT.write('\tNA')
for entry in ['strand', 'chromosome', 'start', 'reads']:
OUTPUT.write('\t' + str(tss[entry]))
if 'divergent partner' in tss:
OUTPUT.write('\t{}\t{}\t{}'.format(
'True',
tss['divergent partner'],
str(tss['divergent distance']),
))
else:
OUTPUT.write('\tFalse\tNA\tNA')
if 'convergent partner' in tss:
OUTPUT.write('\t{}\t{}\t{}'.format(
'True',
tss['convergent partner'],
str(tss['convergent distance']),
))
else:
OUTPUT.write('\tFalse\tNA\tNA')
# OUTPUT.write('\t' + str(
# tss['exon_overlap'] or tss['intron_overlap']))
for entry in [
'cluster',
'cluster_count']:
OUTPUT.write('\t' + str(tss[entry]))
if 'gtf_fields' in tss:
for key in self.gtf_attribute_fields:
# OUTPUT.write('\t' + ';'.join(tss['gtf_fields'][key]))
OUTPUT.write('\t' + ';'.join(
['None' if v is None else
v for v in tss['gtf_fields'][key]]
))
else:
for key in self.gtf_attribute_fields:
OUTPUT.write('\tNA')
OUTPUT.write('\n')
if self.annotation_file:
for window in self.ref_search_windows:
if not checkHits(window):
window_tss = []
for tr_id, gene_id, tss in zip(window['transcript_id'],
window['gene_id'],
window['tss']):
window_tss.append({
'transcript_id': tr_id,
'gene_id': gene_id,
'tss': int(tss),
})
window_tss.sort(key=itemgetter('tss'))
current_tss = window_tss[0]['tss']
current_tr_ids = [window_tss[0]['transcript_id']]
current_genes = [window_tss[0]['gene_id']]
window_index = 1
while window_index < len(window_tss):
if current_tss == window_tss[window_index]['tss']:
current_tr_ids.append(
window_tss[window_index]['transcript_id'])
current_genes.append(
window_tss[window_index]['gene_id'])
else:
writeUnobservedEntry(OUTPUT, current_tss,
current_tr_ids,
current_genes,
window)
current_tss = window_tss[window_index]['tss']
current_tr_ids = \
[window_tss[window_index]['transcript_id']]
                            current_genes = \
                                [window_tss[window_index]['gene_id']]
window_index += 1
writeUnobservedEntry(OUTPUT, current_tss,
current_tr_ids, current_genes,
window)
def writeClusterBed(self, tss_list, cluster_bed):
clusters = dict()
with open(cluster_bed, 'w') as OUTPUT:
writeBedHeader(
cluster_bed.split('.bed')[0],
'TSScall clusters',
OUTPUT,
)
for tss in tss_list:
if tss['cluster'] in clusters:
clusters[tss['cluster']]['tss'].append(tss['start'])
else:
clusters[tss['cluster']] = {
'chromosome': tss['chromosome'],
'tss': [tss['start']],
}
for cluster in sorted(clusters):
tss = sorted(clusters[cluster]['tss'])
OUTPUT.write('{}\t{}\t{}\t{}\n'.format(
clusters[cluster]['chromosome'],
str(tss[0] - 1),
str(tss[-1]),
cluster,
))
def writeBedFile(self, tss_list, output_bed):
with open(output_bed, 'w') as OUTPUT:
writeBedHeader(
output_bed.split('.bed')[0],
'TSScall TSSs',
OUTPUT,
)
for tss in tss_list:
OUTPUT.write('{}\t{}\t{}\t{}\t{}\t{}\n'.format(
tss['chromosome'],
str(tss['start'] - 1),
str(tss['start']),
tss['id'],
'0',
tss['strand']
))
# FROM HITS IN SEARCH WINDOWS, CALL TSSs
# COUNT IS RETURNED IN ORDER TO UPDATE INSTANCE VARIABLES
def callTSSsFromIntersection(self, intersection, read_threshold, base_name,
count, tss_type, nearest_allowed):
def callTSS(hits, strand):
if self.call_method == 'global':
max_reads = float('-inf')
max_position = None
for hit in hits:
if hit[1] > max_reads:
max_position = hit[0]
max_reads = hit[1]
elif hit[1] == max_reads:
if strand == '+':
if hit[0] < max_position:
max_position = hit[0]
elif strand == '-':
if hit[0] > max_position:
max_position = hit[0]
return max_position, max_reads
if self.call_method == 'bin_winner':
bin_size = self.bin_winner_size
bins = []
# MAKE BINS
hits.sort(key=itemgetter(0))
for i in range(len(hits)):
bins.append({
'total_reads': 0,
'bin_hits': []
})
for j in range(i, len(hits)):
if abs(hits[i][0] - hits[j][0]) <= bin_size:
bins[-1]['total_reads'] += hits[j][1]
bins[-1]['bin_hits'].append(hits[j])
# SELECT BIN WITH HIGHEST TOTAL READS
# BECAUSE SORTED, WILL TAKE UPSTREAM BIN IN TIES
max_bin_reads = float('-inf')
max_bin_index = None
for i, entry in enumerate(bins):
if entry['total_reads'] > max_bin_reads:
max_bin_index = i
max_bin_reads = entry['total_reads']
# GET LOCAL WINNER
# BECAUSE SORTED, WILL TAKE UPSTREAM TSS IN TIES
max_reads = float('-inf')
max_position = None
for hit in bins[max_bin_index]['bin_hits']:
if hit[1] > max_reads:
max_position = hit[0]
max_reads = hit[1]
return max_position, max_reads
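        # EXAMPLE (bin_winner): with hits [(100, 5), (150, 5), (400, 9)] and
        # bin_size 200, the bin started at 100 holds {100, 150} for 10 total
        # reads and beats the single-hit bin at 400 (9 reads), so the TSS is
        # called at the upstream maximum: position 100 with 5 reads.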
# ITERATE THROUGH WINDOWS IN INTERSECTION
for entry in intersection:
entry_hits = entry['hits']
# LOOP WHILE 'HITS' IS POPULATED
while len(entry_hits) != 0:
# CALL A TSS
tss_position, tss_reads = callTSS(entry_hits, entry['strand'])
if tss_reads >= read_threshold:
self.tss_list.append({
'id': getID(base_name, count),
'type': tss_type,
'start': tss_position,
'end': tss_position,
'reads': tss_reads,
})
# IF VAL IN ENTRY, ADD TO DICT IN TSS LIST
for val in ['transcript_id', 'gene_id', 'strand',
'chromosome', 'gtf_fields']:
if val in entry:
self.tss_list[-1][val] = entry[val]
count += 1
# GO THROUGH HITS, KEEP THOSE WITHIN NEAREST_ALLOWED
temp = []
for hit in entry_hits:
if abs(hit[0] - tss_position) > nearest_allowed:
temp.append(hit)
entry_hits = temp
return count
def callTSSsFromAnnotation(self, bedgraph_list, read_threshold):
self.ref_search_windows = self.createSearchWindowsFromAnnotation()
self.findIntersectionWithBedGraph(self.ref_search_windows,
bedgraph_list)
self.annotated_tss_count = self.callTSSsFromIntersection(
self.ref_search_windows,
read_threshold,
'obsTSS',
self.annotated_tss_count,
'called from reference window',
float('inf')
)
def callUnannotatedTSSs(self, bedgraph_list, read_threshold):
filter_windows = self.createFilterWindowsFromAnnotationAndCalledTSSs()
filtered_bedgraph = self.filterBedGraphListByWindows(bedgraph_list,
filter_windows)
unannotated_search_windows =\
self.createUnannotatedSearchWindowsFromBedgraph(filtered_bedgraph,
read_threshold)
self.findIntersectionWithBedGraph(unannotated_search_windows,
filtered_bedgraph)
self.unannotated_tss_count = self.callTSSsFromIntersection(
unannotated_search_windows,
read_threshold,
'uTSS',
self.unannotated_tss_count,
'unannotated',
self.utss_search_window
)
def execute(self):
sys.stdout.write('Reading in bedGraph files...\n')
bedgraph_list = self.combineAndSortBedGraphs(self.forward_bedgraph,
self.reverse_bedgraph)
genome_size = self.findGenomeSize(self.chrom_sizes)
sys.stdout.write('Calculating read threshold...\n')
self.read_threshold = \
self.findReadThreshold(bedgraph_list, genome_size)
sys.stdout.write('Read threshold set to {}\n'.format(
str(self.read_threshold)))
if self.annotation_file:
sys.stdout.write('Reading in annotation file...\n')
self.reference_annotation, self.gtf_attribute_fields =\
readInReferenceAnnotation(self.annotation_file)
sys.stdout.write('Calling TSSs from annotation...\n')
self.callTSSsFromAnnotation(bedgraph_list, self.read_threshold)
sys.stdout.write('{} TSSs called from annotation\n'.format(
str(self.annotated_tss_count)))
sys.stdout.write('Calling unannotated TSSs...\n')
self.callUnannotatedTSSs(bedgraph_list, self.read_threshold)
sys.stdout.write('{} unannotated TSSs called\n'.format(
str(self.unannotated_tss_count)))
sys.stdout.write('Associating bidirectional TSSs...\n')
self.associateBidirectionalTSSs()
self.associateTSSsIntoClusters()
if self.detail_file:
sys.stdout.write('Creating detail file...\n')
self.createDetailFile()
if self.cluster_bed:
sys.stdout.write('Creating cluster bed...\n')
self.writeClusterBed(self.tss_list, self.cluster_bed)
sys.stdout.write('Creating output bed...\n')
self.writeBedFile(self.tss_list, self.output_bed)
sys.stdout.write('TSS calling complete\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--fdr', default=None, type=float,
help='set read threshold by FDR (FLOAT) (Default \
method: less than 0.001)')
parser.add_argument('--false_positives', default=None, type=int,
help='set read threshold by false positive count')
parser.add_argument('--utss_filter_size', default=750, type=int,
help='set uTSS filter size; any read within INTEGER \
of obsTSS/annoTSS is filtered prior to uTSS calling \
(Default: 750)')
parser.add_argument('--utss_search_window', default=250, type=int,
help='set uTSS search window size to INTEGER \
(Default: 250)')
parser.add_argument('--bidirectional_threshold', default=1000, type=int,
help='INTEGER threshold to associate bidirectional \
TSSs (Default: 1000)')
parser.add_argument('--detail_file', default=None, type=str,
help='create a tab-delimited TXT file with details \
about TSS calls')
parser.add_argument('--cluster_threshold', default=1000, type=int,
help='INTEGER threshold to associate TSSs into \
clusters (Default: 1000)')
parser.add_argument('--annotation_file', '-a', type=str,
help='annotation in GTF format')
parser.add_argument('--call_method', type=str, default='bin_winner',
choices=['global', 'bin_winner'],
help='TSS calling method to use (Default: bin_winner)')
parser.add_argument('--annotation_join_distance', type=int, default=200,
                        help='set INTEGER distance threshold for joining search \
windows from annotation (Default: 200)')
parser.add_argument('--annotation_search_window', type=int, default=1000,
help='set annotation search window size to INTEGER \
(Default: 1000)')
parser.add_argument('--set_read_threshold', type=float, default=None,
help='set read threshold for TSS calling to FLOAT; do \
not determine threshold from data')
parser.add_argument('--bin_winner_size', type=int, default=200,
help='set bin size for call method bin_winner \
(Default: 200)')
parser.add_argument('--cluster_bed', type=str, default=None,
help='write clusters to output bed file')
parser.add_argument('forward_bedgraph', type=str,
help='forward strand Start-seq bedgraph file')
parser.add_argument('reverse_bedgraph', type=str,
help='reverse strand Start-seq bedgraph file')
parser.add_argument('chrom_sizes', type=str,
help='standard tab-delimited chromosome sizes file')
parser.add_argument('output_bed', type=str, help='output TSS BED file')
args = parser.parse_args()
TSSCalling(**vars(args))
| 43.996964
| 80
| 0.494835
| 4,169
| 43,469
| 4.952746
| 0.08875
| 0.021358
| 0.028235
| 0.018016
| 0.358388
| 0.281528
| 0.235858
| 0.210577
| 0.176095
| 0.153816
| 0
| 0.010035
| 0.403966
| 43,469
| 987
| 81
| 44.04154
| 0.786908
| 0.053141
| 0
| 0.277251
| 0
| 0
| 0.096538
| 0.004185
| 0
| 0
| 0
| 0
| 0.004739
| 1
| 0.03673
| false
| 0.00237
| 0.005924
| 0
| 0.072275
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a51fd6b2b0c4c430c0e920bd959a2e1d06f3221b
| 234
|
py
|
Python
|
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | 1
|
2018-11-25T19:30:22.000Z
|
2018-11-25T19:30:22.000Z
|
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | null | null | null |
grayToBinary.py
|
gaurav3dua/OpenCV
|
d816158c40c35b897ce9873c176ce72735220069
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
img = cv2.imread('lena.jpg', cv2.IMREAD_GRAYSCALE)
thresh = 127
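# cv2.threshold returns a (threshold_used, image) tuple, so [1] below keeps
# only the binarized image: pixels above thresh become 255, the rest 0.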
im_bw = cv2.threshold(img, thresh, 255, cv2.THRESH_BINARY)[1]
cv2.imshow('image', im_bw)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 21.272727
| 62
| 0.713675
| 37
| 234
| 4.405405
| 0.621622
| 0.110429
| 0.08589
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080402
| 0.149573
| 234
| 11
| 63
| 21.272727
| 0.738693
| 0
| 0
| 0
| 0
| 0
| 0.057778
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a52068720298fd51fbb513a22dc8a2e7f0bdd3f1
| 652
|
py
|
Python
|
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | 6
|
2016-03-06T05:51:06.000Z
|
2017-01-10T05:49:03.000Z
|
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | null | null | null |
006-argparse.py
|
KitchenTableCoders/cli-video
|
35cacc059f6ac86c7bf6b1f86f42ea178e16165c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Introduces the "argparse" module, which is used to parse more complex argument strings
eg: ./006-argparse.py --name Jeff mauve
"""
import argparse # http://docs.python.org/2/library/argparse.html#module-argparse
import subprocess
def main():
parser = argparse.ArgumentParser(description='Say a sentence')
parser.add_argument('--name', type=str, help='a name')
parser.add_argument('color', type=str, nargs='+', help='a color') # nargs='+' means "at least one"
args = parser.parse_args()
cmd = 'say {0} likes {1}'.format(args.name, args.color[0])
subprocess.call(cmd, shell=True)
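    # Note: 'say' is the macOS text-to-speech command, and shell=True hands
    # the whole string to the shell, so this example is macOS-specific.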
if __name__ == '__main__':
main()
| 28.347826
| 99
| 0.707055
| 95
| 652
| 4.736842
| 0.621053
| 0.04
| 0.075556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012259
| 0.124233
| 652
| 23
| 100
| 28.347826
| 0.775832
| 0.369632
| 0
| 0
| 0
| 0
| 0.159601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.181818
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5209d004c35406d08483e6a8a94534fc1c1b17b
| 4,573
|
py
|
Python
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-04-21T02:55:11.000Z
|
2021-04-21T02:55:11.000Z
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 27
|
2020-12-07T23:11:42.000Z
|
2022-02-02T23:59:03.000Z
|
solid_attenuator/ioc_lfe_at2l0_calc/at2l0.py
|
ZLLentz/solid-attenuator
|
766ac1df169b3b9459222d979c9ef77a9be2b509
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2020-04-01T05:52:03.000Z
|
2020-07-24T16:56:36.000Z
|
"""
This is the IOC source code for the unique AT2L0, with its 18 in-out filters.
"""
from typing import List
from caproto.server import SubGroup, expand_macros
from caproto.server.autosave import RotatingFileManager
from .. import calculator, util
from ..filters import InOutFilterGroup
from ..ioc import IOCBase
from ..system import SystemGroupBase
from ..util import State
class SystemGroup(SystemGroupBase):
"""
PV group for attenuator system-spanning information.
This system group implementation is specific to AT2L0.
"""
@property
def material_order(self) -> List[str]:
"""Material prioritization."""
# Hard-coded for now.
return ['C', 'Si']
def check_materials(self) -> bool:
"""Ensure the materials specified are OK according to the order."""
bad_materials = set(self.material_order).symmetric_difference(
set(self.all_filter_materials)
)
if bad_materials:
self.log.error(
'Materials not set properly! May not calculate correctly. '
'Potentially bad materials: %s', bad_materials
)
return not bool(bad_materials)
@util.block_on_reentry()
async def run_calculation(self, energy: float, desired_transmission: float,
calc_mode: str
) -> calculator.Config:
if not self.check_materials():
raise util.MisconfigurationError(
f"Materials specified outside of supported ones. AT2L0 "
f"requires that diamond filters be inserted prior to silicon "
f"filters, but the following were found:"
f"{self.all_filter_materials}"
)
# Update all of the filters first, to determine their transmission
# at this energy
stuck = self.get_filters(stuck=True, inactive=False, normal=False)
filters = self.get_filters(stuck=False, inactive=False, normal=True)
materials = list(flt.material.value for flt in filters)
transmissions = list(flt.transmission.value for flt in filters)
for filter in stuck + filters:
await filter.set_photon_energy(energy)
# Account for stuck filters when calculating desired transmission:
stuck_transmission = self.calculate_stuck_transmission()
adjusted_tdes = desired_transmission / stuck_transmission
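        # e.g. if stuck-in filters already transmit 0.5 and 0.1 is requested
        # overall, the movable filters must be configured for 0.1 / 0.5 = 0.2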
# Using the above-calculated transmissions, find the best configuration
config = calculator.get_best_config_with_material_priority(
materials=materials,
transmissions=transmissions,
material_order=self.material_order,
t_des=adjusted_tdes,
mode=calc_mode,
)
filter_to_state = {
flt: State.from_filter_index(idx)
for flt, idx in zip(filters, config.filter_states)
}
filter_to_state.update(
{flt: flt.get_stuck_state() for flt in stuck}
)
# Reassemble filter states in order:
config.filter_states = [
# Inactive filters will be implicitly marked as "Out" here.
filter_to_state.get(flt, State.Out)
for flt in self.filters.values()
]
# Include the stuck transmission in the result:
config.transmission *= stuck_transmission
return config
def create_ioc(prefix, filter_group, macros, **ioc_options):
"""IOC Setup."""
filter_index_to_attribute = {
index: f'filter_{suffix}'
for index, suffix in filter_group.items()
}
subgroups = {
filter_index_to_attribute[index]: SubGroup(
InOutFilterGroup, prefix=f':FILTER:{suffix}:', index=index)
for index, suffix in filter_group.items()
}
subgroups['sys'] = SubGroup(SystemGroup, prefix=':SYS:')
low_index = min(filter_index_to_attribute)
high_index = max(filter_index_to_attribute)
motor_prefix = expand_macros(macros["motor_prefix"], macros)
motor_prefixes = {
idx: f'{motor_prefix}{idx:02d}:STATE'
for idx in range(low_index, high_index + 1)
}
IOCMain = IOCBase.create_ioc_class(filter_index_to_attribute, subgroups,
motor_prefixes)
ioc = IOCMain(prefix=prefix, macros=macros, **ioc_options)
autosave_path = expand_macros(macros['autosave_path'], macros)
ioc.autosave_helper.filename = autosave_path
ioc.autosave_helper.file_manager = RotatingFileManager(autosave_path)
return ioc
| 36.293651
| 79
| 0.650776
| 523
| 4,573
| 5.510516
| 0.340344
| 0.022901
| 0.022554
| 0.038168
| 0.061069
| 0.028452
| 0.028452
| 0.028452
| 0
| 0
| 0
| 0.00329
| 0.26897
| 4,573
| 125
| 80
| 36.584
| 0.858809
| 0.144107
| 0
| 0.023256
| 0
| 0
| 0.093402
| 0.014489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034884
| false
| 0
| 0.093023
| 0
| 0.186047
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a52cc5e0156fbef790ecdf07862d92b75464ebf8
| 399
|
py
|
Python
|
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
classifier/nets/build.py
|
yidarvin/firstaid_classification
|
5cb1ec5a896766ec4670e0daca23014a879e6c14
|
[
"MIT"
] | null | null | null |
import torch
from os.path import join
from fvcore.common.registry import Registry
ARCHITECTURE_REGISTRY = Registry("ARCHITECTURE")
def build_model(cfg):
arch = cfg.MODEL.ARCHITECTURE
model = ARCHITECTURE_REGISTRY.get(arch)(cfg)
if cfg.SAVE.MODELPATH and cfg.MODEL.LOADPREV:
model.load_state_dict(torch.load(join(cfg.SAVE.MODELPATH, cfg.NAME + '_best.pth')))
return model
| 26.6
| 91
| 0.749373
| 55
| 399
| 5.327273
| 0.509091
| 0.136519
| 0.109215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.14787
| 399
| 14
| 92
| 28.5
| 0.861765
| 0
| 0
| 0
| 0
| 0
| 0.052764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5320d08df77982d660989950f89ae694eb0d00c
| 2,870
|
py
|
Python
|
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | 1
|
2018-07-31T08:29:11.000Z
|
2018-07-31T08:29:11.000Z
|
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | null | null | null |
C45Tree/apply.py
|
ManuelFreytag/Algorithm_implementation
|
380453c2bd4a66e8d604ecdf91c68cb1e14f6bb8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 30 16:08:04 2016
@author: Manuel
"""
from C45Tree_own import split
import pandas as pa
def apply(X, tree):
results = []
for x in range(0,len(X.index)):
temp_tree = tree.copy()
example = X.loc[x,:]
        while True:
#Search for the correct next value
for i in range(0,len(temp_tree)):
node = searchNextNode(temp_tree[i])
#Check for numeric attributes
try:
if(X[node[0]].str.isnumeric().loc[0] == True):
#Phrase the first part and cast the second part
#check the what portion of the string needs to be removed
example = checkAndCompose(example, node)
except AttributeError:
if(split.typeCheck(X[node[0]].loc[0].dtype)=="numeric"):
example = checkAndCompose(example, node)
if(example.loc[node[0]] == node[1]):
#Cut the correct subtree
temp_tree = temp_tree[i]
break
#Check if we already have a classification solution
            if isinstance(temp_tree[0], list):
#No solution, new cut
temp_tree = temp_tree[1]
else:
#Solution, add the result to array
results = results + [temp_tree[2]]
break
return results
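# Assumed tree layout (inferred from the traversal above, not documented in
# this file): internal nodes are nested lists of [attribute, value, subtree]
# entries, and a leaf is a flat list whose index 2 holds the class label;
# searchNextNode descends first children until it reaches such a flat node.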
def checkAndCompose(example, node):
pa.options.mode.chained_assignment = None
if(node[1][0:2] == "<="):
if(float(example.loc[node[0]]) <= float(node[1][2:])):
example.loc[node[0]] = node[1]
# example.loc.__setitem__((node[0]), node[1])
if(node[1][0] == ">"):
if(float(example.loc[node[0]]) > float(node[1][1:])):
example.loc[node[0]] = node[1]
# example.loc.__setitem__((node[0]), node[1])
pa.options.mode.chained_assignment = 'warn'
return example
def searchInTree(tree, path):
temp_path = path.copy()
temp_tree = tree.copy()
    while isinstance(temp_tree, list):
#move one dimension in
if(len(temp_path) > 0):
temp_tree = temp_tree[temp_path[0]]
#Remove done path part
temp_path.pop(0)
else:
#If all parts of the path are used, we search for the very first item
temp_tree = temp_tree[0]
return temp_tree
def searchNextNode(tree):
temp_tree = tree.copy()
    while isinstance(temp_tree[0], list):
temp_tree = temp_tree[0]
return temp_tree
| 32.247191
| 81
| 0.501394
| 337
| 2,870
| 4.163205
| 0.341246
| 0.119743
| 0.051319
| 0.053457
| 0.295082
| 0.252316
| 0.212402
| 0.212402
| 0.112616
| 0.066999
| 0
| 0.028879
| 0.384669
| 2,870
| 89
| 82
| 32.247191
| 0.765572
| 0.206272
| 0
| 0.3
| 0
| 0
| 0.0062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5379fd45bcc411d7e294e71572901a73fd67651
| 8,204
|
py
|
Python
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 26
|
2021-11-30T02:48:16.000Z
|
2022-03-26T04:47:25.000Z
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 143
|
2021-11-04T07:47:53.000Z
|
2022-03-31T23:13:33.000Z
|
cogs/original_command.py
|
RT-Team/rt-bot
|
39698efb6b2465de1e84063cba9d207a5bf07fa5
|
[
"BSD-4-Clause"
] | 14
|
2021-11-12T15:32:27.000Z
|
2022-03-28T04:04:44.000Z
|
# RT - Original Command
from __future__ import annotations
from discord.ext import commands
import discord
from aiomysql import Pool, Cursor
from rtutil import DatabaseManager
class DataManager(DatabaseManager):
TABLE = "OriginalCommand"
def __init__(self, pool: Pool):
self.pool = pool
async def _prepare_table(self, cursor: Cursor = None) -> None:
await cursor.execute(
"""CREATE TABLE IF NOT EXISTS OriginalCommand (
GuildID BIGINT, Command TEXT,
Content TEXT, Reply TINYINT
);"""
)
async def _exists(self, cursor, guild_id: int, command: str) -> tuple[bool, str]:
        # Check whether the command already exists.
condition = "GuildID = %s AND Command = %s"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE {condition};",
(guild_id, command)
)
return bool(await cursor.fetchone()), condition
async def write(
self, guild_id: int, command: str,
content: str, reply: bool, cursor: Cursor = None
) -> None:
"書き込みます。"
if (c := await self._exists(cursor, guild_id, command))[0]:
await cursor.execute(
f"UPDATE {self.TABLE} SET Content = %s, Reply = %s WHERE {c[1]};",
(content, reply, guild_id, command)
)
else:
await cursor.execute(
f"INSERT INTO {self.TABLE} VALUES (%s, %s, %s, %s);",
(guild_id, command, content, reply)
)
async def delete(self, guild_id: int, command: str, cursor: Cursor = None) -> None:
"データを削除します"
if (c := await self._exists(cursor, guild_id, command))[0]:
await cursor.execute(
f"DELETE FROM {self.TABLE} WHERE GuildID = %s AND Command = %s;",
(guild_id, command)
)
else:
raise KeyError("そのコマンドが見つかりませんでした。")
async def read(self, guild_id: int, cursor: Cursor = None) -> list:
"データを読み込みます。"
await cursor.execute(
f"SELECT * FROM {self.TABLE} WHERE GuildID = %s;",
(guild_id,)
)
return await cursor.fetchall()
async def read_all(self, cursor: Cursor = None) -> list:
"全てのデータを読み込みます。"
await cursor.execute(f"SELECT * FROM {self.TABLE};")
return await cursor.fetchall()
class OriginalCommand(commands.Cog, DataManager):
def __init__(self, bot):
self.bot = bot
self.data = {}
self.bot.loop.create_task(self.on_ready())
async def on_ready(self):
super(commands.Cog, self).__init__(self.bot.mysql.pool)
await self._prepare_table()
await self.update_cache()
async def update_cache(self):
self.data = {}
for row in await self.read_all():
if row:
if row[0] not in self.data:
self.data[row[0]] = {}
self.data[row[0]][row[1]] = {
"content": row[2],
"reply": row[3]
}
LIST_MES = {
"ja": ("自動返信一覧", "部分一致"),
"en": ("AutoReply", "Partially consistent")
}
@commands.group(
aliases=["cmd", "コマンド", "こまんど"],
extras={
"headding": {
"ja": "自動返信、オリジナルコマンド機能",
"en": "Auto reply, Original Command."
}, "parent": "ServerUseful"
}
)
async def command(self, ctx):
"""!lang ja
--------
自動返信、オリジナルコマンド機能です。
`rt!command`で登録されているコマンドの確認が可能です。
Aliases
-------
cmd, こまんど, コマンド
!lang en
--------
Auto reply, original command.
You can do `rt!command` to see commands which has registered.
Aliases
-------
cmd"""
if not ctx.invoked_subcommand:
if (data := self.data.get(ctx.guild.id)):
lang = self.bot.cogs["Language"].get(ctx.author.id)
embed = discord.Embed(
title=self.LIST_MES[lang][0],
description="\n".join(
(f"{cmd}:{data[cmd]['content']}\n "
f"{self.LIST_MES[lang][1]}:{bool(data[cmd]['reply'])}")
for cmd in data
),
color=self.bot.colors["normal"]
)
await ctx.reply(embed=embed)
else:
await ctx.reply(
{"ja": "自動返信はまだ登録されていません。",
"en": "AutoReplies has not registered anything yet."}
)
@command.command("set", aliases=["せっと"])
@commands.has_permissions(manage_messages=True)
@commands.cooldown(1, 7, commands.BucketType.guild)
async def set_command(self, ctx, command, auto_reply: bool, *, content):
"""!lang ja
--------
オリジナルコマンドを登録します。
Parameters
----------
command : str
コマンド名です。
auto_reply : bool
部分一致で返信をするかどうかです。
これをonにするとcommandがメッセージに含まれているだけで反応します。
offにするとcommandがメッセージに完全一致しないと反応しなくなります。
content : str
返信内容です。
Examples
--------
`rt!command set ようこそ off ようこそ!RTサーバーへ!!`
`rt!command set そうだよ on そうだよ(便乗)`
Aliases
-------
せっと
!lang en
--------
Register original command.
Parameters
----------
command : str
Command name.
auto_reply : bool
This is whether or not to reply with a partial match.
If you turn this on, it will respond only if the command is included in the message.
If you turn it off, it will not respond unless the command is an exact match to the message.
content : str
The content of the reply.
Examples
--------
`rt!command set Welcome! off Welcome to RT Server!!`
`rt!command set Yes on Yes (free ride)`"""
await ctx.trigger_typing()
if len(self.data.get(ctx.guild.id, ())) == 50:
await ctx.reply(
{"ja": "五十個より多くは登録できません。",
"en": "You cannot register more than 50."}
)
else:
await self.write(ctx.guild.id, command, content, auto_reply)
await self.update_cache()
await ctx.reply("Ok")
@command.command("delete", aliases=["del", "rm", "さくじょ", "削除"])
@commands.has_permissions(manage_messages=True)
@commands.cooldown(1, 7, commands.BucketType.guild)
async def delete_command(self, ctx, *, command):
"""!lang ja
--------
コマンドを削除します。
Parameters
----------
command : str
削除するコマンドの名前です。
Aliases
-------
del, rm, さくじょ, 削除
!lang en
--------
Delete command.
Parameters
----------
command : str
Target command name.
Aliases
-------
del, rm"""
await ctx.trigger_typing()
try:
await self.delete(ctx.guild.id, command)
except KeyError:
await ctx.reply(
{"ja": "そのコマンドが見つかりませんでした。",
"en": "The command is not found."}
)
else:
await self.update_cache()
await ctx.reply("Ok")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if not message.guild:
return
if ((data := self.data.get(message.guild.id))
and message.author.id != self.bot.user.id
and not message.content.startswith(
tuple(self.bot.command_prefix))):
count = 0
for command in data:
if ((data[command]["reply"] and command in message.content)
or command == message.content):
await message.reply(data[command]["content"])
count += 1
if count == 3:
break
def setup(bot):
bot.add_cog(OriginalCommand(bot))
| 30.385185
| 104
| 0.50902
| 845
| 8,204
| 4.868639
| 0.253254
| 0.027224
| 0.027224
| 0.02771
| 0.187895
| 0.14876
| 0.118619
| 0.118619
| 0.092368
| 0.071463
| 0
| 0.004238
| 0.36726
| 8,204
| 269
| 105
| 30.498141
| 0.788287
| 0.004998
| 0
| 0.217105
| 0
| 0.006579
| 0.137455
| 0.013318
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019737
| false
| 0
| 0.032895
| 0
| 0.105263
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53a2c90ed2f68c611f75caaa74a581e8ab0f1b5
| 12,626
|
py
|
Python
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | 2
|
2020-02-13T12:30:47.000Z
|
2020-03-21T16:32:47.000Z
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | 2
|
2021-04-06T18:27:57.000Z
|
2021-06-02T03:51:47.000Z
|
cli_stats/get_data/api_scraper/api_scraper.py
|
timoudas/premier_league_api
|
2b850466ed1c910ee901c68e660706d55f53df61
|
[
"MIT"
] | null | null | null |
import re
import requests
import sys
sys.path.append('cli_stats')
from directory import Directory
from pprint import pprint
from storage_config import StorageConfig
from tqdm import tqdm
session = requests.Session()
# TODO: program is not scaling well
"""***HOW TO USE***
1. Create an instance of Football; this initiates the leagues dict, which holds
all the leagueIDs.
fb = Football()
2. To get all the seasons for all leagues, first run the method
fb.load_leagues()
this fills the leagues dict with the necessary info to make further queries.
To get season values, the league abbreviation has to be passed like below:
fb.leagues['EN_PR'].load_seasons()
This selects the key 'EN_PR' which is the parent key in leagues and loads
the seasons for that league by running the method load_seasons(), which is in
the class League(). This returns a dict seasons holding entries such as:
'1992/93': {'competition': 1, 'id': 1, 'label': '1992/93'}
where '1992/93' is the key containing that season's information.
***WHAT IS NEEDED FOR ARBITRARY QUERIES***
League abbreviation
Season label
Team name
"""
def load_raw_data(url):
"""Retreives Ids for different pages on the API"""
page = 0
data_temp = []
while True:
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://www.premierleague.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
params = (('pageSize', '100'),
('page', str(page),))
# request to obtain the team info
try:
response = session.get(url, headers=headers, params=params).json()
if url.endswith('staff'):
data = response['players']
return data
elif 'fixtures' in url:
data = response["content"]
#loop to get info for each game
data_temp.extend(data)
else:
data = response['content']
# note: bit of a hack, for some reason 'id' is a float, but everywhere it's referenced, it's an int
for d in data:
d['id'] = int(d['id'])
return data
except Exception as e:
print(e, 'Something went wrong with the request')
return {}
page += 1
if page >= response["pageInfo"]["numPages"]:
break
for d in data_temp:
d['id'] = int(d['id'])
return data_temp
class TeamPlayers(dict):
_players = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load_players_for_team(self, team, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/teams/{team}/compseasons/{season}/staff')
self._players.clear()
self.clear()
for d in ds:
if d:
self._players[d['id']] = d
self[d['id']] = self._players[d['id']]
return self._players
class FixtureInfo(dict):
_fixtures = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load_info_for_fixture(self, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/fixtures?compSeasons={season}')
self.clear()
for d in ds:
self._fixtures[d['id']] = d
self[d['id']] = self._fixtures[d['id']]
return self._fixtures
class SeasonTeams(dict):
"""Creates an object for a team given a season """
_teams = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Team(dict):
"""Creates an object for a team in a competion and specific season
Args:
competition (str): Competition abbreviation
"""
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.players = TeamPlayers()#Returns Ids and info for every player on a team
def load_players(self):
"""returns info for all the players given their id and a season _id"""
return self.players.load_players_for_team(self['id'], self['competition'])
def load_teams_for_season(self, season, comp):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/teams?comps={comp}&compSeasons={season}')
self.clear()
self._teams.clear()
for d in ds:
d['competition'] = comp
self._teams[d['id']] = self.Team(season, d)
self[d['shortName']] = self._teams[d['id']]
return self._teams
# NO IDEA HOW THIS WORKS - REPLICATE SeasonTeams
class SeasonFixtures(dict):
"""Creates an object for all fixtures in a given a season """
_fixtures = {}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
class Fixture(dict):
"""Creates an object for a fixture in a competion and specific season"""
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.fixture = FixtureInfo()#Returns Ids and info for every player on a team
def load_fixture(self):
"""returns info for a fixture given it's Id"""
self.fixture.load_info_for_fixture(self['id'])
def load_fixture_for_season(self, season):
ds = load_raw_data(
f'https://footballapi.pulselive.com/football/fixtures?compSeasons={season}')
self.clear()
for d in ds:
d['competition'] = season
self._fixtures[d['id']] = self.Fixture(season, d)
self[d['status']] = self._fixtures[d['id']]
return self._fixtures
class Season(dict):
all_teams = SeasonTeams()
def __init__(self, competition, *args, **kwargs):
super().__init__(*args, **kwargs)
self['competition'] = competition
self.teams = SeasonTeams()
self.fixtures = SeasonFixtures()
def load_teams(self):
return self.teams.load_teams_for_season(self['id'], self['competition'])
def load_played_fixtures(self):
return self.fixtures.load_fixture_for_season(self['id'])
def load_unplayed_fixtures(self):
pass
def load_all_fixtures(self):
pass
class League(dict):
"""Gets Season_ids, returns a dict"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.seasons = {}  # initiates dictionary to hold season ids
def season_label(self, label):
try:
return re.search(r'(\d{4}/\d{4})', label).group()
except AttributeError:  # no full-year match, e.g. a '2019/20' style label
label = re.search(r'(\d{4}/\d{2})', label).group()
return re.sub(r'(\d{4}/)', r'\g<1>20', label)
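# A worked example (labels are hypothetical; illustrative only):
#   season_label('Premier League 2016/2017') -> '2016/2017'
#   season_label('Premier League 2019/20')   -> '2019/2020' (two-digit year expanded)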
def load_seasons(self):
"""Returns a dict with season label as key and season id as value"""
ds = load_raw_data(f'https://footballapi.pulselive.com/football/competitions/{self["id"]}/compseasons')
self.seasons = {self.season_label(d['label']): Season(self['id'], d) for d in ds}
return self.seasons
class Football:
"""Gets Competition_abbreviation, returns a dict"""
def __init__(self):
self.leagues = {}  # initiates dictionary to hold league ids
def load_leagues(self):
"""Returns a dict with league abbreviation as key and league id as value"""
ds = load_raw_data('https://footballapi.pulselive.com/football/competitions')
self.leagues = {d['abbreviation']: League(d) for d in ds}
return self.leagues
class ValidateParams():
"""Checks if all needed information exist on api for a league by season.
Input: A leagueID to check
Output: Console output with True/False values if information exist
**How the class checks if data exists**:
User provides a known leagueID, a request is made with the ID to see which seasons
exist.
If no seasonIDs exist, it stops else takes all the seasonIDs and stores them.
For each seasonID it checks if fixtures exists, if it exists it stores them and
uses them to see if fixture stats exists.
If fixture stats exist it requests att teams in
"""
dir = Directory()
fb = Football()
def __init__(self, league_file='league_params.json', team_seasons_file='teams_params.json' ):
self.leagues = self.import_id(league_file)
self.team_seasons = self.import_id(team_seasons_file)
self.league_file = league_file
def import_id(self, file):
"""Imports a json file in read mode
Args:
file(str): Name of file
"""
return self.dir.load_json(file , StorageConfig.PARAMS_DIR)
def make_request(self, url):
"""Makes a GET request
Args:
url (str): URL of the website to request
"""
headers = {'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Origin': 'https://www.premierleague.com',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
params = (('pageSize', '100'),)
response = requests.get(url, params = params, headers=headers)
return response.status_code
def check_current_season(self):
"""
Checks if request gives response code 200
"""
failed = {}
league = self.leagues
print('Checking leagues..')
for league_name, league_id in tqdm(league.items()):
status = self.make_request(f'https://footballapi.pulselive.com/football/competitions/{league_id}/compseasons/current')
if status != 200:
failed.update({league_name:league_id})
print(failed)
return failed
def remove_failed_leagues(self, failed_leagues):
"""Removes failed leagues from .json file
Args:
failed_leagues (dict): dict with leagues existing in initial file
"""
league = self.import_id('season_params.json')  # note: reads/writes season_params.json, not self.league_file
deleted = []
print('Deleting failed leagues..')
for failed in failed_leagues.keys():
if failed in league:
del league[failed]
deleted.append(failed)
print("Below leagues have been removed from", self.league_file)
print("\n".join(deleted))
self.dir.save_json('season_params', league, StorageConfig.PARAMS_DIR)
def check_stats_urls(self):
failed = {}
self.fb.load_leagues()
#loads league and their seasons from season_params.json
league_season_info = self.dir.load_json('season_params.json', StorageConfig.PARAMS_DIR)
#Iterates over league-season in league_season_info
for league, season in league_season_info.items():
seasons = self.fb.leagues[str(league)].load_seasons()
#Iterates over season_label and ID in seasons
for season_label, season_id in seasons.items():
s_id = season_id['id']
#Gets teams for a specific season
league_teams = self.fb.leagues[str(league)].seasons[str(season_label)].load_teams()
for team in league_teams.keys():
status = self.make_request(
f'https://footballapi.pulselive.com/football/teams/{team}/compseasons/{s_id}/staff')
if status != 200 and s_id not in failed:  # failed is keyed by season id, so guard on it
failed.update({s_id: league})
print(failed)
return failed
def main(self):
return self.remove_failed_leagues(self.check_current_season())
if __name__ == '__main__':
# ValidateParams().main()
# Dir = Directory()
fb = Football()
# lg = League()
# fx = FixtureInfo()
fb.load_leagues()
pprint(fb.leagues['EN_PR'].load_seasons())
pprint(fb.leagues['EN_PR'].seasons['2019/2020'].load_teams())
# pprint(fb.leagues['EN_PR'].seasons['2016/2017'].teams['Arsenal'].load_players())
# ds = fb.leagues['EU_CL'].load_seasons()
# fb.leagues['EU_CL'].seasons['2016/2017'].load_teams()
# pprint(fb.leagues['EU_CL'].seasons['2016/2017'].teams['Atlético'].load_players())
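# A minimal validation sketch (assumes the params .json files exist; illustrative only):
# vp = ValidateParams()
# failed = vp.check_current_season()   # {league_name: league_id} for every non-200 response
# vp.remove_failed_leagues(failed)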
| 34.497268
| 165
| 0.606368
| 1,591
| 12,626
| 4.65682
| 0.191075
| 0.021595
| 0.014847
| 0.020516
| 0.358618
| 0.312728
| 0.262384
| 0.213254
| 0.202996
| 0.192874
| 0
| 0.014757
| 0.275463
| 12,626
| 365
| 166
| 34.591781
| 0.795146
| 0.187946
| 0
| 0.287805
| 0
| 0.009756
| 0.173826
| 0.007548
| 0
| 0
| 0
| 0.00274
| 0
| 1
| 0.146341
| false
| 0.009756
| 0.053659
| 0.014634
| 0.380488
| 0.04878
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53bcdd38f44a14806e05907ccae272513b9cf1c
| 1,787
|
py
|
Python
|
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
archive/least_squares_BCES.py
|
Alexander-Serov/abortive-initiation-analysis
|
2a036a5186459b79e7cdbd84aa8a7b130226b5e1
|
[
"MIT"
] | null | null | null |
import numpy as np
def least_squares_BCES(Y1, Y2, V11, V22, V12=0, origin=False):
"""
Make a least-squares fit for non-NaN values taking into account the errors in both rho and J variables. This implementation is based on Akritas1996 article. It is a generalization of the least-squares method. The variance of the slope is also calculated. The intersect is checked to be 0, otherwise a warning is issued.
The fit is performed for the model
X2i = alpha + beta * X1i + ei
Yki = Xki + eki
alpha = 0
so the slope is for X2(X1) function and not the inverse.
If origin == True, no intersect assumed. This doesn't change the lest-squares slope, but changes it's error estimate.
Input:
vectors of data points and errors corresponding to different embryos and ncs.
Output:
(beta, beta_V, alpha, alpha_V)
"""
# Find and drop nans
inds_not_nan = list(set(np.flatnonzero(~np.isnan(Y1))) & set(
np.flatnonzero(~np.isnan(Y2))))
Y1, Y2, V11, V22 = [v[inds_not_nan] for v in (Y1, Y2, V11, V22)]
Y1m = Y1.mean()
Y2m = Y2.mean()
n = len(Y1)
# Estimates for the slope (beta) and intercept (alpha)
beta = (
np.sum((Y1 - Y1m) * (Y2 - Y2m) - V12) /
np.sum((Y1 - Y1m)**2 - V11)
)
if not origin:
alpha = (Y2m - beta * Y1m)
else:
alpha = 0
# Error on the estimates
ksi = ((Y1 - Y1m) * (Y2 - beta * Y1 - alpha) + beta * V11 - V12) / (Y1.var() - V11.mean())
zeta = Y2 - beta * Y1 - Y1m * ksi
beta_V = ksi.var() / n
alpha_V = zeta.var() / n
# T, _, _, _ = np.linalg.lstsq(slopes[:, np.newaxis], Ns, rcond=None)
# print(beta, np.sqrt(beta_V), alpha, np.sqrt(alpha_V))
# print('Finished!')
return (beta, beta_V, alpha, alpha_V)
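# A minimal usage sketch with synthetic data (values are illustrative; not part of the original analysis):
if __name__ == '__main__':
    np.random.seed(0)
    n = 50
    Y1 = np.random.normal(size=n)
    Y2 = 2.0 * Y1 + np.random.normal(scale=0.1, size=n)
    V11 = np.full(n, 0.01)  # per-point variance of Y1
    V22 = np.full(n, 0.01)  # per-point variance of Y2
    beta, beta_V, alpha, alpha_V = least_squares_BCES(Y1, Y2, V11, V22)
    print(beta, beta_V, alpha, alpha_V)  # expect a slope near 2 and an intercept near 0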
| 33.716981
| 323
| 0.613318
| 280
| 1,787
| 3.853571
| 0.45
| 0.018536
| 0.019462
| 0.027804
| 0.079703
| 0.037071
| 0
| 0
| 0
| 0
| 0
| 0.050267
| 0.265249
| 1,787
| 52
| 324
| 34.365385
| 0.771516
| 0.533856
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53c77391ca18888fe3d4f6374d65264bcebc717
| 7,696
|
py
|
Python
|
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | null | null | null |
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | 4
|
2020-10-28T19:27:47.000Z
|
2020-11-04T00:12:25.000Z
|
tests/test_face.py
|
andfranklin/ErnosCube
|
a9dd7feda4bc0e9162cd884cd450f47c6b19c350
|
[
"MIT"
] | null | null | null |
from ErnosCube.face_enum import FaceEnum
from ErnosCube.orient_enum import OrientEnum
from ErnosCube.sticker import Sticker
from ErnosCube.face import Face
from ErnosCube.face import RowFaceSlice, ColFaceSlice
from plane_rotatable_tests import PlaneRotatableTests
from hypothesis import given
from strategies import sticker_matrices
from strategies_face import faces, faces_minus_c2, faces_minus_c4
from utils import N_and_flatten
from copy import deepcopy
from pytest import mark, fixture
class TestFace(PlaneRotatableTests):
"""Collection of all tests run on instances of the Face Class."""
objs = faces
objs_minus_c2 = faces_minus_c2
objs_minus_c4 = faces_minus_c4
@given(sticker_matrices)
def construction_test(self, sticker_matrix):
Face(*N_and_flatten(sticker_matrix))
@fixture
def front_face(self):
sticker_matrix = []
for i in range(3):
row = [Sticker(FaceEnum.FRONT, OrientEnum.UP) for _ in range(3)]
sticker_matrix.append(row)
return Face(*N_and_flatten(sticker_matrix))
@mark.dependency(depends=["construction"])
@given(faces)
def test_str(self, face):
gold = f"Face(N={face.N})"
assert str(face) == gold
@mark.dependency(depends=["construction"])
def test_repr(self, front_face):
gold = "\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b"
gold += "[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\n\x1b[7m\x1b[1m\x1b[32m ↑"
gold += " \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b"
gold += "[32m ↑ \x1b[0m\n\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b"
gold += "[1m\x1b[32m ↑ \x1b[0m\x1b[7m\x1b[1m\x1b[32m ↑ \x1b[0m"
err_str = f"{repr(front_face)}: {repr(repr(front_face))}"
assert repr(front_face) == gold, err_str
@mark.dependency(depends=["construction"])
def test_get_raw_repr_size(self, front_face):
assert front_face.get_raw_repr_size() == 9
def rotate_cw_test(self):
sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
sticker_mat.append([s20, s21, s22])
comp_face = Face(*N_and_flatten(sticker_mat))
cw_sticker_mat = []
sticker_row = [s20, s10, s00]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s21, s11, s01]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s22, s12, s02]
cw_sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
cw_comp_face = Face(*N_and_flatten(cw_sticker_mat))
assert (
comp_face.rotate_cw() == cw_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def rotate_ccw_test(self):
ccw_sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
ccw_sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
ccw_sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
ccw_sticker_mat.append([s20, s21, s22])
ccw_comp_face = Face(*N_and_flatten(ccw_sticker_mat))
sticker_mat = []
sticker_row = [s20, s10, s00]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s21, s11, s01]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
sticker_row = [s22, s12, s02]
sticker_mat.append([deepcopy(s).rotate_cw() for s in sticker_row])
comp_face = Face(*N_and_flatten(sticker_mat))
assert (
comp_face.rotate_ccw() == ccw_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def rotate_ht_test(self):
sticker_mat = []
s00 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s01 = Sticker(FaceEnum.RIGHT, OrientEnum.RIGHT)
s02 = Sticker(FaceEnum.BACK, OrientEnum.DOWN)
sticker_mat.append([s00, s01, s02])
s10 = Sticker(FaceEnum.LEFT, OrientEnum.LEFT)
s11 = Sticker(FaceEnum.UP, OrientEnum.UP)
s12 = Sticker(FaceEnum.DOWN, OrientEnum.RIGHT)
sticker_mat.append([s10, s11, s12])
s20 = Sticker(FaceEnum.FRONT, OrientEnum.DOWN)
s21 = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
s22 = Sticker(FaceEnum.BACK, OrientEnum.UP)
sticker_mat.append([s20, s21, s22])
comp_face = Face(*N_and_flatten(sticker_mat))
ht_sticker_mat = []
sticker_row = [s22, s21, s20]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
sticker_row = [s12, s11, s10]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
sticker_row = [s02, s01, s00]
ht_sticker_mat.append([deepcopy(s).rotate_ht() for s in sticker_row])
ht_comp_face = Face(*N_and_flatten(ht_sticker_mat))
assert (
comp_face.rotate_ht() == ht_comp_face
), f"failed for {str(comp_face)}\n{repr(comp_face)}"
def stickers_and_face(self):
"""Helper: returns three distinct stickers and a face whose middle row and column hold them."""
s1 = Sticker(FaceEnum.FRONT, OrientEnum.UP)
s2 = Sticker(FaceEnum.BACK, OrientEnum.RIGHT)
s3 = Sticker(FaceEnum.LEFT, OrientEnum.DOWN)
stickers = [s1, s2, s3]
cs = Sticker(FaceEnum.RIGHT, OrientEnum.LEFT)
face_stickers = []
face_stickers.append([cs, s1, cs])
face_stickers.append([s1, s2, s3])
face_stickers.append([cs, s3, cs])
return stickers, Face(*N_and_flatten(face_stickers))
@mark.dependency(name="get_row_slice", depends=["construction"])
def test_get_row_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_row_slice(1)
assert isinstance(face_slice, RowFaceSlice)
assert all(a == b for a, b in zip(face_slice.stickers, stickers))
@mark.dependency(name="get_col_slice", depends=["construction"])
def test_get_col_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_col_slice(1)
assert isinstance(face_slice, ColFaceSlice)
assert all(a == b for a, b in zip(face_slice.stickers, stickers))
@mark.dependency(depends=["get_row_slice"])
def test_apply_row_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_row_slice(1)
face.apply_slice(face_slice, 0)
for col_indx in range(face.N):
assert face[0, col_indx] == stickers[col_indx], f"\n{repr(face)}"
@mark.dependency(depends=["get_col_slice"])
def test_apply_col_slice(self):
stickers, face = self.stickers_and_face()
face_slice = face.get_col_slice(1)
face.apply_slice(face_slice, 0)
for row_indx in range(face.N):
assert face[row_indx, 0] == stickers[row_indx], f"\n{repr(face)}"
| 37
| 79
| 0.652677
| 1,069
| 7,696
| 4.492049
| 0.104771
| 0.099958
| 0.059975
| 0.028113
| 0.698875
| 0.664515
| 0.575385
| 0.552478
| 0.545606
| 0.534361
| 0
| 0.045394
| 0.224272
| 7,696
| 207
| 80
| 37.178744
| 0.757454
| 0.007666
| 0
| 0.474684
| 0
| 0.031646
| 0.082558
| 0.045866
| 0
| 0
| 0
| 0
| 0.075949
| 1
| 0.082278
| false
| 0
| 0.075949
| 0
| 0.196203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53cb8a72414679c109b52c99f7c00abcac934ad
| 19,752
|
py
|
Python
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 4
|
2015-01-05T21:04:20.000Z
|
2015-09-16T12:56:47.000Z
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 15
|
2015-01-14T10:08:01.000Z
|
2021-06-02T07:09:49.000Z
|
tests/test_djangoes.py
|
Exirel/djangoes
|
7fee0ec0383077fc8ac5da8515c51a0b304f84be
|
[
"CC0-1.0"
] | 2
|
2015-02-17T11:11:31.000Z
|
2016-05-06T07:11:24.000Z
|
from unittest.case import TestCase
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from djangoes import (ConnectionHandler,
IndexDoesNotExist,
ConnectionDoesNotExist,
load_backend)
from djangoes.backends.abstracts import Base
from djangoes.backends import elasticsearch
class TestConnectionHandler(TestCase):
"""Test the ConnectionHandler class.
The ConnectionHandler is a major entry point for a good integration of
ElasticSearch in a Django project. It must ensure appropriate default
values, settings conformity, and prepare test settings.
"""
# Test behavior with the default and/or empty values
# ==================================================
# Makes assertions about the default behavior when nothing is configured,
# or when very little information is given. Using djangoes should be as
# transparent as possible, in particular with the default behavior.
def test_empty(self):
"""Assert an empty configuration fallback on default values."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
# A default alias appear in servers, while nothing changed in indices.
assert handler.servers == {'default': {}}
assert handler.indices == indices
def test_empty_with_default(self):
"""Assert the ensured default configuration is acceptable as input."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend',
'HOSTS': [],
'PARAMS': {},
'INDICES': []
}
}
indices = {
'index': {
'NAME': 'index',
'ALIASES': []
}
}
handler = ConnectionHandler(servers, indices)
# Both must be equal, without changes.
assert handler.servers == servers
assert handler.indices == indices
def test_empty_with_default_fallback(self):
"""Assert the fallback configuration is acceptable as input."""
servers = {
'default': {}
}
indices = {}
handler = ConnectionHandler(servers, indices)
assert handler.servers == {'default': {}}
assert handler.indices == {}
# Test with django project settings
# =================================
def test_project_settings_by_default(self):
"""Assert values come from the django project settings if not given."""
servers = {
'default': {},
'by_settings': {}
}
indices = {
'index_by_settings': {}
}
with override_settings(ES_SERVERS=servers, ES_INDICES=indices):
# No argument
handler = ConnectionHandler()
# Servers and indices are the one set in django settings.
assert handler.servers == servers
assert handler.indices == indices
# Test improperly configured behaviors
# ====================================
def test_improperly_configured_servers(self):
"""Assert raise when settings are not empty but without `default`."""
servers = {
'not_default': {}
}
handler = ConnectionHandler(servers, {})
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.servers
assert str(raised.exception) == "You must define a 'default' ElasticSearch server"
# Test ensure default values
# ==========================
# Server
def test_empty_ensure_server_defaults(self):
"""Assert default values are set properly on an empty server."""
handler = ConnectionHandler({}, {})
handler.ensure_server_defaults('default')
default_server = handler.servers['default']
expected_server = {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend',
'HOSTS': [],
'PARAMS': {},
'INDICES': []
}
assert default_server == expected_server
def test_ensure_server_defaults_not_exists(self):
"""Assert raise when the argument given is not a configured server."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ConnectionDoesNotExist) as raised:
handler.ensure_server_defaults('index')
assert str(raised.exception) == '%r' % 'index'
# Index
def test_empty_ensure_index_defaults(self):
"""Assert default values are set properly on an empty index."""
indices = {
'index': {}
}
handler = ConnectionHandler({}, indices)
handler.ensure_index_defaults('index')
index = handler.indices['index']
expected_index = {
'NAME': 'index',
'ALIASES': [],
'SETTINGS': None,
}
assert index == expected_index
def test_ensure_index_defaults_not_exists(self):
"""Assert raise when the argument given is not a configured index."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.ensure_index_defaults('index')
assert str(raised.exception) == '%r' % 'index'
# Test prepare test settings
# ==========================
# Prepare server
def test_empty_prepare_server_test_settings(self):
"""Assert prepare adds a TEST key in the defaul server's settings."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
handler = ConnectionHandler(servers, {})
handler.prepare_server_test_settings('default')
default_server = handler.servers['default']
expected_test_server = {
'INDICES': []
}
assert 'TEST' in default_server
assert default_server['TEST'] == expected_test_server
def test_prepare_server_test_settings_not_exists(self):
"""Assert raise when the argument given is not a configured server."""
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ConnectionDoesNotExist) as raised:
handler.prepare_server_test_settings('index')
assert str(raised.exception) == '%r' % 'index'
# Prepare index
def test_empty_prepare_index_test_settings(self):
indices = {
'index': {}
}
handler = ConnectionHandler({}, indices)
handler.ensure_index_defaults('index')
handler.prepare_index_test_settings('index')
index = handler.indices['index']
expected_test_index = {
'NAME': 'index_test',
'ALIASES': [],
'SETTINGS': None,
}
assert 'TEST' in index
assert index['TEST'] == expected_test_index
def test_prepare_index_test_settings_not_exists(self):
"""Assert raise when the argument given is not a configured index."""
servers = {}
indices = {}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.prepare_index_test_settings('index')
assert str(raised.exception) == '%r' % 'index'
def test_prepare_index_test_settings_use_alias_not_index_name(self):
"""Assert raise even if the index NAME is given as argument.
The prepare_index_test_settings method expects an index alias as used
in the indices dict, not its NAME (nor any of its ALIASES).
"""
servers = {}
indices = {
'index': {
'NAME': 'not_this_index',
'ALIASES': ['not_this_index']
}
}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(IndexDoesNotExist) as raised:
handler.prepare_index_test_settings('not_this_index')
assert str(raised.exception) == '%r' % 'not_this_index'
def test_prepare_index_test_settings_name_improperly_configured(self):
"""Assert raise when name and test name are the same."""
servers = {}
indices = {
'index': {
'NAME': 'index_production_name',
'ALIASES': [],
'TEST': {
'NAME': 'index_production_name',
'ALIASES': [],
}
}
}
handler = ConnectionHandler(servers, indices)
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.prepare_index_test_settings('index')
assert str(raised.exception) == (
'Index \'index\' uses improperly the same NAME and TEST\'s NAME '
'settings: \'index_production_name\'.'
)
def test_prepare_index_test_settings_aliases_improperly_configured(self):
"""Assert raise when name and test name are the same."""
servers = {}
indices = {
'index': {
'NAME': 'index',
'ALIASES': ['alias_prod', 'alias_prod_2'],
'TEST': {
'NAME': 'index_valid_test_name',
'ALIASES': ['alias_prod', 'alias_test']
}
}
}
handler = ConnectionHandler(servers, indices)
handler.ensure_index_defaults('index')
with self.assertRaises(ImproperlyConfigured) as raised:
# A simple call to servers must raise.
handler.prepare_index_test_settings('index')
assert str(raised.exception) == (
'Index \'index\' uses improperly the same index alias in ALIASES '
'and in TEST\'s ALIASES settings: \'alias_prod\'.'
)
# Test get server indices
# =======================
def test_empty_get_server_indices(self):
"""Assert there is no index by default, ie. `_all` will be used.
ElasticSearch allows query on all indices. It is not safe for testing
purposes, but it does not have to be checked in the connection handler.
"""
handler = ConnectionHandler({}, {})
# Yes, it is acceptable to get indices from a non-configured servers.
# The purpose of get_server_indices is not to validate the input.
test_server = {
'INDICES': []
}
indices = handler.get_server_indices(test_server)
assert indices == {}
def test_get_server_indices(self):
"""Assert indices are found for a given server."""
servers = {}
indices = {
'used': {},
'not_used': {}
}
handler = ConnectionHandler(servers, indices)
test_server = {
'INDICES': ['used'],
}
indices = handler.get_server_indices(test_server)
expected_indices = {
'used': {
'NAME': 'used',
'ALIASES': [],
'SETTINGS': None,
'TEST': {
'NAME': 'used_test',
'ALIASES': [],
'SETTINGS': None,
}
}
}
assert indices == expected_indices
# Test backend loading
# ====================
# Backend loading takes the given settings to import a module and
# instantiate a subclass of djangoes.backends.Base.
def test_function_load_backend(self):
"""Assert load_backend function imports and returns the given path.
An external function is used to import a module attribute from an
import path: it extracts the module import path and the attribute name,
then it imports the module and get its attribute, catching
``ImportError`` and ``AttributeError`` to raise a djangoes custom error
instead of basic errors.
"""
datetime_class = load_backend('datetime.datetime')
assert hasattr(datetime_class, 'now')
isfile_function = load_backend('os.path.isfile')
assert type(isfile_function) == type(lambda x: x)
with self.assertRaises(ImproperlyConfigured) as raised:
load_backend('module.does.not.exist')
assert str(raised.exception) == '\n'.join(
["'module.does.not.exist' isn't an available ElasticSearch backend.",
"Error was: No module named 'module'"])
with self.assertRaises(ImproperlyConfigured) as raised:
load_backend('os.path.not_exist')
assert str(raised.exception) == '\n'.join(
["'os.path.not_exist' isn't an available ElasticSearch backend.",
"Error was: 'module' object has no attribute 'not_exist'"])
def test_load_backend(self):
"""Assert load_backend method loads the configured server engine."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert isinstance(result, Base)
assert result.alias == 'default'
assert result.indices == []
assert result.index_names == []
assert result.alias_names == []
def test_load_backend_with_index(self):
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1'],
}
}
indices = {
'index_1': {
'NAME': 'index_1',
'ALIASES': ['alias_1', 'alias_2'],
}
}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert sorted(result.indices) == ['alias_1', 'alias_2']
assert result.index_names == ['index_1']
assert sorted(result.alias_names) == ['alias_1', 'alias_2']
def test_load_backend_with_indices(self):
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1', 'index_2'],
}
}
indices = {
'index_1': {
'NAME': 'index_1',
'ALIASES': ['alias_1', 'alias_2'],
},
'index_2': {
'NAME': 'index_2_name',
}
}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert sorted(result.indices) == ['alias_1', 'alias_2', 'index_2_name']
assert sorted(result.index_names) == ['index_1', 'index_2_name']
assert sorted(result.alias_names) == ['alias_1', 'alias_2']
# Test loading of backends.elasticsearch
# ======================================
def test_loading_elasticsearch(self):
servers = {
'default': {
'ENGINE': 'djangoes.backends.elasticsearch.SimpleHttpBackend'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
result = handler.load_backend('default')
assert isinstance(result, elasticsearch.SimpleHttpBackend)
# Test object and attributes manipulation
# =======================================
def test_iterable(self):
"""Assertions about list behavior of ConnectionHandler."""
servers = {
'default': {},
'task': {},
}
indices = {}
handler = ConnectionHandler(servers, indices)
assert sorted(list(handler)) == ['default', 'task']
def test_items(self):
"""Assertions about key:value behavior of ConnectionHandler."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
'INDICES': ['index_1'],
},
}
indices = {
'index_1': {},
'index_2': {}
}
handler = ConnectionHandler(servers, indices)
# Get the connection wrapper
wrapper = handler['default']
assert wrapper.indices == ['index_1']
# Change handler settings
handler.servers['default']['INDICES'] = ['index_2']
# The wrapper is not updated
wrapper = handler['default']
assert wrapper.indices == ['index_1']
# Delete the `default` connection
del handler['default']
# The new wrapper now uses the new index
wrapper = handler['default']
assert wrapper.indices == ['index_2']
# Also, set item works without control
handler['something'] = 'else'
assert handler['something'] == 'else'
def test_all(self):
"""Assert all connection wrappers are returned."""
servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper',
},
'task': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
indices = {}
handler = ConnectionHandler(servers, indices)
all_connections = handler.all()
assert len(all_connections) == 2
assert isinstance(all_connections[0], Base)
assert isinstance(all_connections[1], Base)
assert sorted([c.alias for c in all_connections]) == ['default', 'task']
def test_check_for_multiprocess(self):
"""Assert method will reset connections with a different PID.
.. note::
We don't really test "multi-processing" behavior. We are only
messing with a flag here to test connections reset.
"""
servers = {
'default': {
'HOSTS': ['localhost']
}
}
handler = ConnectionHandler(servers, {})
conn = handler['default']
conn_again = handler['default']
assert conn is conn_again
assert id(conn) == id(conn_again)
# Changing the PID to "reset" connections.
handler._pid = 1
conn_again = handler['default']
assert conn is not conn_again
assert id(conn) != id(conn_again)
class TestProxyConnectionHandler(TestCase):
def test_attributes(self):
# Local import to manipulate elements
from djangoes import connections, connection
connections._servers = {
'default': {
'ENGINE': 'tests.backend.ConnectionWrapper'
}
}
connections._indices = {}
# Existing attribute.
assert connection.alias == 'default'
# New attribute.
assert not hasattr(connection, 'new_attribute')
connections['default'].new_attribute = 'test_value'
assert hasattr(connection, 'new_attribute')
assert connection.new_attribute == 'test_value'
del connection.new_attribute
assert not hasattr(connection, 'new_attribute')
assert not hasattr(connections['default'], 'new_attribute')
connection.new_attribute = 'test_new_attribute_again'
assert hasattr(connection, 'new_attribute')
assert hasattr(connections['default'], 'new_attribute')
assert connection == connections['default']
assert not (connection != connections['default'])
| 31.552716
| 90
| 0.571132
| 1,882
| 19,752
| 5.837407
| 0.138682
| 0.017841
| 0.062079
| 0.062261
| 0.517659
| 0.466685
| 0.408611
| 0.352631
| 0.275715
| 0.244038
| 0
| 0.002731
| 0.314145
| 19,752
| 625
| 91
| 31.6032
| 0.808223
| 0.201094
| 0
| 0.489744
| 0
| 0
| 0.153583
| 0.038347
| 0
| 0
| 0
| 0
| 0.187179
| 1
| 0.071795
| false
| 0
| 0.017949
| 0
| 0.094872
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53ce607d2484b47e38e0b6a97b11b56e4d3bb58
| 8,497
|
py
|
Python
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 23
|
2015-01-14T21:32:11.000Z
|
2021-07-19T12:59:10.000Z
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 1
|
2017-06-30T10:54:57.000Z
|
2017-06-30T10:54:57.000Z
|
bin/yap_conflict_check.py
|
Novartis/yap
|
8399e87e6083e6394d1f9340e308a01751465a03
|
[
"Apache-2.0"
] | 9
|
2015-09-02T17:44:24.000Z
|
2021-07-05T18:59:16.000Z
|
#!/usr/bin/env python
"""
Copyright 2014 Novartis Institutes for Biomedical Research
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
class yap_conflict_check:
"""
Provides methods to perform file-file, file-sample, file-group and
sample-group comparisons and find conflicts.
"""
def __init__(self, input_files):
self.input_files = map(self.translate_path, input_files)
self.filename_dict = \
self.generate_filename_dict(self.input_files)
def translate_path(self, path):
"""
Given a path,
Returns a path after expanding environment and user variables and
relative paths to absolute path
"""
path = os.path.expandvars(path) # expand environment variables
path = os.path.expanduser(path) # expand user's home directory
# don't convert to absolute if just filename
if len(os.path.dirname(path)) == 0 and (path not in ['.', ".."]):
return path
path = os.path.abspath(path) # convert relative path to absolute
return path # return output
def translate_paired_end_paths(self, paired_end_files):
'''
Given a list of paired end files
Returns a new list of paired end files with each file translated
using translate path function
'''
if len(paired_end_files) <= 0:
return [] # return empty o/p
paired_end_files_out = [] # output variable
for paired_list in paired_end_files: # translate each path list
paired_list_out = map(self.translate_path, paired_list)
paired_end_files_out.append(paired_list_out) # append the translated list to o/p
return paired_end_files_out # return output
def get_paths(self, name):
'''
Given a name,
Returns the list of paths stored under the key matching the
name
'''
if len(name) <= 0:
return None # return null for empty input
# return if an exact match is found
if name in self.filename_dict:
return self.filename_dict[name]
# return all values for a partial match
matches = []
for key in self.filename_dict:
if key.find(name) == 0:
new_paths = self.find_new_items(matches,
self.filename_dict[key])
# extend only if a unique match is found
if len(new_paths) > 0:
matches.extend(new_paths)
if len(matches) == 0:
return None # return null if no matches
else:
return matches # return output
def find_new_items(self, current_list, new_list):
'''
Given two lists,
Returns items which are not available in current lists,
Return empty list if no such items are found
'''
if len(current_list) == 0:
return new_list # all paths are new
# select an items not in current list and return list
return filter((lambda item: item not in current_list),
new_list)
def validate_names_and_find_duplicates(self, names):
'''
Given list of filenames,
Calls validate_names_and_find_duplicates_with_finder with
get_paths as finder and returns the result
'''
return self.validate_names_and_find_duplicates_with_finder(
names,
self.get_paths)
def validate_names_and_find_duplicates_with_finder(self, filenames,
finder):
"""
Input:
--filenames: a list of filenames occurring in the contaminant file
Check that all filenames exist in the input file names and
that there are no duplicate filenames.
Return values:
--match_list:
--error_list: all filenames which do not exist in the input file names
--duplicate_dict: [key:value]
-key: filename for which a duplicate happens
-value: all paths in which this filename occurs
"""
match_list = []
error_list = []
duplicate_dict = {}
# translate all filenames paths to complete paths
filenames = map(self.strip_space_tab_newline, filenames)
filenames = map(self.translate_path, filenames)
for fn in filenames:
if fn in self.input_files:
# filename exist in self.input_files
match_list.append(fn)
else:
# treat fn as basename
paths = finder(fn)
if paths is not None:
# basename exists
if len(paths) > 1:
# duplicate happens
duplicate_dict[fn] = paths
else:
# no duplicate
match_list.extend(paths)
else:
# basename not exists
error_list.append(fn)
return match_list, error_list, duplicate_dict
def generate_filename_dict(self, paths):
"""
Given a list of complete filepaths,
Returns a dictionary, with keys as filenames and values as list of
all paths that contain the corresponding key
Invariant: Paths contain filenames complete with extension.
"""
output = {} # output variable
if len(paths) <= 0:
return output # return empty output for empty input
for path in paths:
output[path] = [path] # add each path as key also.
basename = os.path.basename(path) # get filename from path
if len(basename) <= 0:
continue # skip if no filename in path
# get name without extension
basename_no_ext = os.path.splitext(basename)[0]
# create a new entry if it does not exist, append otherwise
if basename in output:
output[basename].append(path)
else:
output[basename] = [path]
# include a name with filename without extension also
if len(basename_no_ext) <= 0:
continue # skip if name is empty
if basename_no_ext != basename: # add an entry for just filename
if basename_no_ext in output:
output[basename_no_ext].append(path)
else:
output[basename_no_ext] = [path]
return output # return dict
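# A worked example (hypothetical path; illustrative only):
#   generate_filename_dict(['/data/reads/sample1.fastq']) returns
#   {'/data/reads/sample1.fastq': ['/data/reads/sample1.fastq'],
#    'sample1.fastq': ['/data/reads/sample1.fastq'],
#    'sample1': ['/data/reads/sample1.fastq']}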
def find_duplicates_in_list(self, input):
"""
Given a list,
Returns a dictionary of all duplicates in the list,
Return empty dictionary if no duplicate entries are found.
"""
output = {} # output variable
if len(input) <= 0:
return output # return empty output for empty input
for item in input:
if item not in output: # check only if item not seen earlier
item_count = input.count(item) # count items
# add to output if item occurs more than once in list
if item_count > 1:
output[item] = item_count
return output
def list_to_sentence(self, list):
"""
Translate the given list to a string.
"""
sentence = ""
for i in range(0, len(list)):
if i == len(list) - 1:
sentence += "'" + list[i] + "'"
else:
sentence += "'" + list[i] + "' and "
return sentence
def strip_space_tab_newline(self, input):
'''
Given a string,
Returns a string after removing starting and trailing spaces,
tabs and new line character
'''
if len(input) <= 0:
return '' # empty o/p for empty i/p
input = input.strip()
input = input.strip('\n')
input = input.strip('\t')
return input
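# A minimal usage sketch (hypothetical file paths; illustrative only):
#   checker = yap_conflict_check(['/data/sample_R1.fastq', '/data/sample_R2.fastq'])
#   matches, errors, dupes = checker.validate_names_and_find_duplicates(['sample_R1', 'missing'])
#   # -> matches == ['/data/sample_R1.fastq'], errors == ['missing'], dupes == {}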
| 37.933036
| 79
| 0.578086
| 1,032
| 8,497
| 4.638566
| 0.224806
| 0.012534
| 0.023397
| 0.016712
| 0.117401
| 0.064759
| 0.044287
| 0.019219
| 0.019219
| 0.019219
| 0
| 0.004566
| 0.355655
| 8,497
| 223
| 80
| 38.103139
| 0.869772
| 0.393904
| 0
| 0.185841
| 0
| 0
| 0.003449
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097345
| false
| 0
| 0.00885
| 0
| 0.283186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a53d6a324052f390797cf713195803de6c9fa43f
| 1,148
|
py
|
Python
|
PS4/ps4a.py
|
PanPapag/MIT-OCW-Introduction-to-Computer-Science-and-Programming-in-Python-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | 3
|
2019-05-20T19:37:49.000Z
|
2020-05-16T08:57:04.000Z
|
PS4/ps4a.py
|
PanPapag/MIT-OCW-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | null | null | null |
PS4/ps4a.py
|
PanPapag/MIT-OCW-6.0001
|
f9aeb55c1473920a7d283bfc09726bdef5614331
|
[
"MIT"
] | null | null | null |
def get_permutations(sequence):
'''
Enumerate all permutations of a given string
sequence (string): an arbitrary string to permute. Assume that it is a
non-empty string.
You MUST use recursion for this part. Non-recursive solutions will not be
accepted.
Returns: a list of all permutations of sequence
Example:
>>> get_permutations('abc')
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: depending on your implementation, you may return the permutations in
a different order than what is listed here.
'''
if len(sequence) == 0 or len(sequence) == 1:
result = [sequence]
else:
x = sequence[0]
permutations = get_permutations(sequence[1:])
result = []
for p in permutations:
for i in range(len(p) + 1):
result.append(p[:i] + x + p[i:])
return result
if __name__ == '__main__':
example_input = 'abc'
print('Input:', example_input)
print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])
print('Actual Output:', get_permutations(example_input))
| 30.210526
| 79
| 0.595819
| 143
| 1,148
| 4.678322
| 0.531469
| 0.089686
| 0.068759
| 0.035874
| 0.053812
| 0.053812
| 0
| 0
| 0
| 0
| 0
| 0.006075
| 0.283101
| 1,148
| 37
| 80
| 31.027027
| 0.806804
| 0.408537
| 0
| 0
| 0
| 0
| 0.110922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0
| 0
| 0.125
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a541ad6227bc2976b930cd5ee28105b474b1a9e3
| 1,350
|
py
|
Python
|
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
flash_test/utils/log.py
|
nikolas-hermanns/flash-test
|
dda642e96f76113b42a7d64415eb3d8cdc03fca5
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jan 16, 2016
@author: enikher
'''
import logging
import datetime
LOG = logging.getLogger(__name__)
LOG_LEVEL = logging.DEBUG
LOG_PATH = "./dlService.log"
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
filename=LOG_PATH,
datefmt='%Y-%m-%dT%H:%M:%S', level=LOG_LEVEL)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
console.setFormatter(formatter)
LOG.addHandler(console)
def log_enter_exit(func):
def inner(self, *args, **kwargs):
LOG.debug(("Entering %(cls)s.%(method)s "
"args: %(args)s, kwargs: %(kwargs)s") %
{'cls': self.__class__.__name__,
'method': func.__name__,
'args': args,
'kwargs': kwargs})
start = datetime.datetime.now()
ret = func(self, *args, **kwargs)
end = datetime.datetime.now()
LOG.debug(("Exiting %(cls)s.%(method)s. "
"Spent %(duration)s sec. "
"Return %(return)s") %
{'cls': self.__class__.__name__,
'duration': end - start,
'method': func.__name__,
'return': ret})
return ret
return inner
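# A minimal usage sketch (the demo class is hypothetical; illustrative only):
class _Demo(object):

    @log_enter_exit
    def work(self, x):
        return x * 2


if __name__ == '__main__':
    _Demo().work(21)  # logs entry, then exit with the time spent and the return value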
| 31.395349
| 71
| 0.545926
| 144
| 1,350
| 4.881944
| 0.388889
| 0.042674
| 0.048364
| 0.051209
| 0.122333
| 0.073969
| 0
| 0
| 0
| 0
| 0
| 0.006342
| 0.299259
| 1,350
| 42
| 72
| 32.142857
| 0.736786
| 0.03037
| 0
| 0.121212
| 0
| 0
| 0.216756
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.060606
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a546651f1dcad01340583064244d142fb1215fd5
| 1,061
|
py
|
Python
|
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | null | null | null |
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | null | null | null |
EasyPortfolioExplorer/app/utils/resource_loader.py
|
jblemoine/EasyPortfolioExplorer
|
88484a1acb8f41f7497129ffefc89608af2d34d5
|
[
"MIT"
] | 1
|
2018-05-07T23:44:40.000Z
|
2018-05-07T23:44:40.000Z
|
from EasyPortfolioExplorer.app.easy.base import EasyBase
class ResourceLoader(EasyBase):
"""
Class for adding external resources such as css and js file.
The current version is based on Bootstrap 3.3.7.
"""
def __init__(self, **kwargs):
super(ResourceLoader, self).__init__(**kwargs)
self._css_urls = [
'https://cdn.rawgit.com/jblemoine/EasyPortfolioExplorer/117125bb/EasyPortfolioExplorer/app/static/extra.css',
'https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css',
]
self._js_urls = [
'https://code.jquery.com/'
'jquery-3.1.1.slim.min.js',
'https://maxcdn.bootstrapcdn.com/'
'bootstrap/3.3.7/js/bootstrap.min.js',
'/static/extra.js'
]
def load_resources(self):
for url in self._css_urls:
self.app.css.append_css({'external_url': url})
for url in self._js_urls:
self.app.scripts.append_script({'external_url': url})
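# A minimal usage sketch (assumes EasyBase accepts the app via **kwargs; illustrative only):
# loader = ResourceLoader(app=my_dash_app)
# loader.load_resources()  # appends each CSS/JS URL to the underlying Dash app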
| 33.15625
| 122
| 0.600377
| 128
| 1,061
| 4.8125
| 0.4375
| 0.00974
| 0.01461
| 0.084416
| 0.123377
| 0.123377
| 0.123377
| 0.123377
| 0
| 0
| 0
| 0.023256
| 0.2705
| 1,061
| 32
| 123
| 33.15625
| 0.77261
| 0.101791
| 0
| 0
| 0
| 0.1
| 0.365854
| 0.06541
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.05
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5488a57c13d79bfc459f46fd458c1c896f8b4d3
| 1,268
|
py
|
Python
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 263
|
2020-10-05T18:47:29.000Z
|
2022-03-31T19:44:46.000Z
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 1,264
|
2020-10-05T18:13:05.000Z
|
2022-03-31T23:16:35.000Z
|
Python/1289.MatrixSpiral.py
|
nizD/LeetCode-Solutions
|
7f4ca37bab795e0d6f9bfd9148a8fe3b62aa5349
|
[
"MIT"
] | 760
|
2020-10-05T18:22:51.000Z
|
2022-03-29T06:06:20.000Z
|
"""This program takes a matrix of size mxn as input, and prints the matrix in a spiral format
for example: input ->> [[1,2,3],
[4,5,6],
[7,8,9],
[10,11,12]]
output ->> 1 2 3 6 9 12 11 10 7 4 5 8"""
class Solution:
def matrix_spiral(self, matrix):
"""
:type matrix: list[list[int]]
"""
starting_row = 0
ending_row = len(matrix)
starting_col = 0
ending_col = len(matrix[0])
while starting_row < ending_row and starting_col < ending_col:
for k in range(starting_col, ending_col):
print(matrix[starting_row][k], end=" ")
starting_row += 1
for k in range(starting_row, ending_row):
print(matrix[k][ending_col-1], end=" ")
ending_col -= 1
if starting_row < ending_row:
for k in range(ending_col-1, starting_col-1, -1):
print(matrix[ending_row-1][k], end=" ")
ending_row -= 1
if starting_col < ending_col:
for k in range(ending_row-1, starting_row-1, -1):
print(matrix[k][starting_col], end=" ")
starting_col += 1
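# A quick demonstration (mirrors the docstring example; not part of the original solution):
if __name__ == "__main__":
    Solution().matrix_spiral([[1, 2, 3],
                              [4, 5, 6],
                              [7, 8, 9],
                              [10, 11, 12]])
    # prints: 1 2 3 6 9 12 11 10 7 4 5 8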
| 37.294118
| 93
| 0.502366
| 168
| 1,268
| 3.619048
| 0.291667
| 0.126645
| 0.039474
| 0.072368
| 0.184211
| 0.101974
| 0.101974
| 0.101974
| 0
| 0
| 0
| 0.057692
| 0.384858
| 1,268
| 33
| 94
| 38.424242
| 0.721795
| 0.238959
| 0
| 0
| 0
| 0
| 0.004274
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.095238
| 0.190476
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a54a675c308dee0b53b78a00aef279613875fd2d
| 4,694
|
py
|
Python
|
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | 2
|
2022-03-02T06:54:28.000Z
|
2022-03-02T06:56:45.000Z
|
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | null | null | null |
lib/sde.py
|
NCIA-Diffusion/ScoreSDE
|
b5a562908daf66e6dcf0b791beb83f1fcb61174b
|
[
"MIT"
] | 2
|
2022-02-23T11:49:15.000Z
|
2022-03-02T06:56:46.000Z
|
import abc
import numpy as np
import torch
import torch.nn as nn
class AbstractSDE(abc.ABC):
def __init__(self):
super().__init__()
self.N = 1000
@property
@abc.abstractmethod
def T(self):
"""End time of the SDE."""
raise NotImplementedError
@abc.abstractmethod
def sde(self, x_t, t):
"""Compute the drift/diffusion of the forward SDE
dx = b(x_t, t)dt + s(x_t, t)dW
"""
raise NotImplementedError
@abc.abstractmethod
def marginal_prob(self, x_0, t):
"""Compute the mean/std of the transitional kernel
p(x_t | x_0).
"""
raise NotImplementedError
@abc.abstractmethod
def prior_logp(self, z):
"""Compute log-density of the prior distribution."""
raise NotImplementedError
@abc.abstractmethod
def scale_start_to_noise(self, t):
"""Compute the scale of conversion
from the original image estimation loss, i.e., || x_0 - x_0_pred ||,
to the noise prediction loss, i.e., || e - e_pred ||.
Denoting the output of this function by C,
C * || x_0 - x_0_pred || = || e - e_pred || holds.
"""
raise NotImplementedError
# @abc.abstractmethod
# def proposal_distribution(self):
# raise NotImplementedError
def reverse(self, model, model_pred_type='noise'):
"""The reverse-time SDE."""
sde_fn = self.sde
marginal_fn = self.marginal_prob
class RSDE(self.__class__):
def __init__(self):
pass
def score_fn(self, x_t, t):
if model_pred_type == 'noise':
x_noise_pred = model(x_t, t)
_, x_std = marginal_fn(
torch.zeros_like(x_t),
t,
)
score = -x_noise_pred / x_std
elif model_pred_type == 'original':
x_0_pred = model(x_t, t)
x_mean, x_std = marginal_fn(
x_0_pred,
t
)
score = (x_mean - x_t) / x_std ** 2  # Gaussian score: (mean - x) / std**2, consistent with the 'noise' branch
return score
def sde(self, x_t, t):
# Get score function values
score = self.score_fn(x_t, t)
# Forward SDE's drift & diffusion
drift, diffusion = sde_fn(x_t, t)
# Reverse SDE's drift & diffusion (Anderson, 1982)
drift = drift - diffusion ** 2 * score
return drift, diffusion
return RSDE()
class VPSDE(AbstractSDE):
def __init__(self, beta_min=0.1, beta_max=20, N=1000):
super().__init__()
self.beta_0 = beta_min
self.beta_1 = beta_max
self.N = N
self.discrete_betas = torch.linspace(beta_min / N, beta_max / N, N)
self.alphas = 1. - self.discrete_betas
# self.IS_dist, self.norm_const = self.proposal_distribution()
@property
def T(self):
return 1
def sde(self, x_t, t):
beta_t = (self.beta_0 + t * (self.beta_1 - self.beta_0))[:, None, None, None]
drift = -0.5 * beta_t * x_t
diffusion = torch.sqrt(beta_t)
return drift, diffusion
def marginal_prob(self, x_0, t):
log_mean_coeff = (
-0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)[:, None, None, None]
marginal_mean = torch.exp(log_mean_coeff) * x_0
marginal_std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
return marginal_mean, marginal_std
def prior_logp(self, z):
shape = z.shape
N = np.prod(shape[1:])
logps = - N / 2. * np.log(2 * np.pi) - torch.sum(z ** 2, dim=(1, 2, 3)) / 2.
return logps
def scale_start_to_noise(self, t):
log_mean_coeff = (
-0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
)[:, None, None, None]
marginal_coeff = torch.exp(log_mean_coeff)
marginal_std = torch.sqrt(1. - torch.exp(2. * log_mean_coeff))
scale = marginal_coeff / (marginal_std + 1e-12)
return scale
# def proposal_distribution(self):
# def g2(t):
# return self.beta_0 + t * (self.beta_1 - self.beta_0)
# def a2(t):
# log_mean_coeff = -0.25 * t ** 2 * (self.beta_1 - self.beta_0) \
# - 0.5 * t * self.beta_0
# return 1. - torch.exp(2. * log_mean_coeff)
# t = torch.arange(1, 1001) / 1000
# p = g2(t) / a2(t)
# normalizing_const = p.sum()
# return p, normalizing_const
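# A minimal usage sketch (shapes are illustrative; not part of the original module):
if __name__ == '__main__':
    sde = VPSDE()
    x_0 = torch.randn(4, 3, 32, 32)           # a batch of "images"
    t = torch.rand(4) * sde.T                 # times in [0, T)
    mean, std = sde.marginal_prob(x_0, t)     # statistics of p(x_t | x_0)
    x_t = mean + std * torch.randn_like(x_0)  # sample from the transition kernel
    print(x_t.shape)                          # torch.Size([4, 3, 32, 32])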
| 31.503356
| 85
| 0.532169
| 621
| 4,694
| 3.782609
| 0.196457
| 0.061303
| 0.014049
| 0.087271
| 0.346105
| 0.219242
| 0.186462
| 0.137931
| 0.137931
| 0.137931
| 0
| 0.031767
| 0.356199
| 4,694
| 148
| 86
| 31.716216
| 0.745533
| 0.241585
| 0
| 0.420455
| 0
| 0
| 0.005242
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0.011364
| 0.045455
| 0.011364
| 0.352273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a54b68b3a18c130ef71abef51b17c638d75ff918
| 1,166
|
py
|
Python
|
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | 1
|
2019-11-02T20:23:03.000Z
|
2019-11-02T20:23:03.000Z
|
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | null | null | null |
diagrams/seq-tables.py
|
PerFuchs/master-thesis
|
85386c266fecf72348114bcbafeeb896a9e74601
|
[
"MIT"
] | null | null | null |
import pandas as pd
import matplotlib.pyplot as plt
from diagrams.base import *
DATASET = DATASET_FOLDER + "ama0302.csv"
def tabulize_data(data_path, output_path):
data = pd.read_csv(data_path)
fix_count(data)
fix_neg(data, "copy")
data["total_time"] = data["End"] - data["Start"]
grouped = data.groupby(["partitioning_base", "Query", "Parallelism"])  # note: computed but not used below
data.to_latex(buf=open(output_path, "w"),
columns=["Query", "Count", "Time", "WCOJTime_wcoj", "setup", "ratio"],
header = ["Query", "\\# Result", "\\texttt{BroadcastHashJoin}", "\\texttt{seq}", "setup", "Speedup"],
column_format="lr||r|rr||r",
formatters = {
"ratio": lambda r: str(round(r, 1)),
"Count": lambda c: "{:,}".format(c),
},
escape=False,
index=False
)
tabulize_data(DATASET_FOLDER + "ama0302.csv", GENERATED_PATH + "seq-table-ama0302.tex")
tabulize_data(DATASET_FOLDER + "ama0601.csv", GENERATED_PATH + "seq-table-ama0601.tex")
tabulize_data(DATASET_FOLDER + "snb-sf1.csv", GENERATED_PATH + "seq-table-snb-sf1.tex")
| 32.388889
| 117
| 0.596913
| 138
| 1,166
| 4.876812
| 0.5
| 0.077266
| 0.084695
| 0.111441
| 0.190193
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025814
| 0.235849
| 1,166
| 35
| 118
| 33.314286
| 0.729517
| 0
| 0
| 0
| 0
| 0
| 0.250429
| 0.077187
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a54b6dc0f255b7a92415a48a23ac09a9d0e01321
| 1,513
|
py
|
Python
|
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
instance-segmentation/detectron_train_PointRend.py
|
diwgan32/IKEA_ASM_Dataset
|
8f41c15c4a7fb47f53235d2292d0eff8136ae492
|
[
"MIT"
] | null | null | null |
# Run training with PointRend head
# uses default configuration from detectron2
# The model is initialized via pre-trained coco models from detectron2 model zoo
#
# Fatemeh Saleh <fatemehsadat.saleh@anu.edu.au>
import os
from detectron2.config import get_cfg
from detectron2.data.datasets import register_coco_instances
from detectron2.engine import DefaultTrainer
import sys; sys.path.insert(1, "projects/PointRend")
import point_rend
from detectron2.utils.logger import setup_logger
setup_logger()
if __name__=='__main__':
register_coco_instances("ikea_train", {}, "path/to/annotation/train_manual_coco_format.json", "/path/to/images/")
cfg = get_cfg()
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file("projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco.yaml")
cfg.MODEL.POINT_HEAD.NUM_CLASSES = 7
cfg.DATASETS.TRAIN = ("ikea_train",)
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 2
# initialize training
cfg.MODEL.WEIGHTS = "detectron2://PointRend/InstanceSegmentation/pointrend_rcnn_R_50_FPN_3x_coco/164955410/model_final_3c3198.pkl"
cfg.SOLVER.IMS_PER_BATCH = 2
cfg.SOLVER.BASE_LR = 0.0025 # pick a good LR
cfg.SOLVER.MAX_ITER = 60000
cfg.SOLVER.STEPS = (20000, 40000)
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 7
os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()
| 36.902439
| 134
| 0.769993
| 216
| 1,513
| 5.125
| 0.532407
| 0.075881
| 0.03794
| 0.061427
| 0.081301
| 0.081301
| 0.081301
| 0.081301
| 0
| 0
| 0
| 0.041953
| 0.13351
| 1,513
| 40
| 135
| 37.825
| 0.802441
| 0.155321
| 0
| 0
| 0
| 0
| 0.237795
| 0.188976
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a54c3694831528b032a63a41c9cef6f955e863a0
| 11,775
|
py
|
Python
|
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
dataviva/attrs/views.py
|
dogobox/datavivamaster
|
c89596778e2d8d01a2193b02ca5960bd17f4468d
|
[
"MIT"
] | null | null | null |
import urllib2
from sqlalchemy import func, distinct, asc, desc, and_, or_
from flask import Blueprint, request, jsonify, abort, g, render_template, make_response, redirect, url_for, flash
from dataviva import db, __latest_year__
from dataviva.attrs.models import Bra, Wld, Hs, Isic, Cbo, Yb
from dataviva.secex.models import Yp, Yw
from dataviva.rais.models import Yi, Yo
from dataviva.ask.models import Question
from dataviva.utils.gzip_data import gzip_data
from dataviva.utils.cached_query import cached_query
from dataviva.utils.exist_or_404 import exist_or_404
from dataviva.utils.title_case import title_case
mod = Blueprint('attrs', __name__, url_prefix='/attrs')
@mod.errorhandler(404)
def page_not_found(error):
return error, 404
def fix_name(attr, lang):
name_lang = "name_" + lang
desc_lang = "desc_" + lang
keywords_lang = "keywords_" + lang
if desc_lang in attr:
attr["desc"] = title_case(attr[desc_lang])
if "desc_en" in attr: del attr["desc_en"]
if "desc_pt" in attr: del attr["desc_pt"]
if name_lang in attr:
attr["name"] = title_case(attr[name_lang])
if "name_en" in attr: del attr["name_en"]
if "name_pt" in attr: del attr["name_pt"]
if keywords_lang in attr:
attr["keywords"] = title_case(attr[keywords_lang])
if "keywords_en" in attr: del attr["keywords_en"]
if "keywords_pt" in attr: del attr["keywords_pt"]
return attr
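# Illustrative usage (added example, assuming a serialized attribute dict):
# fix_name({"id": "sp", "name_en": "sao paulo", "name_pt": "são paulo"}, "en")
# -> {"id": "sp", "name": "Sao Paulo"}; the language-specific keys collapse
# into a single title-cased "name" field for the requested language.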
############################################################
# ----------------------------------------------------------
# All attribute views
#
############################################################
@mod.route('/<attr>/')
@mod.route('/<attr>/<Attr_id>/')
def attrs(attr="bra",Attr_id=None):
Attr = globals()[attr.title()]
Attr_weight_mergeid = "{0}_id".format(attr)
if attr == "bra":
Attr_weight_tbl = Yb
Attr_weight_col = "population"
elif attr == "isic":
Attr_weight_tbl = Yi
Attr_weight_col = "num_emp"
elif attr == "cbo":
Attr_weight_tbl = Yo
Attr_weight_col = "num_emp"
elif attr == "hs":
Attr_weight_tbl = Yp
Attr_weight_col = "val_usd"
elif attr == "wld":
Attr_weight_tbl = Yw
Attr_weight_col = "val_usd"
depths = {}
depths["bra"] = [2,4,7,8]
depths["isic"] = [1,3,5]
depths["cbo"] = [1,2,4]
depths["hs"] = [2,4,6]
depths["wld"] = [2,5]
depth = request.args.get('depth', None)
order = request.args.get('order', None)
offset = request.args.get('offset', None)
limit = request.args.get('limit', None)
if offset:
offset = float(offset)
limit = limit or 50
lang = request.args.get('lang', None) or g.locale
ret = {}
dataset = "rais"
if Attr == Cbo or Attr == Hs:
dataset = "secex"
latest_year = __latest_year__[dataset]
cache_id = request.path + lang
if depth:
cache_id = cache_id + "/" + depth
# first lets test if this query is cached
cached_q = cached_query(cache_id)
if cached_q and limit is None:
ret = make_response(cached_q)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
# if an ID is supplied only return that
if Attr_id:
# the '.show.' indicates that we are looking for a specific nesting
if ".show." in Attr_id:
this_attr, ret["nesting_level"] = Attr_id.split(".show.")
# filter table by requested nesting level
attrs = Attr.query \
.filter(Attr.id.startswith(this_attr)) \
.filter(func.char_length(Attr.id) == ret["nesting_level"]).all()
# the 'show.' indicates that we are looking for a specific nesting
elif "show." in Attr_id:
ret["nesting_level"] = Attr_id.split(".")[1]
# filter table by requested nesting level
attrs = Attr.query.filter(func.char_length(Attr.id) == ret["nesting_level"]).all()
# the '.' here means we want to see all attrs within a certain distance
elif "." in Attr_id:
this_attr, distance = Attr_id.split(".")
this_attr = Attr.query.get_or_404(this_attr)
attrs = this_attr.get_neighbors(distance)
else:
attrs = [Attr.query.get_or_404(Attr_id)]
ret["data"] = [fix_name(a.serialize(), lang) for a in attrs]
# an ID/filter was not provided
else:
query = db.session.query(Attr,Attr_weight_tbl) \
.outerjoin(Attr_weight_tbl, and_(getattr(Attr_weight_tbl,"{0}_id".format(attr)) == Attr.id, Attr_weight_tbl.year == latest_year))
if depth:
query = query.filter(func.char_length(Attr.id) == depth)
else:
query = query.filter(func.char_length(Attr.id).in_(depths[attr]))
if order:
direction = "asc"
if "." in order:
o, direction = order.split(".")
else:
o = order
if o == "name":
o = "name_{0}".format(lang)
if o == Attr_weight_col:
order_table = Attr_weight_tbl
else:
order_table = Attr
if direction == "asc":
query = query.order_by(asc(getattr(order_table,o)))
elif direction == "desc":
query = query.order_by(desc(getattr(order_table,o)))
if limit:
query = query.limit(limit).offset(offset)
attrs_all = query.all()
# just get items available in DB
attrs_w_data = None
if depth is None and limit is None:
attrs_w_data = db.session.query(Attr, Attr_weight_tbl) \
.filter(getattr(Attr_weight_tbl, Attr_weight_mergeid) == Attr.id) \
.group_by(Attr.id)
# raise Exception(attrs_w_data.all())
attrs_w_data = [a[0].id for a in attrs_w_data]
attrs = []
for i, a in enumerate(attrs_all):
b = a[0].serialize()
if a[1]:
b[Attr_weight_col] = a[1].serialize()[Attr_weight_col]
else:
b[Attr_weight_col] = 0
a = b
if attrs_w_data:
a["available"] = False
if a["id"] in attrs_w_data:
a["available"] = True
if Attr_weight_col == "population" and len(a["id"]) == 8 and a["id"][:2] == "mg":
plr = Bra.query.get_or_404(a["id"]).pr2.first()
if plr: a["plr"] = plr.id
if order:
a["rank"] = int(i+offset+1)
attrs.append(fix_name(a, lang))
ret["data"] = attrs
ret = jsonify(ret)
ret.data = gzip_data(ret.data)
if limit is None and cached_q is None:
cached_query(cache_id, ret.data)
ret.headers['Content-Encoding'] = 'gzip'
ret.headers['Content-Length'] = str(len(ret.data))
return ret
@mod.route('/table/<attr>/<depth>/')
def attrs_table(attr="bra",depth="2"):
g.page_type = "attrs"
data_url = "/attrs/{0}/?depth={1}".format(attr,depth)
return render_template("general/table.html", data_url=data_url)
@mod.route('/search/<term>/')
def attrs_search(term=None):
# Dictionary
bra_query = {}
cbo_query = {}
isic_query = {}
hs_query = {}
question_query = {}
wld = {}
lang = request.args.get('lang', None) or g.locale
result = []
bra = Bra.query.filter(or_(Bra.id == term, or_(Bra.name_pt.ilike("%"+term+"%"), Bra.name_en.ilike("%"+term+"%"))))
items = bra.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
bra_query = {}
bra_query["id"] = i["id"]
bra_query["name_pt"] = i["name_pt"]
if i["id"] == "bra":
icon = "all"
else:
icon = i["id"][0:2]
bra_query["icon"] = "/static/images/icons/bra/bra_" + icon
bra_query["name_en"] = i["name_en"]
bra_query["color"] = i["color"]
bra_query["content_type"] = "bra"
bra_query = fix_name(bra_query, lang)
result.append(bra_query)
if lang == "pt":
cbo = Cbo.query.filter(or_(Cbo.id == term, Cbo.name_pt.ilike("%"+term+"%")))
else:
cbo = Cbo.query.filter(or_(Cbo.id == term, Cbo.name_en.ilike("%"+term+"%")))
items = cbo.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
cbo_query = {}
cbo_query["id"] = i["id"]
cbo_query["name_pt"] = i["name_pt"]
cbo_query["name_en"] = i["name_en"]
cbo_query["color"] = i["color"]
cbo_query["content_type"] = "cbo"
cbo_query = fix_name(cbo_query, lang)
result.append(cbo_query)
isic_match = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u"]
if lang == "pt":
isic = Isic.query.filter(and_(Isic.name_pt.ilike("%"+term+"%"), Isic.id.in_(isic_match)))
else:
isic = Isic.query.filter(and_(Isic.name_en.ilike("%"+term+"%"), Isic.id.in_(isic_match)))
items = isic.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
isic_query = {}
isic_query["id"] = i["id"]
isic_query["name_pt"] = i["name_pt"]
isic_query["name_en"] = i["name_en"]
isic_query["color"] = i["color"]
isic_query["content_type"] = "isic"
isic_query = fix_name(isic_query, lang)
result.append(isic_query)
if lang == "pt":
hs = Hs.query.filter(or_(Hs.id.like("%"+term+"%"), Hs.name_pt.like("%"+term+"%")))
else:
hs = Hs.query.filter(or_(Hs.id.like("%"+term+"%"), Hs.name_en.ilike("%"+term+"%")))
items = hs.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
hs_query = {}
hs_query["id"] = i["id"]
hs_query["name_pt"] = i["name_pt"]
hs_query["name_en"] = i["name_en"]
hs_query["color"] = i["color"]
hs_query["content_type"] = "hs"
hs_query = fix_name(hs_query,lang)
result.append(hs_query)
if lang == "pt":
wld = Wld.query.filter(or_(Wld.id == term, Wld.name_pt.like("%"+term+"%")))
else:
wld = Wld.query.filter(or_(Wld.id == term, Wld.name_en.like("%"+term+"%")))
items = wld.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
wld_query = {}
wld_query["id"] = i["id"]
wld_query["name_pt"] = i["name_pt"]
wld_query["name_en"] = i["name_en"]
wld_query["color"] = i["color"]
wld_query["content_type"] = "wld"
wld_query = fix_name(wld_query, lang)
result.append(wld_query)
question = Question.query.filter(and_(Question.language == lang, or_(Question.question.ilike("%"+term+"%"), Question.body.ilike("%"+term+"%"))))
items = question.limit(50).all()
items = [i.serialize() for i in items]
for i in items:
question_query = {}
question_query["id"] = i["slug"]
question_query["name"] = i["question"]
question_query["color"] = '#D67AB0'
question_query["content_type"] = "learnmore"
question_query = fix_name(question_query, lang)
result.append(question_query)
ret = jsonify({"activities":result})
return ret
| 34.429825
| 148
| 0.545563
| 1,557
| 11,775
| 3.924213
| 0.140655
| 0.03928
| 0.025532
| 0.021604
| 0.307529
| 0.249918
| 0.211948
| 0.174632
| 0.162848
| 0.151391
| 0
| 0.008622
| 0.290786
| 11,775
| 341
| 149
| 34.530792
| 0.723027
| 0.046369
| 0
| 0.206107
| 0
| 0
| 0.093925
| 0.00649
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019084
| false
| 0
| 0.045802
| 0.003817
| 0.087786
| 0.01145
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a54cde621c4d8d9c2e11ad32222e88ab799ae414
| 701
|
py
|
Python
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 8
|
2019-05-14T12:50:29.000Z
|
2022-03-01T09:08:27.000Z
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 46
|
2019-03-24T20:59:29.000Z
|
2019-04-09T16:28:43.000Z
|
leetcode/easy/sort-array-by-parity.py
|
vtemian/interviews-prep
|
ddef96b5ecc699a590376a892a804c143fe18034
|
[
"Apache-2.0"
] | 1
|
2022-01-28T12:46:29.000Z
|
2022-01-28T12:46:29.000Z
|
"""
Given an array A of non-negative integers, return an array consisting of all the even elements of A,
followed by all the odd elements of A.
You may return any answer array that satisfies this condition.
Example 1:
Input: [3,1,2,4]
Output: [2,4,3,1]
The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
Note:
1 <= A.length <= 5000
0 <= A[i] <= 5000
"""
class Solution:
def sortArrayByParity(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
return [element for element in A if not element % 2] + \
[element for element in A if element % 2]
result = Solution().sortArrayByParity([3,1,2,4])
print(result)
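# An equivalent approach (added sketch, not part of the original solution):
# a stable sort on the parity key keeps evens first while preserving their
# relative order.
class SolutionSorted:
    def sortArrayByParity(self, A):
        # even elements have key 0 and sort before odd elements (key 1)
        return sorted(A, key=lambda element: element % 2)

print(SolutionSorted().sortArrayByParity([3, 1, 2, 4]))  # [2, 4, 3, 1]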
| 20.028571
| 100
| 0.617689
| 117
| 701
| 3.700855
| 0.504274
| 0.018476
| 0.020785
| 0.027714
| 0.101617
| 0.101617
| 0
| 0
| 0
| 0
| 0
| 0.070209
| 0.248217
| 701
| 34
| 101
| 20.617647
| 0.751423
| 0.584879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.5
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a5504cacd4d378cc9aecf50aa2070a23b003b4f8
| 3,878
|
py
|
Python
|
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | 6
|
2019-03-14T02:39:17.000Z
|
2021-10-31T11:43:58.000Z
|
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | null | null | null |
app/service/messages/dispatcher.py
|
ryan4yin/flypy-backend
|
7fcc2971ac27d3b44e352dfed73acd12e1913d65
|
[
"MIT"
] | 2
|
2020-02-04T07:44:37.000Z
|
2021-04-02T23:02:20.000Z
|
# -*- coding: utf-8 -*-
import copy
import logging
from operator import attrgetter
from typing import Dict
from app.service.messages.handler import Handler
logger = logging.getLogger(__name__)
class Dispatcher(object):
"""
Message dispatcher; Notice events are ignored for now.
platform: the platform; currently only qq (telegram and wechat may be added later)
group_id: the group id; possible values: private (private chat only), group (group chat only), or a specific group number
"""
def __init__(self):
self.handlers: Dict[str, Dict[str, list]] = {
"qq": dict(),
"telegram": dict(),
"wechat": dict(),
"default": {
"group": [],
"private": [],
},
}
self.sort_key = attrgetter("weight") # 用于 handles 排序的 key
def get_handlers(self, data: dict):
"""根据消息的内容,返回对应的 handlers 列表"""
platform = data['platform']
message = data['message']
if message['type'] == 'group':
group_id = message['group']['id']
handlers = self.handlers[platform].get(group_id)  # prefer the group's own custom handlers
if not handlers:
handlers = self.handlers["default"]['group'] # 没有则使用默认 handlers(这个所有平台通用)
elif message['type'] == 'private':
handlers = self.handlers['default']['private']  # likewise shared by all platforms
else:
logger.error("无法解析!消息格式不正确!")
return None
return handlers
def handle_update(self, data: dict):
"""处理消息"""
handlers = self.get_handlers(data)
data_back = copy.deepcopy(data) # 用于回复的 dict,在 data 上稍做修改就行
reply: dict = data_back['message']
reply.update({"text": "", "images": []}) # 先清除收到的消息
if reply['type'] == "group":
reply['group'] = {'id': reply['group']['id']}
# dispatch the message to the matching handler
for handler in handlers:
match, res = handler.handle_update(data)
if match:
if reply['type'] == "group":
reply['group']['at_members'] = res.get("at_members")
reply['text'] = res.get('text')
reply['images'] = res.get('images')
elif res is not None:  # parsing failed
reply['text'] = res.get("message")  # return the error message
if reply['text'] or reply['images']:  # there is a reply to send
return data_back  # this dict is sent back to the qq/telegram frontend
else:
return None  # nothing to reply with
def add_handler(self, handler, platform='default', group_id="group", extra_doc=None):
"""
Register a message handler; 'default' means the handler is shared by all platforms/groups.
1. Each message triggers at most one handler. Handlers are ordered by weight.
:param handler: the handler to add
:param platform: one of qq, telegram, wechat, or default
:param group_id: group, private, or a specific group id
:param extra_doc: supplementary docstring; the same command may behave differently in different environments
:return:
"""
if not isinstance(handler, Handler):
raise TypeError('handlers is not an instance of {0}'.format(Handler.__name__))
if not isinstance(platform, str):
raise TypeError('platform is not str')
if not isinstance(group_id, str):
raise TypeError('group_id is not str')
if extra_doc:  # attach the supplementary documentation
handler.extra_doc = extra_doc
if platform not in self.handlers:
self.handlers[platform] = {
group_id: [handler]
}
elif group_id not in self.handlers[platform]:
self.handlers[platform][group_id] = [handler]
else:
handlers_list = self.handlers[platform][group_id]
handlers_list.append(handler)
handlers_list.sort(key=self.sort_key, reverse=True)  # higher weight takes priority
def remove_handler(self, handler, platform='default', group_id="group"):
"""移除消息处理器"""
if platform in self.handlers \
and group_id in self.handlers[platform]:
self.handlers[platform][group_id].remove(handler)
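# Illustrative usage (an added sketch; EchoHandler and its constructor are
# hypothetical stand-ins for a concrete Handler subclass with a `weight`
# attribute and a `handle_update(data)` method):
# dispatcher = Dispatcher()
# dispatcher.add_handler(EchoHandler(weight=10), platform="qq", group_id="12345")
# reply = dispatcher.handle_update({
#     "platform": "qq",
#     "message": {"type": "group", "group": {"id": "12345"}, "text": "ping"},
# })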
| 34.318584
| 90
| 0.566787
| 426
| 3,878
| 5.049296
| 0.319249
| 0.055323
| 0.065086
| 0.04649
| 0.143189
| 0.130637
| 0.087401
| 0.087401
| 0.04556
| 0
| 0
| 0.001118
| 0.308149
| 3,878
| 112
| 91
| 34.625
| 0.800596
| 0.160392
| 0
| 0.093333
| 0
| 0
| 0.106322
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a551e5731106adef0abaef205055eb2d9ca12152
| 15,493
|
py
|
Python
|
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | 2
|
2021-11-20T14:16:56.000Z
|
2021-12-15T10:33:01.000Z
|
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | null | null | null |
bfs/bfs.py
|
NordFk/bfs-soap-api-wrapper
|
f149e33db9a19f325e3ae335bb6682e15b667e6a
|
[
"Apache-2.0"
] | 2
|
2021-11-20T16:49:38.000Z
|
2021-11-20T21:26:16.000Z
|
from collections import OrderedDict
from zeep import Client
from zeep import xsd
import zeep.helpers
import zeep.exceptions
import logging.config
import re
from .constants import methods
class Bfs:
client = None
factory = None
credentials = None
identifier = None
methods = methods
def __init__(self, config, verbose: bool = False):
self.__init_logging(verbose)
self.__init_client(config)
def __init_logging(self, verbose: bool = False):
if verbose:
logging.config.dictConfig({
'version': 1,
'formatters': {
'verbose': {
'format': '%(name)s: %(message)s'
}
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'zeep.transports': {
'level': 'DEBUG',
'propagate': True,
'handlers': ['console'],
},
}
})
else:
logging.getLogger('zeep').setLevel(logging.ERROR)
def __init_client(self, config: dict):
if self.client is None:
if 'bricknode' not in config:
raise ValueError('"bricknode" element missing from configuration')
if 'wsdl' not in config['bricknode']:
raise ValueError('"wsdl" element missing from "bricknode" configuration')
self.client = Client(config['bricknode']['wsdl'])
self.factory = self.client.type_factory('ns0')
self.credentials = self.factory.Credentials(UserName=config['bricknode']['credentials']['username'],
Password=config['bricknode']['credentials']['password'])
self.identifier = config['bricknode']['identifier']
def get_fields(self, method: str, default_value: bool = True):
"""
Gets fields object based on results object. Mitigates the plural form inconsistency present in the API
:param method:
:param default_value:
:return:
"""
try:
fields_method = getattr(self.factory, method + 'Fields')
except zeep.exceptions.LookupError:
fields_method = getattr(self.factory, method[:-1] + 'Fields')
fields = fields_method()
for key in fields:
fields[key] = default_value
return fields
def get_args(self, method: str):
"""
Gets args object based on results object. Mitigates the plural form inconsistency present in the API
:param method:
:return:
"""
try:
args_method = getattr(self.factory, method + 'Args')
except zeep.exceptions.LookupError:
args_method = getattr(self.factory, method[:-1] + 'Args')
return args_method()
@staticmethod
def get_entity_class_name(method: str):
"""
This method aligns the expected object names with the method that will use it. Eg. CreateAccount uses Account as
object, while the UpdateAccount method uses UpdateAccount objects and arrays thereof.
CreateMessage, on the other hand, uses CreateMessage as object.
:param method:
:return:
"""
# "Create" entities are not prefixed with "Create". Pattern changed for newer additions, omitted below.
method = re.sub('^%s' % 'Create', '', method) if method not in [
'CreateMessages',
'CreateNotes',
'CreateTasks',
'CreateTradingVenues',
'CreateWebhookSubscriptions'
] else method
# "Update" entities are always prefix with "Update". Unless, of course, it is UpdateAllocationProfiles
method = re.sub('^%s' % 'Update', '', method) if method in [
'UpdateAllocationProfiles'
] else method
# Casing anomalies
method = 'UpdateFundCompanies' if method == 'UpdateFundcompanies' else method
method = 'UpdateFundEntities' if method == 'UpdateFundentities' else method
# Inconsistent casing and plural form not at end
method = 'RecurringOrderTemplateAutoGiro' if method == 'RecurringOrderTemplatesAutogiro' else method
# Completely different entity type
method = 'FileInfoUpload' if method == 'File' else method
method = 'SuperTransactions' if method == 'BusinessTransactions' else method
return method
def _resolve_derived_class_from_abstract(self, class_name: str, entity: dict = None):
"""
Resolves any derived class that we would rather use, based on the contents of the entity
:param class_name: The class name of the potential abstract class
:param entity: The entity used for evaluation
:return:
"""
if entity is None:
return
if class_name == 'CurrencyExchangeOrder':
if 'BuyAmount' in entity.keys():
return getattr(self.factory, 'CurrencyExchangeOrderBuy')
elif 'SellAmount' in entity.keys():
return getattr(self.factory, 'CurrencyExchangeOrderSell')
return None
def get_entity(self, class_name: str, entity: dict = None, skip_validation_for_empty_values: bool = False):
"""
Gets entity object based on method
:param class_name: The class name of the entity
:param entity: Optional entity object to convert
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:return:
"""
try:
entity_method = getattr(self.factory, class_name)
except zeep.exceptions.LookupError:
try:
entity_method = getattr(self.factory, class_name[:-1])
except zeep.exceptions.LookupError:
entity_method = getattr(self.factory, class_name[:-3] + "y")
derived_entity_method = self._resolve_derived_class_from_abstract(entity_method.name, entity)
if derived_entity_method is not None:
entity_method = derived_entity_method
_entity = entity_method()
if skip_validation_for_empty_values:
for key in [a for a in dir(_entity) if not a.startswith('__')]:
_entity[key] = xsd.SkipValue
if type(entity) is dict:
for key in entity.keys():
_entity[key] = entity[key]
return _entity
def get_entity_array(self, class_name: str, entities: list):
"""
Gets an entity array based on class_name
:param class_name:
:param entities:
:return:
"""
try:
entity_array_method = getattr(self.factory, "ArrayOf" + class_name)
except zeep.exceptions.LookupError:
entity_array_method = getattr(self.factory, "ArrayOf" + class_name[:-1])
return entity_array_method(entities)
def __argument_transform(self, value):
"""
Transforms the argument to suit the soap client
:param value:
:return:
"""
p = re.compile('^[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$')
if isinstance(value, list) and len(value) > 0:
if p.match(value[0]):
return self.factory.ArrayOfGuid(value)
else:
return self.factory.ArrayOfString(value)
return value
def get(self, method: str, args: dict = None, fields: dict = None, raw_result: bool = False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param args: Any arguments you would like to pass (optional)
:param fields: Any field settings you would like to pass (optional)
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_fields = self.get_fields(method)
if type(fields) is dict:
for key in fields.keys():
_fields[key] = fields[key]
_args = self.get_args(method)
if type(args) is dict:
for key in args.keys():
_args[key] = self.__argument_transform(args[key])
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Args': _args,
'Fields': _fields
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def execute(self, method: str, entities: list = None, skip_validation_for_empty_values: bool = False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to execute
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:return:
"""
return self.create(method=method, entities=entities,
skip_validation_for_empty_values=skip_validation_for_empty_values, raw_result=True)
def create(self, method: str, entities: list = None, skip_validation_for_empty_values: bool = False,
raw_result=False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to create
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_entities = []
for entity in entities:
_entities.append(entity if type(entity) != dict
else self.get_entity(self.get_entity_class_name(method), entity, skip_validation_for_empty_values))
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Entities': self.get_entity_array(self.get_entity_class_name(method), _entities)
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def update(self, method: str, entities: list = None, fields: dict = None,
skip_validation_for_empty_values: bool = False, raw_result=False):
"""
Makes a call to the API, preparing the request and default fields (true) and adds+transforms the arguments
:param method: The Bricknode API method name
:param entities: The entities we want to update
:param fields: Any field settings you would like to pass (optional)
:param skip_validation_for_empty_values: Set this to True to ignore validation that required values are set
:param raw_result: Set to True to get the raw result back (optional)
:return:
"""
_fields = self.get_fields(method, False)
if type(fields) is dict:
for key in fields.keys():
_fields[key] = fields[key]
_entities = []
for entity in entities:
_entities.append(entity if type(entity) != dict
else self.get_entity(self.get_entity_class_name(method), entity, skip_validation_for_empty_values))
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'Entities': self.get_entity_array(self.get_entity_class_name(method), _entities),
'Fields': _fields
})
return result if raw_result \
else self.ordered_dict_to_object(self.get_response_rows(zeep.helpers.serialize_object(result), method))
def delete(self, method: str, brick_ids: list = None):
"""
Makes a delete call to the API for the given brick ids
:param method: The Bricknode API method name
:param brick_ids: The brickIds of the entities we want to delete
:return:
"""
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'BrickIds': self.__argument_transform(brick_ids)
})
return result
def cancel(self, method: str, entity: dict = None):
"""
Makes a call to the API using the entity as WorkflowTriggerDataEntity property
:param method: The Bricknode API method name
:param entity: The WorkflowTriggerDataEntity we want to supply
:return:
"""
query_method = getattr(self.client.service, method)
result = query_method({
'Credentials': self.credentials,
'identify': self.identifier,
'WorkflowTriggerDataEntity': entity
})
return result
@staticmethod
def get_response_rows(result: dict, method: str):
"""
Gets response rows based on results object. Mitigates the plural form inconsistency present in the API
:param result:
:param method:
:return:
"""
if 'Result' in result.keys() and result['Result'] is not None:
response_field = method + 'ResponseRow' \
if method + 'ResponseRow' in result['Result'] \
else method[:-1] + 'ResponseRow'
if result['Result'][response_field] is not None:
return result['Result'][response_field]
if 'Entities' in result.keys() and result['Entities'] is not None:
class_name = Bfs.get_entity_class_name(method)
response_field = class_name \
if class_name in result['Entities'] \
else class_name[:-1]
if result['Entities'][response_field] is not None:
return result['Entities'][response_field]
@staticmethod
def ordered_dict_to_object(value: dict):
"""
Recursively gets an object based on an ordered dictionary that may contain lists
:param value:
:return:
"""
if isinstance(value, list):
a = []
for item in value:
a.append(Bfs.ordered_dict_to_object(item))
return a
if isinstance(value, OrderedDict):
o = {}
for key, value in value.items():
o[key] = Bfs.ordered_dict_to_object(value)
return o
return value
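# Illustrative behaviour (added example): nested OrderedDicts and lists are
# converted recursively into plain dicts and lists.
# >>> from collections import OrderedDict
# >>> Bfs.ordered_dict_to_object([OrderedDict([("a", OrderedDict([("b", 1)]))])])
# [{'a': {'b': 1}}]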
| 40.033592
| 128
| 0.606274
| 1,738
| 15,493
| 5.26122
| 0.147296
| 0.024606
| 0.026028
| 0.033683
| 0.509843
| 0.46752
| 0.442039
| 0.411089
| 0.390639
| 0.374125
| 0
| 0.002886
| 0.30659
| 15,493
| 386
| 129
| 40.137306
| 0.848273
| 0.247983
| 0
| 0.315789
| 0
| 0.004386
| 0.107186
| 0.027601
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0.004386
| 0.035088
| 0
| 0.241228
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a554983edfe142d8b785a94b5027ce1bfbe95b20
| 1,370
|
py
|
Python
|
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
booking_microservice/migrations/versions/7eb209b7ab1e_booking_status.py
|
7552-2020C2-grupo5/bookings-microservice
|
92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161
|
[
"Apache-2.0"
] | null | null | null |
"""booking_status
Revision ID: 7eb209b7ab1e
Revises: 0a95c6679356
Create Date: 2021-02-22 01:19:10.744915
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from booking_microservice.constants import BookingStatus
# revision identifiers, used by Alembic.
revision = '7eb209b7ab1e'
down_revision = '0a95c6679356'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
connection = op.get_bind()
if connection.dialect.name == "postgresql":
status_enum = postgresql.ENUM(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
else:
status_enum = sa.Enum(
*[x.value for x in BookingStatus.__members__.values()],
name='booking_status'
)
status_enum.create(op.get_bind())
op.add_column(
'booking',
sa.Column(
'booking_status',
status_enum,
nullable=False,
default=BookingStatus.PENDING.value,
server_default=BookingStatus.PENDING.value,
),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('booking', 'booking_status')
# ### end Alembic commands ###
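# Hedged usage note (added; assumes a standard "alembic.ini" at the project
# root): this revision is normally applied via Alembic's CLI or command API.
# from alembic import command
# from alembic.config import Config
# command.upgrade(Config("alembic.ini"), "7eb209b7ab1e")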
| 24.909091
| 67
| 0.642336
| 148
| 1,370
| 5.77027
| 0.459459
| 0.076112
| 0.04918
| 0.053864
| 0.236534
| 0.236534
| 0.236534
| 0.138173
| 0.138173
| 0.138173
| 0
| 0.050633
| 0.250365
| 1,370
| 54
| 68
| 25.37037
| 0.780915
| 0.216058
| 0
| 0.121212
| 0
| 0
| 0.100289
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0
| 0.121212
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a555224273d739957311d97daec8970ec07b9037
| 669
|
py
|
Python
|
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
cookbookex/c01/3.2.3.py
|
fengchunhui/cookbookex
|
0c97ed92b7963ed6cef9140f3dbd5a559c1d1c79
|
[
"Apache-2.0"
] | null | null | null |
records = [('foo', 1, 2), ('bar', 'hello'), ('foo', 3, 4)]
def do_foo(x, y):
print('foo', x, y)
def do_bar(s):
print('bar', s)
for tag, *args in records:
if tag == 'foo':
do_foo(*args)
elif tag == 'bar':
do_bar(*args)  # star-unpacking: the tag picks the function, *args carries the rest of the tuple
line = 'nobody:*:-2:-2:Unprivileged User:/var/empty:/user/bin/flase'
uname, *fields, homedir, sh = line.split(':')
print(uname)
print(fields)
print(homedir)
print(sh)
record = ('ACME', 50, 123.45, (12, 18, 2017))
name, *_, (*_, year) = record
print(name)
print(year)
def sum(items):
head, *tail = items
return head + sum(tail) if tail else head
items = [1, 10, 7, 4, 5, 9]
print(sum(items))  # recursive sum via head/tail unpacking
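# Added worked trace of the recursive sum above:
# sum([1, 10, 7, 4, 5, 9]) -> head=1, tail=[10, 7, 4, 5, 9]
# -> 1 + (10 + (7 + (4 + (5 + 9)))), since sum([9]) has an empty tail and returns 9
# -> 36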
| 19.114286
| 68
| 0.578475
| 108
| 669
| 3.527778
| 0.509259
| 0.026247
| 0.026247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.204783
| 669
| 34
| 69
| 19.676471
| 0.663534
| 0.013453
| 0
| 0
| 0
| 0
| 0.136986
| 0.08828
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12
| false
| 0
| 0
| 0
| 0.16
| 0.36
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a555c2aabfb2fed9428a296a73e22048b9b84d87
| 14,288
|
py
|
Python
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 137
|
2018-03-05T11:53:29.000Z
|
2019-11-03T16:38:42.000Z
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 385
|
2018-03-08T12:43:41.000Z
|
2019-11-10T09:15:36.000Z
|
rotkehlchen/exchanges/iconomi.py
|
rotkehlchenio/rotkehlchen
|
98f49cd3ed26c641fec03b78eff9fe1872385fbf
|
[
"BSD-3-Clause"
] | 59
|
2018-03-08T10:08:27.000Z
|
2019-10-26T11:30:44.000Z
|
import base64
import hashlib
import hmac
import json
import logging
import time
from json.decoder import JSONDecodeError
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Tuple
from urllib.parse import urlencode
import requests
from rotkehlchen.accounting.ledger_actions import LedgerAction
from rotkehlchen.accounting.structures.balance import Balance
from rotkehlchen.assets.asset import Asset
from rotkehlchen.assets.converters import UNSUPPORTED_ICONOMI_ASSETS, asset_from_iconomi
from rotkehlchen.constants import ZERO
from rotkehlchen.constants.assets import A_AUST
from rotkehlchen.errors.asset import UnknownAsset, UnsupportedAsset
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.errors.serialization import DeserializationError
from rotkehlchen.exchanges.data_structures import (
AssetMovement,
Location,
MarginPosition,
Price,
Trade,
TradeType,
)
from rotkehlchen.exchanges.exchange import ExchangeInterface, ExchangeQueryBalances
from rotkehlchen.inquirer import Inquirer
from rotkehlchen.logging import RotkehlchenLogsAdapter
from rotkehlchen.serialization.deserialize import (
deserialize_asset_amount,
deserialize_fee,
deserialize_fval,
)
from rotkehlchen.types import ApiKey, ApiSecret, Timestamp
from rotkehlchen.user_messages import MessagesAggregator
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
logger = logging.getLogger(__name__)
log = RotkehlchenLogsAdapter(logger)
def trade_from_iconomi(raw_trade: Dict) -> Trade:
"""Turn an iconomi trade entry to our own trade format
May raise:
- UnknownAsset
- DeserializationError
- KeyError
"""
timestamp = raw_trade['timestamp']
if raw_trade['type'] == 'buy_asset':
trade_type = TradeType.BUY
tx_asset = asset_from_iconomi(raw_trade['target_ticker'])
tx_amount = deserialize_asset_amount(raw_trade['target_amount'])
native_asset = asset_from_iconomi(raw_trade['source_ticker'])
native_amount = deserialize_asset_amount(raw_trade['source_amount'])
elif raw_trade['type'] == 'sell_asset':
trade_type = TradeType.SELL
tx_asset = asset_from_iconomi(raw_trade['source_ticker'])
tx_amount = deserialize_asset_amount(raw_trade['source_amount'])
native_amount = deserialize_asset_amount(raw_trade['target_amount'])
native_asset = asset_from_iconomi(raw_trade['target_ticker'])
amount = tx_amount
rate = Price(native_amount / tx_amount)
fee_amount = deserialize_fee(raw_trade['fee_amount'])
fee_asset = asset_from_iconomi(raw_trade['fee_ticker'])
return Trade(
timestamp=timestamp,
location=Location.ICONOMI,
base_asset=tx_asset,
quote_asset=native_asset,
trade_type=trade_type,
amount=amount,
rate=rate,
fee=fee_amount,
fee_currency=fee_asset,
link=str(raw_trade['transactionId']),
)
class Iconomi(ExchangeInterface): # lgtm[py/missing-call-to-init]
def __init__(
self,
name: str,
api_key: ApiKey,
secret: ApiSecret,
database: 'DBHandler',
msg_aggregator: MessagesAggregator,
):
super().__init__(
name=name,
location=Location.ICONOMI,
api_key=api_key,
secret=secret,
database=database,
)
self.uri = 'https://api.iconomi.com'
self.msg_aggregator = msg_aggregator
def edit_exchange_credentials(
self,
api_key: Optional[ApiKey],
api_secret: Optional[ApiSecret],
passphrase: Optional[str],
) -> bool:
changed = super().edit_exchange_credentials(api_key, api_secret, passphrase)
return changed
def _generate_signature(self, request_type: str, request_path: str, timestamp: str) -> str:
signed_data = ''.join([timestamp, request_type.upper(), request_path, '']).encode()
signature = hmac.new(
self.secret,
signed_data,
hashlib.sha512,
)
return base64.b64encode(signature.digest()).decode()
def _api_query(
self,
verb: Literal['get', 'post'],
path: str,
options: Optional[Dict] = None,
authenticated: bool = True,
) -> Any:
"""
Queries ICONOMI with the given verb for the given path and options
"""
assert verb in ('get', 'post'), (
'Given verb {} is not a valid HTTP verb'.format(verb)
)
request_path_no_args = '/v1/' + path
data = ''
if not options:
request_path = request_path_no_args
else:
request_path = request_path_no_args + '?' + urlencode(options)
timestamp = str(int(time.time() * 1000))
request_url = self.uri + request_path
headers = {}
if authenticated:
signature = self._generate_signature(
request_type=verb.upper(),
request_path=request_path_no_args,
timestamp=timestamp,
)
headers.update({
'ICN-SIGN': signature,
# set api key only here since if given in non authenticated endpoint gives 400
'ICN-API-KEY': self.api_key,
'ICN-TIMESTAMP': timestamp,
})
if data != '':
headers.update({
'Content-Type': 'application/json',
'Content-Length': str(len(data)),
})
log.debug('ICONOMI API Query', verb=verb, request_url=request_url)
try:
response = getattr(self.session, verb)(
request_url,
data=data,
timeout=30,
headers=headers,
)
except requests.exceptions.RequestException as e:
raise RemoteError(f'ICONOMI API request failed due to {str(e)}') from e
try:
json_ret = json.loads(response.text)
except JSONDecodeError as exc:
raise RemoteError('ICONOMI returned invalid JSON response') from exc
if response.status_code not in (200, 201):
if isinstance(json_ret, dict) and 'message' in json_ret:
raise RemoteError(json_ret['message'])
raise RemoteError(
'ICONOMI api request for {} failed with HTTP status code {}'.format(
response.url,
response.status_code,
),
)
return json_ret
def validate_api_key(self) -> Tuple[bool, str]:
"""
Validates that the ICONOMI API key is good for usage in rotki
"""
try:
self._api_query('get', 'user/balance')
return True, ""
except RemoteError:
return False, 'Provided API Key is invalid'
def query_balances(self, **kwargs: Any) -> ExchangeQueryBalances:
assets_balance: Dict[Asset, Balance] = {}
try:
resp_info = self._api_query('get', 'user/balance')
except RemoteError as e:
msg = (
'ICONOMI API request failed. Could not reach ICONOMI due '
'to {}'.format(e)
)
log.error(msg)
return None, msg
if resp_info['currency'] != 'USD':
raise RemoteError('Iconomi API did not return values in USD')
for balance_info in resp_info['assetList']:
ticker = balance_info['ticker']
try:
asset = asset_from_iconomi(ticker)
try:
usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
try:
amount = deserialize_asset_amount(balance_info['balance'])
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
assets_balance[asset] = Balance(
amount=amount,
usd_value=usd_value,
)
except (UnknownAsset, UnsupportedAsset) as e:
asset_tag = 'unknown' if isinstance(e, UnknownAsset) else 'unsupported'
self.msg_aggregator.add_warning(
f'Found {asset_tag} ICONOMI asset {ticker}. '
f' Ignoring its balance query.',
)
continue
for balance_info in resp_info['daaList']:
ticker = balance_info['ticker']
if ticker == 'AUSTS':
# The AUSTS strategy is 'ICONOMI Earn'. We know that this strategy holds its
# value in Anchor UST (AUST). That's why we report the user balance for this
# strategy as usd_value / AUST price.
try:
aust_usd_price = Inquirer().find_usd_price(asset=A_AUST)
except RemoteError as e:
self.msg_aggregator.add_error(
f'Error processing ICONOMI balance entry due to inability to '
f'query USD price: {str(e)}. Skipping balance entry',
)
continue
if aust_usd_price == ZERO:
self.msg_aggregator.add_error(
'Error processing ICONOMI balance entry because the USD price '
'for AUST was reported as 0. Skipping balance entry',
)
continue
try:
usd_value = deserialize_fval(balance_info['value'], 'usd_value', 'iconomi')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'missing key entry for {msg}.'
self.msg_aggregator.add_warning(
f'Skipping iconomi balance entry {balance_info} due to {msg}',
)
continue
assets_balance[A_AUST] = Balance(
amount=usd_value / aust_usd_price,
usd_value=usd_value,
)
else:
self.msg_aggregator.add_warning(
f'Found unsupported ICONOMI strategy {ticker}. '
f' Ignoring its balance query.',
)
return assets_balance, ''
def query_online_trade_history(
self,
start_ts: Timestamp,
end_ts: Timestamp,
) -> Tuple[List[Trade], Tuple[Timestamp, Timestamp]]:
page = 0
all_transactions = []
while True:
resp = self._api_query('get', 'user/activity', {"pageNumber": str(page)})
if len(resp['transactions']) == 0:
break
all_transactions.extend(resp['transactions'])
page += 1
log.debug('ICONOMI trade history query', results_num=len(all_transactions))
trades = []
for tx in all_transactions:
timestamp = tx['timestamp']
if timestamp < start_ts:
continue
if timestamp > end_ts:
continue
if tx['type'] in ('buy_asset', 'sell_asset'):
try:
trades.append(trade_from_iconomi(tx))
except UnknownAsset as e:
self.msg_aggregator.add_warning(
f'Ignoring an iconomi transaction because of unsupported '
f'asset {str(e)}')
except (DeserializationError, KeyError) as e:
msg = str(e)
if isinstance(e, KeyError):
msg = f'Missing key entry for {msg}.'
self.msg_aggregator.add_error(
'Error processing an iconomi transaction. Check logs '
'for details. Ignoring it.',
)
log.error(
'Error processing an iconomi transaction',
error=msg,
trade=tx,
)
return trades, (start_ts, end_ts)
def query_supported_tickers(
self,
) -> List[str]:
tickers = []
resp = self._api_query('get', 'assets', authenticated=False)
for asset_info in resp:
if not asset_info['supported']:
continue
if asset_info['ticker'] in UNSUPPORTED_ICONOMI_ASSETS:
continue
tickers.append(asset_info['ticker'])
return tickers
def query_online_deposits_withdrawals(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[AssetMovement]:
return [] # noop for iconomi
def query_online_margin_history(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[MarginPosition]:
return [] # noop for iconomi
def query_online_income_loss_expense(
self, # pylint: disable=no-self-use
start_ts: Timestamp, # pylint: disable=unused-argument
end_ts: Timestamp, # pylint: disable=unused-argument
) -> List[LedgerAction]:
return [] # noop for iconomi
| 35.542289
| 95
| 0.568309
| 1,458
| 14,288
| 5.384088
| 0.203704
| 0.032484
| 0.021656
| 0.02293
| 0.277325
| 0.264586
| 0.215287
| 0.188535
| 0.162293
| 0.162293
| 0
| 0.003114
| 0.348194
| 14,288
| 401
| 96
| 35.630923
| 0.839794
| 0.06033
| 0
| 0.269113
| 0
| 0
| 0.130914
| 0
| 0
| 0
| 0
| 0
| 0.003058
| 1
| 0.036697
| false
| 0.006116
| 0.082569
| 0.009174
| 0.16208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a555e99a46c6efc7e9dda4b03dbc6e9937a3b54b
| 620
|
py
|
Python
|
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
pytorch-extension/pytorch_extension_official/cpp/perform_test.py
|
xdr940/utils
|
c4b7b1479956475a7feee90a723541904ec82306
|
[
"MIT"
] | null | null | null |
import time
from lltm.lltm import LLTM
import torch
batch_size = 16
input_features = 32
state_size = 128
X = torch.randn(batch_size, input_features)
h = torch.randn(batch_size, state_size)
C = torch.randn(batch_size, state_size)
rnn = LLTM(input_features, state_size)  # initialize the network
forward = 0
backward = 0
for _ in range(1000):
start = time.time()
new_h, new_C = rnn(X, (h, C))
forward += time.time() - start
start = time.time()
(new_h.sum() + new_C.sum()).backward()
backward += time.time() - start
print('Forward: {:.3f} us | Backward {:.3f} us'.format(forward * 1e6/1e3, backward * 1e6/1e3))
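# Hedged variant (added): for micro-benchmarks like this, time.perf_counter()
# is a higher-resolution, monotonic alternative to time.time():
# start = time.perf_counter()
# new_h, new_C = rnn(X, (h, C))
# forward += time.perf_counter() - start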
| 23.846154
| 94
| 0.675806
| 98
| 620
| 4.112245
| 0.367347
| 0.08933
| 0.111663
| 0.141439
| 0.223325
| 0.138958
| 0
| 0
| 0
| 0
| 0
| 0.045098
| 0.177419
| 620
| 26
| 94
| 23.846154
| 0.745098
| 0.012903
| 0
| 0.1
| 0
| 0
| 0.063725
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a55636a8a913811f2be1912dad1aedac22c6a849
| 1,980
|
py
|
Python
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 72
|
2016-09-12T03:01:02.000Z
|
2022-03-05T16:54:45.000Z
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 127
|
2016-09-14T02:07:33.000Z
|
2022-03-19T13:17:32.000Z
|
helper/create_functions_table.py
|
Abhisheknishant/iteration_utilities
|
b2bf8d8668ed54d1aadf8c31884fc8a7d28551cc
|
[
"Apache-2.0"
] | 11
|
2017-02-22T20:40:37.000Z
|
2022-03-05T16:55:40.000Z
|
# Licensed under Apache License Version 2.0 - see LICENSE
"""This is a helper that prints the content of the function overview tables .
- docs/index.rst
- README.rst
Both contain a table of functions defined in iteration_utilities and
manually updating them is a pain. Therefore this file can be executed and the
contents can be copy-pasted there. Just run::
$ python helper/create_functions_table.py
Unfortunately the header lines of these tables have to be removed manually,
I haven't found a way to remove them programmatically using the
astropy.io.ascii.RST class.
It's actually important to call this helper from the main repo directory
so the file resolution works correctly.
"""
def _create_overview_table(repo_path, readme=False):
"""Creates an RST table to insert in the "Readme.rst" file for the
complete overview of the package.
Requires `astropy`!
"""
from iteration_utilities import Iterable
from astropy.table import Table
from astropy.io.ascii import RST
import pathlib
p = pathlib.Path(repo_path).joinpath('docs', 'generated')
funcs = sorted([file.name.split('.rst')[0] for file in p.glob('*.rst')],
key=str.lower)
if readme:
rtd_link = ('`{0} <https://iteration-utilities.readthedocs.io/'
'en/latest/generated/{0}.html>`_')
else:
rtd_link = ':py:func:`~iteration_utilities.{0}`'
it = (Iterable(funcs)
# Create a Sphinx link from function name and module
.map(rtd_link.format)
# Group into 4s so we get a 4 column Table
.grouper(4, fillvalue='')
# Convert to list because Table expects it.
.as_list())
print('\n'.join(RST().write(Table(rows=it))))
if __name__ == '__main__':
import pathlib
repo_path = pathlib.Path.cwd()
_create_overview_table(repo_path=repo_path, readme=False)
print('\n\n\n')
_create_overview_table(repo_path=repo_path, readme=True)
| 32.459016
| 77
| 0.685354
| 285
| 1,980
| 4.642105
| 0.498246
| 0.042328
| 0.043084
| 0.052154
| 0.082389
| 0.06198
| 0.06198
| 0.06198
| 0
| 0
| 0
| 0.005799
| 0.216162
| 1,980
| 60
| 78
| 33
| 0.846649
| 0.475253
| 0
| 0.083333
| 0
| 0
| 0.151335
| 0.065282
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.208333
| 0
| 0.25
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a557d896bbb7713624a8d9ae1db240388f2eb7f7
| 1,785
|
py
|
Python
|
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
MyWriter/testdragdrop.py
|
haha517/mywriter
|
8ddd5ce3b2f31491480dee9beb7367c8d6182282
|
[
"MIT"
] | null | null | null |
import sys
import os
from PyQt4 import QtGui, QtCore
class TestListView(QtGui.QListWidget):
def __init__(self, type, parent=None):
super(TestListView, self).__init__(parent)
self.setAcceptDrops(True)
self.setIconSize(QtCore.QSize(72, 72))
def dragEnterEvent(self, event):
if event.mimeData().hasUrls:
event.accept()
else:
event.ignore()
def dragMoveEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
else:
event.ignore()
def dropEvent(self, event):
if event.mimeData().hasUrls:
event.setDropAction(QtCore.Qt.CopyAction)
event.accept()
links = []
for url in event.mimeData().urls():
links.append(str(url.toLocalFile()))
self.emit(QtCore.SIGNAL("dropped"), links)
else:
event.ignore()
class MainForm(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainForm, self).__init__(parent)
self.view = TestListView(self)
self.connect(self.view, QtCore.SIGNAL("dropped"), self.pictureDropped)
self.setCentralWidget(self.view)
def pictureDropped(self, l):
for url in l:
if os.path.exists(url):
print(url)
icon = QtGui.QIcon(url)
pixmap = icon.pixmap(72, 72)
icon = QtGui.QIcon(pixmap)
item = QtGui.QListWidgetItem(url, self.view)
item.setIcon(icon)
item.setStatusTip(url)
def main():
app = QtGui.QApplication(sys.argv)
form = MainForm()
form.show()
app.exec_()
if __name__ == '__main__':
main()
| 28.790323
| 78
| 0.576471
| 188
| 1,785
| 5.340426
| 0.37766
| 0.051793
| 0.032869
| 0.047809
| 0.233068
| 0.233068
| 0.191235
| 0.155378
| 0.155378
| 0.155378
| 0
| 0.007276
| 0.307003
| 1,785
| 61
| 79
| 29.262295
| 0.804365
| 0
| 0
| 0.269231
| 0
| 0
| 0.012325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.134615
| false
| 0
| 0.057692
| 0
| 0.230769
| 0.019231
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|