**Schema**

| field | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Note: in every row shown below, the stars/issues/forks groups carry identical `repo_path`, `repo_name`, `repo_head_hexsha`, and `repo_licenses` cells within a row, so each of those values is listed once per row.
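The raw `qsc_*_quality_signal` columns lend themselves to simple threshold filtering. Below is a minimal sketch of how rows with this schema could be inspected, assuming the split has been exported to a local Parquet file; the path `sample.parquet` and both thresholds are illustrative assumptions, not values from the dataset:

```python
# Hypothetical inspection of rows with the schema above.
# "sample.parquet" is an assumed local export, not an official artifact.
import pandas as pd

df = pd.read_parquet("sample.parquet")

# Basic per-file statistics carried by every row.
print(df[["hexsha", "lang", "size", "avg_line_length", "alphanum_fraction"]].head())

# The qsc_*_quality_signal columns hold raw measurements; the thresholds
# below are illustrative only, not the dataset's official cutoffs.
mask = (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.5) & (
    df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5
)
print(f"{mask.mean():.1%} of rows pass this example filter")
```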
**Row 1 · microsoft/jacdac-python · jacdac/proto_test/constants.py**

| field | value |
|---|---|
| hexsha | 416d556c37282091270ab442f9cc0c1e665b8116 |
| size | 2,548 |
| ext / lang | py / Python |
| repo head hexsha | 712ad5559e29065f5eccb5dbfe029c039132df5a |
| licenses | ["MIT"] |
| stars | 1 (events 2022-02-15T21:30:36.000Z → 2022-02-15T21:30:36.000Z) |
| issues | null |
| forks | 1 (events 2022-02-08T19:32:45.000Z → 2022-02-08T19:32:45.000Z) |

`content`:

```python
# Autogenerated constants for Protocol Test service
from jacdac.constants import *
JD_SERVICE_CLASS_PROTO_TEST = const(0x16c7466a)
JD_PROTO_TEST_REG_RW_BOOL = const(0x81)
JD_PROTO_TEST_REG_RO_BOOL = const(0x181)
JD_PROTO_TEST_REG_RW_U32 = const(0x82)
JD_PROTO_TEST_REG_RO_U32 = const(0x182)
JD_PROTO_TEST_REG_RW_I32 = const(0x83)
JD_PROTO_TEST_REG_RO_I32 = const(0x183)
JD_PROTO_TEST_REG_RW_STRING = const(0x84)
JD_PROTO_TEST_REG_RO_STRING = const(0x184)
JD_PROTO_TEST_REG_RW_BYTES = const(0x85)
JD_PROTO_TEST_REG_RO_BYTES = const(0x185)
JD_PROTO_TEST_REG_RW_I8_U8_U16_I32 = const(0x86)
JD_PROTO_TEST_REG_RO_I8_U8_U16_I32 = const(0x186)
JD_PROTO_TEST_REG_RW_U8_STRING = const(0x87)
JD_PROTO_TEST_REG_RO_U8_STRING = const(0x187)
JD_PROTO_TEST_EV_E_BOOL = const(0x81)
JD_PROTO_TEST_EV_E_U32 = const(0x82)
JD_PROTO_TEST_EV_E_I32 = const(0x83)
JD_PROTO_TEST_EV_E_STRING = const(0x84)
JD_PROTO_TEST_EV_E_BYTES = const(0x85)
JD_PROTO_TEST_EV_E_I8_U8_U16_I32 = const(0x86)
JD_PROTO_TEST_EV_E_U8_STRING = const(0x87)
JD_PROTO_TEST_CMD_C_BOOL = const(0x81)
JD_PROTO_TEST_CMD_C_U32 = const(0x82)
JD_PROTO_TEST_CMD_C_I32 = const(0x83)
JD_PROTO_TEST_CMD_C_STRING = const(0x84)
JD_PROTO_TEST_CMD_C_BYTES = const(0x85)
JD_PROTO_TEST_CMD_C_I8_U8_U16_I32 = const(0x86)
JD_PROTO_TEST_CMD_C_U8_STRING = const(0x87)
JD_PROTO_TEST_CMD_C_REPORT_PIPE = const(0x90)
JD_PROTO_TEST_PACK_FORMATS = {
    JD_PROTO_TEST_REG_RW_BOOL: "u8",
    JD_PROTO_TEST_REG_RO_BOOL: "u8",
    JD_PROTO_TEST_REG_RW_U32: "u32",
    JD_PROTO_TEST_REG_RO_U32: "u32",
    JD_PROTO_TEST_REG_RW_I32: "i32",
    JD_PROTO_TEST_REG_RO_I32: "i32",
    JD_PROTO_TEST_REG_RW_STRING: "s",
    JD_PROTO_TEST_REG_RO_STRING: "s",
    JD_PROTO_TEST_REG_RW_BYTES: "b",
    JD_PROTO_TEST_REG_RO_BYTES: "b",
    JD_PROTO_TEST_REG_RW_I8_U8_U16_I32: "i8 u8 u16 i32",
    JD_PROTO_TEST_REG_RO_I8_U8_U16_I32: "i8 u8 u16 i32",
    JD_PROTO_TEST_REG_RW_U8_STRING: "u8 s",
    JD_PROTO_TEST_REG_RO_U8_STRING: "u8 s",
    JD_PROTO_TEST_EV_E_BOOL: "u8",
    JD_PROTO_TEST_EV_E_U32: "u32",
    JD_PROTO_TEST_EV_E_I32: "i32",
    JD_PROTO_TEST_EV_E_STRING: "s",
    JD_PROTO_TEST_EV_E_BYTES: "b",
    JD_PROTO_TEST_EV_E_I8_U8_U16_I32: "i8 u8 u16 i32",
    JD_PROTO_TEST_EV_E_U8_STRING: "u8 s",
    JD_PROTO_TEST_CMD_C_BOOL: "u8",
    JD_PROTO_TEST_CMD_C_U32: "u32",
    JD_PROTO_TEST_CMD_C_I32: "i32",
    JD_PROTO_TEST_CMD_C_STRING: "s",
    JD_PROTO_TEST_CMD_C_BYTES: "b",
    JD_PROTO_TEST_CMD_C_I8_U8_U16_I32: "i8 u8 u16 i32",
    JD_PROTO_TEST_CMD_C_U8_STRING: "u8 s",
    JD_PROTO_TEST_CMD_C_REPORT_PIPE: "b[12]"
}
```

avg_line_length: 39.8125 | max_line_length: 56 | alphanum_fraction: 0.800628

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
508 | 2,548 | 3.362205 | 0.100394 | 0.316159 | 0.379977 | 0.229508 | 0.879391 | 0.869438 | 0.342506 | 0.259368 | 0.259368 | 0.076112 | 0 | 0.106081 | 0.115777 | 2,548 | 63 | 57 | 40.444444 | 0.65202 | 0.019231 | 0 | 0 | 1 | 0 | 0.045254 | 0 | 0 | 0 | 0.053264 | 0 | 0 | 1 | 0 | false | 0 | 0.016129 | 0 | 0.016129 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
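The first three numeric cells after `content` read as simple per-file statistics. A minimal sketch of plausible definitions, assuming they are computed directly from the raw text (the dataset's exact formulas are not documented here, and newline handling may differ):

```python
# Plausible (assumed) definitions of the basic per-file statistics.
def basic_stats(content: str) -> dict:
    lines = content.split("\n")  # a trailing newline yields one extra entry
    return {
        "max_line_length": max(len(line) for line in lines),
        "avg_line_length": len(content) / len(lines),
        "alphanum_fraction": sum(ch.isalnum() for ch in content) / len(content),
    }
```

Applied to the constants file above, this comes out near the row's 39.8125 / 56 / 0.800628, with the exact values depending on how the trailing newline is counted.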
**Row 2 · brettcannon/vibora · vibora/request/__init__.py**

| field | value |
|---|---|
| hexsha | 418ec026fe4b1b292324600d0e892521d935a4fa |
| size | 23 |
| ext / lang | py / Python |
| repo head hexsha | 1933b631d4df62e7d748016f7463ab746d4695cc |
| licenses | ["MIT"] |
| stars | 2,177 (events 2015-01-02T09:56:51.000Z → 2022-03-27T01:48:37.000Z) |
| issues | 93 (events 2019-12-07T01:43:50.000Z → 2021-08-01T13:30:44.000Z) |
| forks | 516 (events 2015-01-02T18:48:29.000Z → 2022-01-26T07:12:35.000Z) |

`content`:

```python
from .request import *
```

avg_line_length: 11.5 | max_line_length: 22 | alphanum_fraction: 0.73913

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
3 | 23 | 5.666667 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 23 | 1 | 23 | 23 | 0.894737 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 3 · mengjian0502/TorchInference_SRAM · models/__init__.py**

| field | value |
|---|---|
| hexsha | 41a18b602a2fe9bf9b451e29bc6d852c9ab66ec9 |
| size | 50 |
| ext / lang | py / Python |
| repo head hexsha | fcc465c73b79f2ab670b6af03aa53f9bb47c64ca |
| licenses | ["MIT"] |
| stars | 1 (events 2022-02-28T03:51:11.000Z → 2022-02-28T03:51:11.000Z) |
| issues | null |
| forks | null |

`content`:

```python
from .cnn import CNN
from .sram_cnn import SRAMCNN
```

avg_line_length: 25 | max_line_length: 29 | alphanum_fraction: 0.82

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
9 | 50 | 4.444444 | 0.555556 | 0.45 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.14 | 50 | 2 | 29 | 25 | 0.930233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 4 · martpat/qpid-python · qpid_tests/broker_0_10/msg_groups.py**

| field | value |
|---|---|
| hexsha | ec015e1be422fd69e31a08ddca494d7b1c7c7423 |
| size | 42,893 |
| ext / lang | py / Python |
| repo head hexsha | 900baa9bfab0f9b348c517fe1dac3cc6e8f26b29 |
| licenses | ["Apache-2.0"] |
| stars | 17 (events 2016-09-04T03:34:10.000Z → 2021-11-09T22:29:30.000Z) |
| issues | 1 (events 2016-07-11T09:32:41.000Z → 2016-07-11T09:32:41.000Z) |
| forks | 19 (events 2016-07-11T09:23:50.000Z → 2021-11-09T22:29:20.000Z) |

`content`:

```python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from qpid.messaging import *
from qpid.tests.messaging import Base
import qmf.console
from time import sleep
#
# Tests the Broker's support for message groups
#
class MultiConsumerMsgGroupTests(Base):
    """
    Tests for the behavior of multi-consumer message groups. These tests allow
    a messages from the same group be consumed by multiple different clients as
    long as each message is processed "in sequence". See QPID-3346 for
    details.
    """
    def setup_connection(self):
        return Connection.establish(self.broker, **self.connection_options())
    def setup_session(self):
        return self.conn.session()
    def test_simple(self):
        """ Verify simple acquire/accept actions on a set of grouped
        messages shared between two receivers.
        """
        ## Create a msg group queue
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","A","A","B","B","B","C","C","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        ## Queue = a-0, a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
        ## Owners= ---, ---, ---, ---, ---, ---, ---, ---, ---,
        # create consumers on separate sessions: C1,C2
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 should acquire A-0, then C2 should acquire B-3
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        m2 = c2.fetch(0);
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 3
        # C1 Acknowledge A-0
        c1.session.acknowledge(m1);
        # C2 should next acquire A-1
        m3 = c2.fetch(0);
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 1
        # C1 should next acquire C-6, since groups A&B are held by c2
        m4 = c1.fetch(0);
        assert m4.properties['THE-GROUP'] == 'C'
        assert m4.content['index'] == 6
        ## Queue = XXX, a-1, a-2, b-3, b-4, b-5, c-6, c-7, c-8...
        ## Owners= ---, ^C2, +C2, ^C2, +C2, +C2, ^C1, +C1, +C1,
        # C2 Acknowledge B-3, freeing up the rest of B group
        c2.session.acknowledge(m2);
        ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, c-6, c-7, c-8...
        ## Owners= ---, ^C2, +C2, ---, ---, ---, ^C1, +C1, +C1,
        # C1 should now acquire B-4, since it is next "free"
        m5 = c1.fetch(0);
        assert m5.properties['THE-GROUP'] == 'B'
        assert m5.content['index'] == 4
        ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, c-6, c-7, c-8...
        ## Owners= ---, ^C2, +C2, ---, ^C1, +C1, ^C1, +C1, +C1,
        # C1 acknowledges C-6, freeing the C group
        c1.session.acknowledge(m4)
        ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, XXX, c-7, c-8...
        ## Owners= ---, ^C2, +C2, ---, ^C1, +C1, ---, ---, ---
        # C2 should next fetch A-2, followed by C-7
        m7 = c2.fetch(0);
        assert m7.properties['THE-GROUP'] == 'A'
        assert m7.content['index'] == 2
        m8 = c2.fetch(0);
        assert m8.properties['THE-GROUP'] == 'C'
        assert m8.content['index'] == 7
        ## Queue = XXX, a-1, a-2, XXX, b-4, b-5, XXX, c-7, c-8...
        ## Owners= ---, ^C2, ^C2, ---, ^C1, +C1, ---, ^C2, +C2
        # have C2 ack all fetched messages, freeing C-8
        c2.session.acknowledge()
        ## Queue = XXX, XXX, XXX, XXX, b-4, b-5, XXX, XXX, c-8...
        ## Owners= ---, ---, ---, ---, ^C1, +C1, ---, ---, ---
        # the next fetch of C2 would get C-8, since B-5 is "owned"
        m9 = c2.fetch(0);
        assert m9.properties['THE-GROUP'] == 'C'
        assert m9.content['index'] == 8
        ## Queue = XXX, XXX, XXX, XXX, b-4, b-5, XXX, XXX, c-8...
        ## Owners= ---, ---, ---, ---, ^C1, +C1, ---, ---, ^C2
        # C1 acks B-4, freeing B-5 for consumption
        c1.session.acknowledge(m5)
        ## Queue = XXX, XXX, XXX, XXX, XXX, b-5, XXX, XXX, c-8...
        ## Owners= ---, ---, ---, ---, ---, ^C2, ---, ---, ^C2
        # the next fetch of C2 would get B-5
        m10 = c2.fetch(0);
        assert m10.properties['THE-GROUP'] == 'B'
        assert m10.content['index'] == 5
        # there should be no more left for C1:
        try:
            mx = c1.fetch(0)
            assert False # should never get here
        except Empty:
            pass
        c1.session.acknowledge()
        c2.session.acknowledge()
        c1.close()
        c2.close()
        snd.close()
    def test_simple_browse(self):
        """ Test the behavior of a browsing subscription on a message grouping
        queue.
        """
        ## Create a msg group queue
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ---, ---, ---, ---, ---
        # create consumer and browser
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        m2 = b1.fetch(0);
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0
        # C1 should acquire A-0
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ^C1, ---, +C1, ---, ---
        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1
        # verify that the browser may see A-2, even though its group is owned
        # by C1
        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 2
        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 3
        # verify the consumer can own groups currently seen by the browser
        m3 = c1.fetch(0);
        assert m3.properties['THE-GROUP'] == 'B'
        assert m3.content['index'] == 1
        m2 = b1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 4
    def test_release(self):
        """ Verify releasing a message can free its assocated group
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2
        # C1 release m1, and the first group
        s1.acknowledge(m1, Disposition(RELEASED, set_redelivered=True))
        # C2 should be able to get group 'A', msg 'A-0' now
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0
    def test_reject(self):
        """ Verify rejecting a message can free its associated group
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2
        # C1 rejects m1, and the first group is released
        s1.acknowledge(m1, Disposition(REJECTED))
        # C2 should be able to get group 'A', msg 'A-1' now
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 1
    def test_close(self):
        """ Verify behavior when a consumer that 'owns' a group closes.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","A","B","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 will own group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # C2 will own group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2
        # C1 shuffles off the mortal coil...
        c1.close();
        # but the session (s1) remains active, so "A" remains blocked
        # from c2, c2 should fetch the next B-3
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 3
        # and there should be no more messages available for C2
        try:
            m2 = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass
        # close session s1, releasing the A group
        s1.close()
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 0
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'A'
        assert m2.content['index'] == 1
        # and there should be no more messages now
        try:
            m2 = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass
    def test_transaction(self):
        """ Verify behavior when using transactions.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","A","B","B","A","B"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.conn.session(transactional=True)
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 gets group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # C2 gets group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 2
        s1.acknowledge(m1) # A-0 consumed, A group freed
        s2.acknowledge(m2) # B-2 consumed, B group freed
        s1.commit() # A-0 consumption done, A group now free
        s2.rollback() # releases B-2, and group B
        ## Q: ["A1","B2","B3","A4","B5"]
        # C2 should be able to get the next A
        m3 = c2.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 1
        # C1 should be able to get B-2
        m4 = c1.fetch(0)
        assert m4.properties['THE-GROUP'] == 'B'
        assert m4.content['index'] == 2
        s2.acknowledge(m3) # C2 consumes A-1
        s1.acknowledge(m4) # C1 consumes B-2
        s1.commit() # C1 consume B-2 occurs, free group B
        ## Q: [["A1",]"B3","A4","B5"]
        # A-1 is still considered owned by C2, since the commit has yet to
        # occur, so the next available to C1 would be B-3
        m5 = c1.fetch(0) # B-3
        assert m5.properties['THE-GROUP'] == 'B'
        assert m5.content['index'] == 3
        # and C2 should find A-4 available, since it owns the A group
        m6 = c2.fetch(0) # A-4
        assert m6.properties['THE-GROUP'] == 'A'
        assert m6.content['index'] == 4
        s2.acknowledge(m6) # C2 consumes A-4
        # uh-oh, A-1 and A-4 released, along with A group
        s2.rollback()
        ## Q: ["A1",["B3"],"A4","B5"]
        m7 = c1.fetch(0) # A-1 is found
        assert m7.properties['THE-GROUP'] == 'A'
        assert m7.content['index'] == 1
        ## Q: [["A1"],["B3"],"A4","B5"]
        # since C1 "owns" both A and B group, C2 should find nothing available
        try:
            m8 = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        # C1 next gets A4
        m9 = c1.fetch(0)
        assert m9.properties['THE-GROUP'] == 'A'
        assert m9.content['index'] == 4
        s1.acknowledge()
        ## Q: [["A1"],["B3"],["A4"],"B5"]
        # even though C1 acknowledges A1,B3, and A4, B5 is still considered
        # owned as the commit has yet to take place
        try:
            m10 = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        # now A1,B3,A4 dequeued, B5 should be free
        s1.commit()
        ## Q: ["B5"]
        m11 = c2.fetch(0)
        assert m11.properties['THE-GROUP'] == 'B'
        assert m11.content['index'] == 5
        s2.acknowledge()
        s2.commit()
    def test_send_transaction(self):
        """ Verify behavior when sender is using transactions.
        """
        ssn = self.conn.session(transactional=True)
        snd = ssn.sender("msg-group-q; {create:always, delete:sender," +
                         " node: {x-declare: {arguments:" +
                         " {'qpid.group_header_key':'THE-GROUP'," +
                         "'qpid.shared_msg_group':1}}}}")
        msg = Message(content={'index':0}, properties={"THE-GROUP": "A"})
        snd.send(msg)
        msg = Message(content={'index':1}, properties={"THE-GROUP": "B"})
        snd.send(msg)
        snd.session.commit()
        msg = Message(content={'index':2}, properties={"THE-GROUP": "A"})
        snd.send(msg)
        # Queue: [A0,B1, (uncommitted: A2) ]
        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.conn.session(transactional=True)
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 gets A0, group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # C2 gets B2, group B
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1
        # Since A2 uncommitted, there should be nothing left to fetch
        try:
            mX = c1.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        try:
            mX = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        snd.session.commit()
        msg = Message(content={'index':3}, properties={"THE-GROUP": "B"})
        snd.send(msg)
        # Queue: [A2, (uncommitted: B3) ]
        # B3 has yet to be committed, so C2 should see nothing available:
        try:
            mX = c2.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        # but A2 should be available to C1
        m3 = c1.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 2
        # now make B3 available
        snd.session.commit()
        # C1 should still be done:
        try:
            mX = c1.fetch(0)
            assert False # should not get here
        except Empty:
            pass
        # but C2 should find the new B
        m4 = c2.fetch(0)
        assert m4.properties['THE-GROUP'] == 'B'
        assert m4.content['index'] == 3
        # extra: have C1 rollback, verify C2 finds the released 'A' messages
        c1.session.rollback()
        ## Q: ["A0","A2"]
        # C2 should be able to get the next A
        m5 = c2.fetch(0)
        assert m5.properties['THE-GROUP'] == 'A'
        assert m5.content['index'] == 0
        m6 = c2.fetch(0)
        assert m6.properties['THE-GROUP'] == 'A'
        assert m6.content['index'] == 2
        c2.session.acknowledge()
        c2.session.commit()
    def test_query(self):
        """ Verify the queue query method against message groups
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","C","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        m2 = c2.fetch(0)
        # at this point, group A should be owned by C1, group B by C2, and
        # group C should be available
        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        # verify the query method call's group information
        rc = broker.query("queue", "msg-group-q")
        assert rc.status == 0
        assert rc.text == "OK"
        results = rc.outArgs['results']
        assert 'qpid.message_group_queue' in results
        q_info = results['qpid.message_group_queue']
        assert 'group_header_key' in q_info and q_info['group_header_key'] == "THE-GROUP"
        assert 'group_state' in q_info and len(q_info['group_state']) == 3
        for g_info in q_info['group_state']:
            assert 'group_id' in g_info
            if g_info['group_id'] == "A":
                assert g_info['msg_count'] == 3
                assert g_info['consumer'] != ""
            elif g_info['group_id'] == "B":
                assert g_info['msg_count'] == 2
                assert g_info['consumer'] != ""
            elif g_info['group_id'] == "C":
                assert g_info['msg_count'] == 2
                assert g_info['consumer'] == ""
            else:
                assert(False) # should never get here
        self.qmf_session.delBroker(self.qmf_broker)
    def test_purge_free(self):
        """ Verify we can purge a queue of all messages of a given "unowned"
        group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
        assert queue
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "B" }}
        assert queue.msgDepth == 6
        rc = queue.purge(0, msg_filter)
        assert rc.status == 0
        queue.update()
        assert queue.msgDepth == 4
        # verify all B's removed....
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'B'
                count += 1
        except Empty:
            pass
        assert count == 4
        self.qmf_session.delBroker(self.qmf_broker)
    def test_purge_acquired(self):
        """ Verify we can purge messages from an acquired group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # acquire group "A"
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # now setup a QMF session, so we can purge group A
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
        assert queue
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "A" }}
        assert queue.msgDepth == 6
        rc = queue.purge(0, msg_filter)
        assert rc.status == 0
        queue.update()
        queue.msgDepth == 4 # the pending acquired A still counts!
        s1.acknowledge()
        # verify all other A's removed....
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'A'
                count += 1
        except Empty:
            pass
        assert count == 3 # only 3 really available
        s1.acknowledge() # ack the consumed A-0
        self.qmf_session.delBroker(self.qmf_broker)
    def test_purge_count(self):
        """ Verify we can purge a fixed number of messages from an acquired
        group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # acquire group "A"
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # now setup a QMF session, so we can purge group A
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
        assert queue
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "A" }}
        assert queue.msgDepth == 6
        rc = queue.purge(1, msg_filter)
        assert rc.status == 0
        queue.update()
        queue.msgDepth == 5 # the pending acquired A still counts!
        # verify all other A's removed....
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        a_count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                if m2.properties['THE-GROUP'] != 'A':
                    count += 1
                else:
                    a_count += 1
        except Empty:
            pass
        assert count == 3 # non-A's
        assert a_count == 1 # assumes the acquired message was not the one purged and regular browsers don't get acquired messages
        s1.acknowledge() # ack the consumed A-0
        self.qmf_session.delBroker(self.qmf_broker)
    def test_move_all(self):
        """ Verify we can move messages from an acquired group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # set up destination queue
        rcvr = self.ssn.receiver("dest-q; {create:always, delete:receiver," +
                                 " node: {x-declare: {arguments:" +
                                 " {'qpid.group_header_key':'THE-GROUP'," +
                                 "'qpid.shared_msg_group':1}}}}")
        # acquire group "A"
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # now setup a QMF session, so we can move what's left of group A
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "A" }}
        rc = broker.queueMoveMessages("msg-group-q", "dest-q", 0, msg_filter)
        assert rc.status == 0
        # verify all other A's removed from msg-group-q
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'A'
                count += 1
        except Empty:
            pass
        assert count == 3 # only 3 really available
        # verify the moved A's are at the dest-q
        s2 = self.setup_session()
        b1 = s2.receiver("dest-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] == 'A'
                assert m2.content['index'] == 2 or m2.content['index'] == 5
                count += 1
        except Empty:
            pass
        assert count == 2 # two A's moved
        s1.acknowledge() # ack the consumed A-0
        self.qmf_session.delBroker(self.qmf_broker)
    def test_move_count(self):
        """ Verify we can move a fixed number of messages from an acquired group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # set up destination queue
        rcvr = self.ssn.receiver("dest-q; {create:always, delete:receiver," +
                                 " node: {x-declare: {arguments:" +
                                 " {'qpid.group_header_key':'THE-GROUP'," +
                                 "'qpid.shared_msg_group':1}}}}")
        # now setup a QMF session, so we can move group B
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "B" }}
        rc = broker.queueMoveMessages("msg-group-q", "dest-q", 3, msg_filter)
        assert rc.status == 0
        # verify all B's removed from msg-group-q
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'B'
                count += 1
        except Empty:
            pass
        assert count == 4
        # verify the moved B's are at the dest-q
        s2 = self.setup_session()
        b1 = s2.receiver("dest-q; {mode: browse}", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] == 'B'
                assert m2.content['index'] == 1 or m2.content['index'] == 3
                count += 1
        except Empty:
            pass
        assert count == 2
        self.qmf_session.delBroker(self.qmf_broker)
    def test_reroute(self):
        """ Verify we can reroute messages from an acquired group.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        # create a topic exchange for the reroute
        rcvr = self.ssn.receiver("reroute-q; {create: always, delete:receiver," +
                                 " node: {type: topic}}")
        # acquire group "A"
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # now setup a QMF session, so we can reroute group A
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        queue = self.qmf_session.getObjects(_class="queue", name="msg-group-q")[0]
        assert queue
        msg_filter = { 'filter_type' : 'header_match_str',
                       'filter_params' : { 'header_key' : "THE-GROUP",
                                           'header_value' : "A" }}
        assert queue.msgDepth == 6
        rc = queue.reroute(0, False, "reroute-q", msg_filter)
        assert rc.status == 0
        queue.update()
        queue.msgDepth == 4 # the pending acquired A still counts!
        # verify all other A's removed....
        s2 = self.setup_session()
        b1 = s2.receiver("msg-group-q", options={"capacity":0})
        count = 0
        try:
            while True:
                m2 = b1.fetch(0)
                assert m2.properties['THE-GROUP'] != 'A'
                count += 1
        except Empty:
            pass
        assert count == 3 # only 3 really available
        # and what of reroute-q?
        count = 0
        try:
            while True:
                m2 = rcvr.fetch(0)
                assert m2.properties['THE-GROUP'] == 'A'
                assert m2.content['index'] == 2 or m2.content['index'] == 5
                count += 1
        except Empty:
            pass
        assert count == 2
        s1.acknowledge() # ack the consumed A-0
        self.qmf_session.delBroker(self.qmf_broker)
    def test_queue_delete(self):
        """ Test deleting a queue while consumers are active.
        """
        ## Create a msg group queue
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        ## Queue = A-0, B-1, A-2, b-3, C-4
        ## Owners= ---, ---, ---, ---, ---
        # create consumers
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 should acquire A-0
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        # c2 acquires B-1
        m2 = c2.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1
        # with group A and B owned, and C free, delete the
        # queue
        snd.close()
        self.ssn.close()
    def test_default_group_id(self):
        """ Verify the queue assigns the default group id should a message
        arrive without a group identifier.
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        m = Message(content={}, properties={"NO-GROUP-HEADER":"HA-HA"})
        snd.send(m)
        # now setup a QMF session, so we can call methods
        self.qmf_session = qmf.console.Session()
        self.qmf_broker = self.qmf_session.addBroker(str(self.broker))
        brokers = self.qmf_session.getObjects(_class="broker")
        assert len(brokers) == 1
        broker = brokers[0]
        # grab the group state off the queue, and verify the default group is
        # present ("qpid.no-group" is the broker default)
        rc = broker.query("queue", "msg-group-q")
        assert rc.status == 0
        assert rc.text == "OK"
        results = rc.outArgs['results']
        assert 'qpid.message_group_queue' in results
        q_info = results['qpid.message_group_queue']
        assert 'group_header_key' in q_info and q_info['group_header_key'] == "THE-GROUP"
        assert 'group_state' in q_info and len(q_info['group_state']) == 1
        g_info = q_info['group_state'][0]
        assert 'group_id' in g_info
        assert g_info['group_id'] == 'qpid.no-group'
        self.qmf_session.delBroker(self.qmf_broker)
    def test_transaction_order(self):
        """ Verify that rollback does not reorder the messages with respect to
        the consumer (QPID-3804)
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","A"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            snd.send(m)
        s1 = self.conn.session(transactional=True)
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        # C1 gets group A
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        s1.acknowledge(m1)
        s1.rollback() # release A back to the queue
        # the order should be preserved as follows:
        m1 = c1.fetch(0)
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        m2 = c1.fetch(0)
        assert m2.properties['THE-GROUP'] == 'B'
        assert m2.content['index'] == 1
        m3 = c1.fetch(0)
        assert m3.properties['THE-GROUP'] == 'A'
        assert m3.content['index'] == 2
        s1.commit()
        c1.close()
        s1.close()
        snd.close()
    def test_ttl_expire(self):
        """ Verify that expired (TTL) group messages are skipped correctly
        """
        snd = self.ssn.sender("msg-group-q; {create:always, delete:sender," +
                              " node: {x-declare: {arguments:" +
                              " {'qpid.group_header_key':'THE-GROUP'," +
                              "'qpid.shared_msg_group':1}}}}")
        groups = ["A","B","C","A","B","C"]
        messages = [Message(content={}, properties={"THE-GROUP": g}) for g in groups]
        index = 0
        for m in messages:
            m.content['index'] = index
            index += 1
            if m.properties['THE-GROUP'] == 'B':
                m.ttl = 1;
            snd.send(m)
        sleep(2) # let all B's expire
        # create consumers on separate sessions: C1,C2
        s1 = self.setup_session()
        c1 = s1.receiver("msg-group-q", options={"capacity":0})
        s2 = self.setup_session()
        c2 = s2.receiver("msg-group-q", options={"capacity":0})
        # C1 should acquire A-0, then C2 should acquire C-2, Group B should
        # expire and never be fetched
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 0
        m2 = c2.fetch(0);
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 2
        m1 = c1.fetch(0);
        assert m1.properties['THE-GROUP'] == 'A'
        assert m1.content['index'] == 3
        m2 = c2.fetch(0);
        assert m2.properties['THE-GROUP'] == 'C'
        assert m2.content['index'] == 5
        # there should be no more left for either consumer
        try:
            mx = c1.fetch(0)
            assert False # should never get here
        except Empty:
            pass
        try:
            mx = c2.fetch(0)
            assert False # should never get here
        except Empty:
            pass
        c1.session.acknowledge()
        c2.session.acknowledge()
        c1.close()
        c2.close()
        snd.close()
class StickyConsumerMsgGroupTests(Base):
    """
    Tests for the behavior of sticky-consumer message groups. These tests
    expect all messages from the same group be consumed by the same clients.
    See QPID-3347 for details.
    """
    pass # TBD
```

avg_line_length: 35.863712 | max_line_length: 130 | alphanum_fraction: 0.515026

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
5,378 | 42,893 | 4.050762 | 0.073447 | 0.042231 | 0.071058 | 0.034886 | 0.772412 | 0.742575 | 0.732155 | 0.711682 | 0.700252 | 0.683406 | 0 | 0.035868 | 0.33896 | 42,893 | 1,195 | 131 | 35.893724 | 0.732454 | 0.199613 | 0 | 0.831847 | 0 | 0 | 0.174993 | 0.041835 | 0 | 0 | 0 | 0 | 0.233121 | 1 | 0.025478 | false | 0.026752 | 0.005096 | 0.002548 | 0.035669 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
**Row 5 · construct-org/construct_hou · construct_hou/utils.py**

| field | value |
|---|---|
| hexsha | ec4377316b7217f523059d544799334b134d046d |
| size | 188 |
| ext / lang | py / Python |
| repo head hexsha | 7c323c176ce238681f64cbf61ddd7ed5a9a81100 |
| licenses | ["MIT"] |
| stars | null |
| issues | null |
| forks | 1 (events 2020-02-15T12:16:21.000Z → 2020-02-15T12:16:21.000Z) |

`content`:

```python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
def show_file_open_at_startup():
    '''Returns True when there will be no autosave dialog at startup.'''
    return True
```

avg_line_length: 20.888889 | max_line_length: 72 | alphanum_fraction: 0.712766

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
27 | 188 | 4.62963 | 0.851852 | 0.144 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006536 | 0.18617 | 188 | 8 | 73 | 23.5 | 0.810458 | 0.452128 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 6
**Row 6 · sn0b4ll/Incident-Playbook · Incident-Response/Tools/cyphon/cyphon/alerts/tests/test_admin.py**

| field | value |
|---|---|
| hexsha | 6b5a77292915e54ad040e0a094a98c0b61d2dafc |
| size | 9,412 |
| ext / lang | py / Python |
| repo head hexsha | cf519f58fcd4255674662b3620ea97c1091c1efb |
| licenses | ["MIT"] |
| stars | 1 (events 2021-07-24T17:22:50.000Z → 2021-07-24T17:22:50.000Z) |
| issues | 2 (events 2022-02-28T03:40:31.000Z → 2022-02-28T03:40:52.000Z) |
| forks | 2 (events 2022-02-25T08:34:51.000Z → 2022-03-16T17:29:44.000Z) |

`content`:

```python
# -*- coding: utf-8 -*-
# Copyright 2017-2019 ControlScan, Inc.
#
# This file is part of Cyphon Engine.
#
# Cyphon Engine is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# Cyphon Engine is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""
Tests AlertAdmin methods.
"""
# standard library
try:
    from unittest.mock import Mock
except ImportError:
    from mock import Mock
# third party
from django.contrib import admin
from django.test import TestCase
# local
from alerts.admin import AlertAdmin
from alerts.models import Alert
class AlertAdminTestCase(TestCase):
    """
    Base class for testing the AlertAdmin class.
    """
    def setUp(self):
        self.modeladmin = AlertAdmin(Alert, admin.site)
        self.modeladmin.message_user = Mock()
        self.queryset_single = Mock()
        self.queryset_single.update = Mock(return_value=1)
        self.queryset_multi = Mock()
        self.queryset_multi.update = Mock(return_value=2)
        self.request = Mock()
    def test_set_status_to_new_single(self):
        """
        Tests the set_status_to_new method for one updated alert.
        """
        self.modeladmin.set_status_to_new(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(status='NEW')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as New.')
    def test_set_status_to_new_multi(self):
        """
        Tests the set_status_to_new method for multiple updated alerts.
        """
        self.modeladmin.set_status_to_new(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(status='NEW')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as New.')
    def test_set_status_to_busy_single(self):
        """
        Tests the set_status_to_busy method for one updated alert.
        """
        self.modeladmin.set_status_to_busy(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(status='BUSY')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as Busy.')
    def test_set_status_to_busy_multi(self):
        """
        Tests the set_status_to_busy method for multiple updated alerts.
        """
        self.modeladmin.set_status_to_busy(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(status='BUSY')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as Busy.')
    def test_set_status_to_done_single(self):
        """
        Tests the set_status_to_done method for one updated alert.
        """
        self.modeladmin.set_status_to_done(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(status='DONE')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as Done.')
    def test_set_status_to_done_multi(self):
        """
        Tests the set_status_to_done method for multiple updated alerts.
        """
        self.modeladmin.set_status_to_done(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(status='DONE')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as Done.')
    def test_set_level_to_crit_single(self):
        """
        Tests the set_level_to_critical method for one updated alert.
        """
        self.modeladmin.set_level_to_critical(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(level='CRITICAL')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as critical priority.')
    def test_set_level_to_crit_multi(self):
        """
        Tests the set_level_to_critical method for multiple updated alerts.
        """
        self.modeladmin.set_level_to_critical(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(level='CRITICAL')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as critical priority.')
    def test_set_level_to_high_single(self):
        """
        Tests the set_level_to_high method for one updated alert.
        """
        self.modeladmin.set_level_to_high(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(level='HIGH')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as high priority.')
    def test_set_level_to_high_multi(self):
        """
        Tests the set_level_to_high method for multiple updated alerts.
        """
        self.modeladmin.set_level_to_high(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(level='HIGH')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as high priority.')
    def test_set_level_to_medium_single(self):
        """
        Tests the set_level_to_medium method for one updated alert.
        """
        self.modeladmin.set_level_to_medium(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(level='MEDIUM')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as medium priority.')
    def test_set_level_to_medium_multi(self):
        """
        Tests the set_level_to_medium method for multiple updated alerts.
        """
        self.modeladmin.set_level_to_medium(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(level='MEDIUM')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as medium priority.')
    def test_set_level_to_low_single(self):
        """
        Tests the set_level_to_low method for one updated alert.
        """
        self.modeladmin.set_level_to_low(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(level='LOW')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as low priority.')
    def test_set_level_to_low_multi(self):
        """
        Tests the set_level_to_low method for multiple updated alerts.
        """
        self.modeladmin.set_level_to_low(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(level='LOW')
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as low priority.')
    def test_set_outcome_true_single(self):
        """
        Tests the set_outcome_to_true method for one updated alert.
        """
        self.modeladmin.set_outcome_to_true(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(outcome=True)
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as True.')
    def test_set_outcome_true_multi(self):
        """
        Tests the set_outcome_to_true method for multiple updated alerts.
        """
        self.modeladmin.set_outcome_to_true(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(outcome=True)
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as True.')
    def test_set_outcome_false_single(self):
        """
        Tests the set_outcome_to_false method for one updated alert.
        """
        self.modeladmin.set_outcome_to_false(self.request, self.queryset_single)
        self.queryset_single.update.assert_called_once_with(outcome=False)
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '1 alert was successfully marked as False.')
    def test_set_outcome_false_multi(self):
        """
        Tests the set_outcome_to_false method for multiple updated alerts.
        """
        self.modeladmin.set_outcome_to_false(self.request, self.queryset_multi)
        self.queryset_multi.update.assert_called_once_with(outcome=False)
        self.modeladmin.message_user.assert_called_once_with(
            self.request,
            '2 alerts were successfully marked as False.')
```

avg_line_length: 40.921739 | max_line_length: 81 | alphanum_fraction: 0.697726

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
1,232 | 9,412 | 5.035714 | 0.114448 | 0.077369 | 0.092843 | 0.116054 | 0.844455 | 0.833495 | 0.814313 | 0.788524 | 0.774984 | 0.681818 | 0 | 0.004108 | 0.224182 | 9,412 | 229 | 82 | 41.100437 | 0.845522 | 0.200701 | 0 | 0.288 | 0 | 0 | 0.125088 | 0 | 0 | 0 | 0 | 0 | 0.288 | 1 | 0.152 | false | 0 | 0.056 | 0 | 0.216 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6
**Row 7 · VyrCossont/Pinput · PICO-8/games/spin/repack/varint_test.py**

| field | value |
|---|---|
| hexsha | 6b8a040a738b4226027d6ded1815bd58e4403915 |
| size | 1,490 |
| ext / lang | py / Python |
| repo head hexsha | 53acf9a0eaf1a64ae263826ea36108150ad0bfbe |
| licenses | ["MIT"] |
| stars | 9 (events 2021-10-03T06:49:23.000Z → 2022-02-17T22:10:21.000Z) |
| issues | null |
| forks | null |

`content`:

```python
from .varint import BitVector, read_varint, varint_encoded_length, write_varint
def test_write_short():
    assert write_varint(0) == BitVector(size=4, bits=0b0000)
    assert write_varint(1) == BitVector(size=4, bits=0b0001)
    assert write_varint(2) == BitVector(size=4, bits=0b0010)
    assert write_varint(3) == BitVector(size=4, bits=0b0011)
def test_write_short_neg():
    assert write_varint(-1) == BitVector(size=4, bits=0b0111)
    assert write_varint(-2) == BitVector(size=4, bits=0b0110)
    assert write_varint(-3) == BitVector(size=4, bits=0b0101)
def test_write_long():
    assert write_varint(0xf) == BitVector(size=8, bits=0b0001_1111)
    assert write_varint(0xff) == BitVector(size=12, bits=0b0011_1111_1111)
def test_write_long_neg():
    assert write_varint(-0x8) == BitVector(size=8, bits=0b0111_1000)
    assert write_varint(-0x80) == BitVector(size=12, bits=0b0110_1000_1000)
def test_read_short():
    assert read_varint(BitVector(size=4, bits=0b0000)) == (4, 0)
    assert read_varint(BitVector(size=4, bits=0b0001)) == (4, 1)
def test_read_short_neg():
    assert read_varint(BitVector(size=4, bits=0b0111)) == (4, -1)
def test_read_long():
    assert read_varint(BitVector(size=8, bits=0b0001_1111)) == (8, 0xf)
    assert read_varint(BitVector(size=12, bits=0b0011_1111_1111)) == (12, 0xff)
def test_encoded_length():
    assert varint_encoded_length(1) == 4
    assert varint_encoded_length(15) == 20
    assert varint_encoded_length(16) == 24
```

avg_line_length: 33.111111 | max_line_length: 79 | alphanum_fraction: 0.720134

Remaining quality-signal cells (in schema column order; the last two are `effective` and `hits`):
222 | 1,490 | 4.59009 | 0.18018 | 0.204122 | 0.183513 | 0.176644 | 0.541708 | 0.431796 | 0.37684 | 0.211973 | 0 | 0 | 0 | 0.13302 | 0.142282 | 1,490 | 44 | 80 | 33.863636 | 0.664319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014094 | 0 | 0.678571 | 1 | 0.285714 | true | 0 | 0.035714 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 6
6bb8dc37d081c71fc937803e18726b12ee02e423
| 33
|
py
|
Python
|
modules/retrieval/text_classification/libs/models/__init__.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 14
|
2021-09-05T10:42:14.000Z
|
2022-03-10T16:27:26.000Z
|
modules/retrieval/text_classification/libs/models/__init__.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 1
|
2021-06-16T11:35:24.000Z
|
2021-06-16T11:35:24.000Z
|
modules/retrieval/text_classification/libs/models/__init__.py
|
martinhoang11/vietnamese-ocr-toolbox
|
524b4908bedceb0c87b2c7cd7b5e3f6e1126ace5
|
[
"Apache-2.0"
] | 5
|
2021-09-05T13:26:51.000Z
|
2022-03-09T07:49:45.000Z
|
from .readability_models import *
| 33
| 33
| 0.848485
| 4
| 33
| 6.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6bc433dde74c0c2dea66fecad10e68bbe33f7f41
| 174
|
py
|
Python
|
src/compas/datastructures/network/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/network/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
src/compas/datastructures/network/__init__.py
|
gonzalocasas/compas
|
2fabc7e5c966a02d823fa453564151e1a1e7e3c6
|
[
"MIT"
] | null | null | null |
from .network import *
from .facenetwork import *
from .operations import *
from .network import __all__ as a
from .facenetwork import __all__ as b
__all__ = a + b
| 19.333333
| 38
| 0.718391
| 24
| 174
| 4.708333
| 0.375
| 0.265487
| 0.300885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.224138
| 174
| 8
| 39
| 21.75
| 0.837037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d44bb43710f90ad55d8d6b7a2fb5d4aa58b3da7b
| 522
|
py
|
Python
|
sqlite/interfaces.py
|
samsonosiomwan/SQL-model
|
24935e29182461af787a002c7d3bf15d7e71dc4a
|
[
"MIT"
] | null | null | null |
sqlite/interfaces.py
|
samsonosiomwan/SQL-model
|
24935e29182461af787a002c7d3bf15d7e71dc4a
|
[
"MIT"
] | null | null | null |
sqlite/interfaces.py
|
samsonosiomwan/SQL-model
|
24935e29182461af787a002c7d3bf15d7e71dc4a
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
class CrudOperationInterface(ABC):
@abstractmethod
def read_all():
pass
@abstractmethod
def create():
pass
@abstractmethod
def get_passed():
pass
@abstractmethod
def get_failed():
pass
@abstractmethod
def get_test1():
pass
@abstractmethod
def update():
pass
@abstractmethod
def destroy():
pass
class ToSQLInterface(ABC):
def convert_to_sql():
pass
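Since the classes above only declare abstract hooks, a concrete subclass has to override every @abstractmethod before it can be instantiated. A minimal sketch, assuming CrudOperationInterface from above is in scope (the InMemoryCrud name and its in-memory list are illustrative assumptions, not part of the original module):
# Hypothetical example, not part of interfaces.py: a CRUD backend kept in a
# plain Python list, purely to show the overrides a subclass must provide.
class InMemoryCrud(CrudOperationInterface):
    def __init__(self):
        self.rows = []

    def read_all(self):
        return list(self.rows)

    def create(self, row):
        self.rows.append(row)

    def get_passed(self):
        return [r for r in self.rows if r.get("passed")]

    def get_failed(self):
        return [r for r in self.rows if not r.get("passed")]

    def get_test1(self):
        return [r for r in self.rows if r.get("name") == "test1"]

    def update(self, index, row):
        self.rows[index] = row

    def destroy(self, index):
        del self.rows[index]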
| 14.108108
| 35
| 0.586207
| 48
| 522
| 6.25
| 0.4375
| 0.396667
| 0.42
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002907
| 0.340996
| 522
| 37
| 36
| 14.108108
| 0.869186
| 0
| 0
| 0.576923
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.307692
| true
| 0.346154
| 0.038462
| 0
| 0.423077
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
d44d2ea773b5f949407a8b5f38985ef467a5aa23
| 26
|
py
|
Python
|
elliot/recommender/unpersonalized/random_recommender/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 175
|
2021-03-04T15:46:25.000Z
|
2022-03-31T05:56:58.000Z
|
elliot/recommender/unpersonalized/random_recommender/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 15
|
2021-03-06T17:53:56.000Z
|
2022-03-24T17:02:07.000Z
|
elliot/recommender/unpersonalized/random_recommender/__init__.py
|
gategill/elliot
|
113763ba6d595976e14ead2e3d460d9705cd882e
|
[
"Apache-2.0"
] | 39
|
2021-03-04T15:46:26.000Z
|
2022-03-09T15:37:12.000Z
|
from .Random import Random
| 26
| 26
| 0.846154
| 4
| 26
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 26
| 1
| 26
| 26
| 0.956522
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2e092d4d4567985ffe882659d69fac72dff03f92
| 1,759
|
py
|
Python
|
request.py
|
HellsCrimson/DeepAi-Interface
|
3c95074eb1d9909eeb55c219e2b6ed26e64b3e09
|
[
"MIT"
] | null | null | null |
request.py
|
HellsCrimson/DeepAi-Interface
|
3c95074eb1d9909eeb55c219e2b6ed26e64b3e09
|
[
"MIT"
] | null | null | null |
request.py
|
HellsCrimson/DeepAi-Interface
|
3c95074eb1d9909eeb55c219e2b6ed26e64b3e09
|
[
"MIT"
] | null | null | null |
import requests
api_key = "YOUR API KEY"
def Recognition(path): # not used (return string)
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/celebrity-recognition",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def SupRez(path):
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/torch-srgan",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def DeepDream(path):
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/deepdream",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def NeurTalk(path): # not used (return string)
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/neuraltalk",
data={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def Toonify(path):
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/toonify",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def Color(path):
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/colorizer",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
def Waifu(path):
    if path is None:
return
r = requests.post(
"https://api.deepai.org/api/waifu2x",
files={
'image': open(path, 'rb'),
},
headers={'api-key': api_key}
)
return r.json()
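A minimal usage sketch of the helpers above (the path is a placeholder, and api_key must hold a real DeepAI key before any call succeeds):
# Hypothetical usage; "photo.jpg" is a placeholder path, not a file that ships
# with this script.
if __name__ == "__main__":
    result = Toonify("photo.jpg")
    print(result)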
| 20.218391
| 55
| 0.534395
| 220
| 1,759
| 4.236364
| 0.172727
| 0.103004
| 0.075107
| 0.120172
| 0.828326
| 0.828326
| 0.828326
| 0.828326
| 0.828326
| 0.828326
| 0
| 0.000799
| 0.288232
| 1,759
| 87
| 56
| 20.218391
| 0.74361
| 0.027857
| 0
| 0.607595
| 0
| 0
| 0.218384
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088608
| false
| 0
| 0.012658
| 0
| 0.278481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf4a571824e703e8dee7d972d1b977281408c11a
| 3,120
|
py
|
Python
|
alignholo.py
|
slimpotatoes/Holo_EM_2_keVs
|
e17a773092ff18b429ebd46ca380478d9fb3b819
|
[
"BSD-3-Clause"
] | null | null | null |
alignholo.py
|
slimpotatoes/Holo_EM_2_keVs
|
e17a773092ff18b429ebd46ca380478d9fb3b819
|
[
"BSD-3-Clause"
] | null | null | null |
alignholo.py
|
slimpotatoes/Holo_EM_2_keVs
|
e17a773092ff18b429ebd46ca380478d9fb3b819
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from skimage import transform
def shift_holo(shift_gui, em_data):
# em_data.holo_2_aligned = shift(em_data.holo_2, shift_gui)
em_data.holo_2_aligned = np.roll(em_data.holo_2, shift_gui, axis=(0, 1))
def crop_phase(shift_gui, image_not_shifted, image_shifted):
print('shift image horizontal ', shift_gui[1])
print('shift gui vertical ', shift_gui[0])
if shift_gui[0] > 0:
if shift_gui[1] > 0:
image_not_shifted_crop = np.array(image_not_shifted[shift_gui[0]:, shift_gui[1]:])
image_shifted_crop = np.array(image_shifted[:-shift_gui[0], :-shift_gui[1]])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] < 0:
image_not_shifted_crop = np.array(image_not_shifted[shift_gui[0]:, :shift_gui[1]])
image_shifted_crop = np.array(image_shifted[:-shift_gui[0], -shift_gui[1]:])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] == 0:
image_not_shifted_crop = np.array(image_not_shifted[shift_gui[0]:, :])
image_shifted_crop = np.array(image_shifted[:-shift_gui[0], :])
return [image_not_shifted_crop, image_shifted_crop]
elif shift_gui[0] < 0:
if shift_gui[1] > 0:
image_not_shifted_crop = np.array(image_not_shifted[:shift_gui[0], shift_gui[1]:])
image_shifted_crop = np.array(image_shifted[-shift_gui[0]:, :-shift_gui[1]])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] < 0:
image_not_shifted_crop = np.array(image_not_shifted[:shift_gui[0], :shift_gui[1]])
image_shifted_crop = np.array(image_shifted[-shift_gui[0]:, -shift_gui[1]:])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] == 0:
image_not_shifted_crop = np.array(image_not_shifted[:shift_gui[0], :])
image_shifted_crop = np.array(image_shifted[-shift_gui[0]:, :])
return [image_not_shifted_crop, image_shifted_crop]
elif shift_gui[0] == 0:
if shift_gui[1] > 0:
image_not_shifted_crop = np.array(image_not_shifted[:, shift_gui[1]:])
image_shifted_crop = np.array(image_shifted[:, :-shift_gui[1]])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] < 0:
image_not_shifted_crop = np.array(image_not_shifted[:, :shift_gui[1]])
image_shifted_crop = np.array(image_shifted[:, -shift_gui[1]:])
return [image_not_shifted_crop, image_shifted_crop]
if shift_gui[1] == 0:
image_not_shifted_crop = np.array(image_not_shifted[:, :])
image_shifted_crop = np.array(image_shifted[:, :])
return [image_not_shifted_crop, image_shifted_crop]
def deform_image(scale, rotation, shear, translation, data_em):
transformation = transform.AffineTransform(scale=scale, rotation=rotation, shear=shear, translation=translation)
    data_em.holo_2_aligned = transform.warp(data_em.holo_2, transformation, preserve_range=True)
print('Deformation done')
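A small illustration of how crop_phase trims both images down to their overlapping region (the arrays and the (2, 3) shift below are made-up example values, not data from this project):
import numpy as np

# Hypothetical example: a positive 2-pixel vertical and 3-pixel horizontal shift.
reference = np.zeros((128, 128))
shifted = np.zeros((128, 128))
cropped_reference, cropped_shifted = crop_phase((2, 3), reference, shifted)
# Both crops end up with the same shape, here (126, 125).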
| 56.727273
| 116
| 0.674359
| 453
| 3,120
| 4.240618
| 0.094923
| 0.179073
| 0.218636
| 0.178032
| 0.789172
| 0.756377
| 0.736596
| 0.718376
| 0.697033
| 0.697033
| 0
| 0.02381
| 0.205769
| 3,120
| 55
| 117
| 56.727273
| 0.751412
| 0.018269
| 0
| 0.36
| 0
| 0
| 0.018942
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.04
| 0
| 0.28
| 0.06
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf60515b82210ba3aba19760a8851e4ed65baee3
| 116
|
py
|
Python
|
protoseg/filters/sobel.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | null | null | null |
protoseg/filters/sobel.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | null | null | null |
protoseg/filters/sobel.py
|
chriamue/protoseg
|
4ddc7d613aadcb9d25b5773eff688214349ab23f
|
[
"MIT"
] | 1
|
2020-03-30T07:10:54.000Z
|
2020-03-30T07:10:54.000Z
|
import cv2
def sobel(img, dx, dy, ksize = 5):
s = cv2.Sobel(img, cv2.CV_8U, dx, dy, ksize=ksize)
return s
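For example (the random array below stands in for a real grayscale image):
import numpy as np

# Hypothetical usage: horizontal Sobel gradient of an 8-bit test image.
img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
edges = sobel(img, dx=1, dy=0)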
| 16.571429
| 54
| 0.612069
| 22
| 116
| 3.181818
| 0.590909
| 0.228571
| 0.257143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056818
| 0.241379
| 116
| 6
| 55
| 19.333333
| 0.738636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
cf773198e508c6ee2544a751d3ed3072b58daf9a
| 12,341
|
py
|
Python
|
tests/test_module.py
|
mollstam/skal
|
af2ce460d9addd07ad2459125511308cfa7cdb44
|
[
"Apache-2.0"
] | 3
|
2016-01-20T06:52:39.000Z
|
2019-01-18T09:06:32.000Z
|
tests/test_module.py
|
mollstam/skal
|
af2ce460d9addd07ad2459125511308cfa7cdb44
|
[
"Apache-2.0"
] | null | null | null |
tests/test_module.py
|
mollstam/skal
|
af2ce460d9addd07ad2459125511308cfa7cdb44
|
[
"Apache-2.0"
] | 3
|
2015-09-30T22:04:28.000Z
|
2020-12-26T10:36:20.000Z
|
# Copyright (c) 2012-2013 - Max Persson <max@looplab.se>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nose.tools import with_setup
import inspect
from helpers import OutputCapture
from skal import SkalApp
capture = OutputCapture(debug=False)
module = 'skalmodule'
# --- Test cases --------------------------------------------------------------
# Global tests
@with_setup(capture.start, capture.stop)
def test_description():
args = ['-h']
doc = """main help string
more help here
"""
try:
SkalApp(command_modules=[module], description=doc).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
doc = inspect.cleandoc(doc)
assert doc in capture.stdout.getvalue(), (
'help string should be "%s"' % doc)
@with_setup(capture.start, capture.stop)
def test_no_description():
args = ['-h']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'no main documentation' in capture.stderr.getvalue(), (
'there should be a warning about missing main documentation')
@with_setup(capture.start, capture.stop)
def test_version():
args = ['--version']
version = '0.5.2'
try:
SkalApp(command_modules=[module], version=version).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert version in capture.stderr.getvalue(), (
'version should be "%s"' % version)
@with_setup(capture.start, capture.stop)
def test_no_version():
args = ['-h']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'no version set' in capture.stderr.getvalue(), (
'there should be a warning about no version set')
@with_setup(capture.start, capture.stop)
def test_missing_module():
args = ['-h']
try:
SkalApp(command_modules=['missing_module']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'does not exist' in capture.stderr.getvalue(), (
'there should be a warning about module not existing')
# Argument tests
@with_setup(capture.start, capture.stop)
def test_argument_existing():
args = ['-h']
try:
SkalApp(
args={
'-b': {'help': 'bool argument', 'action': 'store_true'},
('-s', '--string'): {'help': 'string argument with long name'}
},
command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
arg = '-b'
assert arg in capture.stdout.getvalue(), (
'help should list argument "%s"' % arg)
@with_setup(capture.start, capture.stop)
def test_argument_doc():
# TODO: fix this test
args = ['-h']
try:
SkalApp(
args={
'-b': {'help': 'bool argument', 'action': 'store_true'},
('-s', '--string'): {'help': 'string argument with long name'}
},
command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
arg = '-b'
doc = 'bool argument'
assert doc in capture.stdout.getvalue(), (
'help string for "%s" should be "%s"' % (arg, doc))
@with_setup(capture.start, capture.stop)
def test_argument_value_bool():
value = 'b'
args = ['-b', 'first']
try:
SkalApp(
args={
'-b': {'help': 'bool argument', 'action': 'store_true'},
('-s', '--string'): {'help': 'string argument with long name'}
},
command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert value in capture.stdout.getvalue(), (
'output should contain "%s"' % value)
@with_setup(capture.start, capture.stop)
def test_argument_value_string():
value = 'test'
args = ['--string=' + value, 'first']
try:
SkalApp(
args={
'-b': {'help': 'bool argument', 'action': 'store_true'},
('-s', '--string'): {'help': 'string argument with long name'}
},
command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert value in capture.stdout.getvalue(), (
'output should contain "%s"' % value)
@with_setup(capture.start, capture.stop)
def test_argument_value_bool_and_string():
value1 = 'b'
value2 = 'test'
args = ['-b', '--string=' + value2, 'first']
try:
SkalApp(
args={
'-b': {'help': 'bool argument', 'action': 'store_true'},
('-s', '--string'): {'help': 'string argument with long name'}
},
command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert value1 in capture.stdout.getvalue(), (
'output should contain "%s"' % value1)
assert value2 in capture.stdout.getvalue(), (
'output should contain "%s"' % value2)
# Command tests
@with_setup(capture.start, capture.stop)
def test_command_existing():
value = 'first'
args = [value]
SkalApp(command_modules=[module]).run(args)
assert value in capture.stdout.getvalue(), (
'output should contain "%s"' % value)
@with_setup(capture.start, capture.stop)
def test_command_non_existing():
args = ['other']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code != 0, 'exit code should not be 0'
assert 'ImportError' not in capture.stderr.getvalue(), (
'output should not contain ImportError')
@with_setup(capture.start, capture.stop)
def test_command_doc():
args = ['first', '-h']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
import skalmodule
doc = inspect.getdoc(skalmodule.first)
assert doc in capture.stdout.getvalue(), (
'help string should be "%s"' % doc)
@with_setup(capture.start, capture.stop)
def test_command_no_doc():
args = ['no_doc']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'no_doc' in capture.stderr.getvalue(), (
'there should be a warning about missing documentation')
@with_setup(capture.start, capture.stop)
def test_command_without_decorator():
args = ['second']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code != 0, 'exit code should not be 0'
@with_setup(capture.start, capture.stop)
def test_command_duplicate():
args = ['-h']
try:
SkalApp(command_modules=[module, 'skalmodule_nodoc']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'duplicate' in capture.stderr.getvalue(), (
'duplicate commands should print warning and be skipped')
assert 'first command, second instance' not in capture.stdout.getvalue(), (
'duplicate commands should not be added')
@with_setup(capture.start, capture.stop)
def test_command_import_error():
args = ['-h']
try:
SkalApp(command_modules=['skalmodule_importerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'ImportError' in capture.stderr.getvalue(), (
'output should contain ImportError')
@with_setup(capture.start, capture.stop)
def test_command_syntax_error():
args = ['-h']
try:
SkalApp(command_modules=['skalmodule_syntaxerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'SyntaxError' in capture.stderr.getvalue(), (
'output should contain SyntaxError')
@with_setup(capture.start, capture.stop)
def test_command_name_error():
args = ['-h']
try:
SkalApp(command_modules=['skalmodule_nameerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'NameError' in capture.stderr.getvalue(), (
'output should contain NameError')
# Subcommand tests
@with_setup(capture.start, capture.stop)
def test_subcommand_doc():
args = [module, '-h']
try:
SkalApp(subcommand_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
import skalmodule
doc = inspect.getdoc(skalmodule)
assert doc in capture.stdout.getvalue(), (
'help string should be "%s"' % doc)
@with_setup(capture.start, capture.stop)
def test_subcommand_no_doc():
args = ['-h']
try:
SkalApp(subcommand_modules=['skalmodule_nodoc']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'skalmodule_nodoc' in capture.stderr.getvalue(), (
'there should be a warning about missing documentation')
@with_setup(capture.start, capture.stop)
def test_subcommand_import_error():
args = ['-h']
try:
SkalApp(subcommand_modules=['skalmodule_importerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'ImportError' in capture.stderr.getvalue(), (
'output should contain ImportError')
@with_setup(capture.start, capture.stop)
def test_subcommand_syntax_error():
args = ['-h']
try:
SkalApp(subcommand_modules=['skalmodule_syntaxerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'SyntaxError' in capture.stderr.getvalue(), (
'output should contain SyntaxError')
@with_setup(capture.start, capture.stop)
def test_subcommand_name_error():
args = ['-h']
try:
SkalApp(subcommand_modules=['skalmodule_nameerror']).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'NameError' in capture.stderr.getvalue(), (
'output should contain NameError')
@with_setup(capture.start, capture.stop)
def test_subcommand_command_existing():
value = 'first'
args = [module, value]
SkalApp(subcommand_modules=[module]).run(args)
assert value in capture.stdout.getvalue(), (
'output should contain "%s"' % value)
@with_setup(capture.start, capture.stop)
def test_subcommand_command_non_existing():
args = [module, 'other']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code != 0, 'exit code should not be 0'
@with_setup(capture.start, capture.stop)
def test_subcommand_command_doc():
args = [module, 'first', '-h']
try:
SkalApp(subcommand_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
import skalmodule
doc = inspect.getdoc(skalmodule.first)
assert doc in capture.stdout.getvalue(), (
'help string should be "%s"' % doc)
@with_setup(capture.start, capture.stop)
def test_subcommand_command_no_doc():
args = [module, 'no_doc']
try:
SkalApp(subcommand_modules=[module]).run(args)
except SystemExit as e:
assert e.code == 0, 'exit code should be 0'
assert 'no_doc' in capture.stderr.getvalue(), (
'there should be a warning about missing documentation')
@with_setup(capture.start, capture.stop)
def test_subcommand_command_without_decorator():
args = [module, 'second']
try:
SkalApp(command_modules=[module]).run(args)
except SystemExit as e:
assert e.code != 0, 'exit code should not be 0'
| 31.402036
| 79
| 0.635038
| 1,570
| 12,341
| 4.89172
| 0.102548
| 0.036458
| 0.060417
| 0.079297
| 0.802344
| 0.785286
| 0.76875
| 0.760026
| 0.716016
| 0.657813
| 0
| 0.00801
| 0.231181
| 12,341
| 392
| 80
| 31.482143
| 0.801433
| 0.059396
| 0
| 0.674342
| 0
| 0
| 0.219365
| 0.007594
| 0
| 0
| 0
| 0.002551
| 0.180921
| 1
| 0.095395
| false
| 0
| 0.055921
| 0
| 0.151316
| 0.003289
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
cf870b0c1b4a980981b2414d38256f569ca4703f
| 549
|
py
|
Python
|
libopy/__init__.py
|
evdatsion/libopy
|
6360dad043929b6767abbfdddbf3accfa8920797
|
[
"MIT"
] | null | null | null |
libopy/__init__.py
|
evdatsion/libopy
|
6360dad043929b6767abbfdddbf3accfa8920797
|
[
"MIT"
] | null | null | null |
libopy/__init__.py
|
evdatsion/libopy
|
6360dad043929b6767abbfdddbf3accfa8920797
|
[
"MIT"
] | null | null | null |
from hdwallets import BIP32DerivationError as BIP32DerivationError # noqa: F401
from libopy._transaction import Transaction as Transaction # noqa: F401
from libopy._wallet import generate_wallet as generate_wallet # noqa: F401
from libopy._wallet import privkey_to_address as privkey_to_address # noqa: F401
from libopy._wallet import privkey_to_pubkey as privkey_to_pubkey # noqa: F401
from libopy._wallet import pubkey_to_address as pubkey_to_address # noqa: F401
from libopy._wallet import seed_to_privkey as seed_to_privkey # noqa: F401
| 61
| 81
| 0.834244
| 80
| 549
| 5.425
| 0.2
| 0.129032
| 0.165899
| 0.248848
| 0.428571
| 0.428571
| 0.290323
| 0.290323
| 0
| 0
| 0
| 0.052301
| 0.129326
| 549
| 8
| 82
| 68.625
| 0.855649
| 0.138434
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d8433ce0335d10c182226dda6e0406924cc46172
| 2,264
|
py
|
Python
|
tests/unitary/LiquidityGaugeWrapper/test_approve.py
|
forest-friends/curve-dao-contracts
|
dfc4f279e1cf87723e672f1255a7b87c00ff997e
|
[
"MIT"
] | null | null | null |
tests/unitary/LiquidityGaugeWrapper/test_approve.py
|
forest-friends/curve-dao-contracts
|
dfc4f279e1cf87723e672f1255a7b87c00ff997e
|
[
"MIT"
] | null | null | null |
tests/unitary/LiquidityGaugeWrapper/test_approve.py
|
forest-friends/curve-dao-contracts
|
dfc4f279e1cf87723e672f1255a7b87c00ff997e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import brownie
import pytest
@pytest.mark.parametrize("idx", range(5))
def test_initial_approval_is_zero(gauge_wrapper, accounts, idx):
assert gauge_wrapper.allowance(accounts[0], accounts[idx]) == 0
def test_approve(gauge_wrapper, accounts):
gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[1]) == 10**19
def test_modify_approve(gauge_wrapper, accounts):
gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
gauge_wrapper.approve(accounts[1], 12345678, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[1]) == 12345678
def test_revoke_approve(gauge_wrapper, accounts):
gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
gauge_wrapper.approve(accounts[1], 0, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[1]) == 0
def test_approve_self(gauge_wrapper, accounts):
gauge_wrapper.approve(accounts[0], 10**19, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[0]) == 10**19
def test_only_affects_target(gauge_wrapper, accounts):
gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[1], accounts[0]) == 0
def test_returns_true(gauge_wrapper, accounts):
tx = gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
assert tx.return_value is True
def test_approval_event_fires(accounts, gauge_wrapper):
tx = gauge_wrapper.approve(accounts[1], 10**19, {'from': accounts[0]})
assert len(tx.events) == 1
assert tx.events["Approval"].values() == [accounts[0], accounts[1], 10**19]
def test_increase_allowance(accounts, gauge_wrapper):
gauge_wrapper.approve(accounts[1], 100, {'from': accounts[0]})
gauge_wrapper.increaseAllowance(accounts[1], 403, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[1]) == 503
def test_decrease_allowance(accounts, gauge_wrapper):
gauge_wrapper.approve(accounts[1], 100, {'from': accounts[0]})
gauge_wrapper.decreaseAllowance(accounts[1], 34, {'from': accounts[0]})
assert gauge_wrapper.allowance(accounts[0], accounts[1]) == 66
| 32.342857
| 79
| 0.717756
| 309
| 2,264
| 5.080906
| 0.174757
| 0.236943
| 0.107643
| 0.189172
| 0.711465
| 0.711465
| 0.711465
| 0.653503
| 0.628025
| 0.628025
| 0
| 0.061337
| 0.121466
| 2,264
| 69
| 80
| 32.811594
| 0.728004
| 0.007509
| 0
| 0.216216
| 0
| 0
| 0.028062
| 0
| 0
| 0
| 0
| 0
| 0.297297
| 1
| 0.27027
| false
| 0
| 0.054054
| 0
| 0.324324
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d84f4031891110170b6afde142b53e9c62183e51
| 4,100
|
py
|
Python
|
src/utils.py
|
danbailo/T1-Teoria-Computacao
|
542c5c28289f48cd73ab6ad3f6654e330990de60
|
[
"MIT"
] | null | null | null |
src/utils.py
|
danbailo/T1-Teoria-Computacao
|
542c5c28289f48cd73ab6ad3f6654e330990de60
|
[
"MIT"
] | null | null | null |
src/utils.py
|
danbailo/T1-Teoria-Computacao
|
542c5c28289f48cd73ab6ad3f6654e330990de60
|
[
"MIT"
] | null | null | null |
import os
from time import time
import json
from collections import defaultdict
import core
hooks = [
('Crescente', core.crescent),
('Decrescente', core.decrescent),
('Eficiente', core.efficiency)
]
def create_imgs_directory():
if not os.path.exists(os.path.join('..','imgs')):
os.makedirs(os.path.join('..','imgs'))
def create_results_directory():
if not os.path.exists(os.path.join('..','results')):
os.makedirs(os.path.join('..','results'))
def get_instances(directory):
return sorted(os.listdir(os.path.join('..',directory)), key=lambda k:int(k.strip('input.in')))
def get_exact_results(directory):
result = {}
time_results = {}
for input_file in get_instances(directory):
result[input_file] = {}
time_results[input_file] = {}
with open(os.path.join('..',directory,input_file)) as file:
state = 0
weight_items = []
values_items = []
for line in file:
inst = line.split()
if state == 0:
number_items = int(inst[0])
state = 1
elif state == 1:
item_id = int(inst[0])
values_items.append(int(inst[1]))
weight_items.append(int(inst[2]))
if item_id == number_items: state = 2
elif state == 2:
weight_max = int(inst[0])
start = time()
result[input_file]["Exato"] = core.exact(number_items, weight_max,values_items, weight_items)
time_results[input_file]["Exato"] = time() - start
with open(os.path.join('..','results','result_exact.json'),'w') as file: file.write(json.dumps(result,indent=4))
with open(os.path.join('..','results','time_exact.json'),'w') as file: file.write(json.dumps(time_results,indent=4))
def get_greedy_results(directory):
result = {}
time_results = {}
for input_file in get_instances(directory):
result[input_file] = {}
time_results[input_file] = {}
with open(os.path.join('..',directory,input_file)) as file:
state = 0
weight_items = []
values_items = []
for line in file:
inst = line.split()
if state == 0:
number_items = int(inst[0])
state = 1
elif state == 1:
item_id = int(inst[0])
values_items.append(int(inst[1]))
weight_items.append(int(inst[2]))
if item_id == number_items: state = 2
elif state == 2:
weight_max = int(inst[0])
for name, f in hooks:
start = time()
result[input_file][name] = f(number_items, weight_max,values_items, weight_items)
time_results[input_file][name] = time() - start
with open(os.path.join('..','results','result.json'),'w') as file: file.write(json.dumps(result,indent=4))
with open(os.path.join('..','results','time.json'),'w') as file: file.write(json.dumps(time_results,indent=4))
def get_GRASP_results(directory):
result = {}
time_results = {}
windows = [2,3,4,5,6,7,8,9]
iters = [10,100,1000,10000]
for input_file in get_instances(directory):
result[input_file] = {}
time_results[input_file] = {}
with open(os.path.join('..',directory,input_file)) as file:
state = 0
weight_items = []
values_items = []
for line in file:
inst = line.split()
if state == 0:
number_items = int(inst[0])
state = 1
elif state == 1:
item_id = int(inst[0])
values_items.append(int(inst[1]))
weight_items.append(int(inst[2]))
if item_id == number_items: state = 2
elif state == 2:
weight_max = int(inst[0])
for max_it in iters:
result[input_file]["Máx Iterações - "+str(max_it)] = {}
time_results[input_file]["Máx Iterações - "+str(max_it)] = {}
for window in windows:
start = time()
result[input_file]["Máx Iterações - "+str(max_it)]["Janela - "+str(window)] = core.grasp(max_it, window, number_items, weight_max, values_items, weight_items)
time_results[input_file]["Máx Iterações - "+str(max_it)]["Janela - "+str(window)] = time() - start
with open(os.path.join('..','results','result_GRASP.json'),'w', encoding='utf8') as file: file.write(json.dumps(result,indent=4, ensure_ascii=False))
with open(os.path.join('..','results','time_GRASP.json'),'w', encoding='utf8') as file: file.write(json.dumps(time_results,indent=4, ensure_ascii=False))
| 34.166667
| 163
| 0.657073
| 607
| 4,100
| 4.276771
| 0.156507
| 0.069337
| 0.053929
| 0.048536
| 0.830894
| 0.76849
| 0.76849
| 0.757319
| 0.741525
| 0.643297
| 0
| 0.018464
| 0.167805
| 4,100
| 120
| 164
| 34.166667
| 0.74238
| 0
| 0
| 0.622642
| 0
| 0
| 0.077786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0.04717
| 0.009434
| 0.113208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d8ecc3cfe2a6d87e064d01f92bd2ca1a17facc85
| 27,948
|
py
|
Python
|
samitorch/inputs/datasets.py
|
sami-ets/samitorch
|
b0407c110ae198a9e18594716811bd7184ff2f36
|
[
"MIT"
] | 7
|
2019-10-19T20:20:37.000Z
|
2021-02-03T03:47:54.000Z
|
samitorch/inputs/datasets.py
|
sami-ets/SAMITorch
|
b0407c110ae198a9e18594716811bd7184ff2f36
|
[
"MIT"
] | null | null | null |
samitorch/inputs/datasets.py
|
sami-ets/SAMITorch
|
b0407c110ae198a9e18594716811bd7184ff2f36
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 SAMITorch Authors. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import abc
import copy
import os
from typing import Callable, List, Optional, Tuple, Union
import numpy as np
from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
from torchvision.transforms import Compose
from torch.utils.data.dataset import Dataset
from samitorch.inputs.augmentation.strategies import DataAugmentationStrategy
from samitorch.inputs.transformers import ToNumpyArray, PadToPatchShape, ToNDTensor
from samitorch.inputs.sample import Sample
from samitorch.inputs.patch import Patch, CenterCoordinate
from samitorch.inputs.images import Modality
from samitorch.utils.slice_builder import SliceBuilder
from samitorch.utils.files import extract_file_paths, split_filename
class AbstractDatasetFactory(metaclass=abc.ABCMeta):
@staticmethod
@abc.abstractmethod
def create_train_test(*args, **kwargs):
raise NotImplementedError
@staticmethod
@abc.abstractmethod
def create_train_valid_test(*args, **kwargs):
raise NotImplementedError
class SegmentationDataset(Dataset):
"""
Create a dataset class in PyTorch for reading NIfTI files.
"""
def __init__(self, source_paths: List[str], target_paths: List[str], samples: List[Sample],
modalities: Union[Modality, List[Modality]], dataset_id: int = None,
transforms: Optional[Callable] = None,
augment: DataAugmentationStrategy = None) -> None:
"""
Dataset initializer.
Args:
source_paths (List of str): Path to source images.
target_paths (List of str): Path to target (labels) images.
samples (list of :obj:`samitorch.inputs.sample.Sample`): A list of Sample objects.
modality (:obj:`samitorch.inputs.images.Modalities`): The modality of the data set.
dataset_id (int): An integer representing the ID of the data set.
transforms (Callable): transform to apply to both source and target images.
augment (Callable): transform to apply as data augmentation on source image.
"""
self._source_paths = source_paths
self._target_paths = target_paths
self._samples = samples
self._modalities = modalities
self._dataset_id = dataset_id
self._transform = transforms
self._augment = augment
def __len__(self):
return len(self._samples)
def __getitem__(self, idx: int):
sample = self._samples[idx]
if self._transform is not None:
sample = self._transform(sample)
if self._augment is not None:
sample.augmented_x = self._augment(sample.x)
else:
sample.augmented_x = sample.x
return sample
class MultimodalSegmentationDataset(Dataset):
"""
Create a dataset class in PyTorch for reading NIfTI files.
"""
def __init__(self, source_paths: List[str], target_paths: List[str], samples: List[Sample],
modalities: List[Modality], dataset_id: int = None, transforms: Optional[Callable] = None,
augment: DataAugmentationStrategy = None) -> None:
"""
Dataset initializer.
Args:
source_paths (List of str): Path to source images.
target_paths (List of str): Path to target (labels) images.
samples (list of :obj:`samitorch.inputs.sample.Sample`): A list of Sample objects.
modalities (List of :obj:`samitorch.inputs.images.Modalities`): The list of modalities of the data set.
dataset_id (int): An integer representing the ID of the data set.
transforms (Callable): transform to apply to both source and target images.
augment (Callable): transform to apply as data augmentation on source image.
"""
self._source_paths = source_paths
self._target_paths = target_paths
self._samples = samples
self._modalities = modalities
self._dataset_id = dataset_id
self._transform = transforms
self._augment = augment
def __len__(self):
return len(self._samples)
def __getitem__(self, idx: int):
sample = self._samples[idx]
if self._transform is not None:
sample = self._transform(sample)
if self._augment is not None:
sample.augmented_x = self._augment(sample.x)
else:
sample.augmented_x = sample.x
return sample
class PatchDataset(SegmentationDataset):
"""
Create a dataset of patches in PyTorch for reading NiFTI files and slicing them into fixed shape.
"""
def __init__(self, source_paths: List[str], target_paths: List[str], samples: List[Sample],
patch_size: Tuple[int, int, int, int],
step: Tuple[int, int, int, int], modality: Modality,
dataset_id: int = None, transforms: Optional[Callable] = None,
augment: DataAugmentationStrategy = None) -> None:
"""
Dataset initializer.
Args:
source_paths (List of str): Path to source images.
target_paths (List of str): Path to target (labels) images.
samples (list of :obj:`samitorch.inputs.sample.Sample`): A list of Sample objects.
patch_size (Tuple of int): A tuple representing the desired patch size.
step (Tuple of int): A tuple representing the desired step between two patches.
modality (:obj:`samitorch.inputs.images.Modalities`): The modality of the data set.
dataset_id (int): An integer representing the ID of the data set.
transforms (Callable): transform to apply to both source and target images.
augment (Callable): transform to apply as data augmentation on source image.
"""
super(PatchDataset, self).__init__(source_paths, target_paths, samples, modality, dataset_id, transforms,
augment)
self._pre_transform = Compose([ToNumpyArray(), PadToPatchShape(patch_size=patch_size, step=step)])
@property
def samples(self):
return self._samples
@samples.setter
def samples(self, samples):
self._samples = samples
@property
def modality(self):
return self._modalities
def __len__(self):
return len(self._samples)
def __getitem__(self, idx: int):
# Get the sample according to the id.
sample = self._samples[idx]
image_id = sample.x.image_id
image = self._source_paths[image_id]
target = self._target_paths[image_id]
image = self._pre_transform(image)
target = self._pre_transform(target)
# Get the patch and its slice out of this sample.
patch_x, patch_y = sample.x, sample.y
slice_x, slice_y = patch_x.slice, patch_y.slice
slice_x, slice_y = image[tuple(slice_x)], target[tuple(slice_y)]
center_coordinate = CenterCoordinate(slice_x, slice_y)
transformed_patch_x = Patch(slice_x, image_id, center_coordinate)
transformed_patch_y = Patch(slice_y, image_id, center_coordinate)
sample.x = transformed_patch_x
sample.y = transformed_patch_y
if self._transform is not None:
sample = self._transform(sample)
if self._augment is not None:
sample = self._augment(sample)
return sample
class MultimodalPatchDataset(MultimodalSegmentationDataset):
"""
Create a dataset class in PyTorch for reading NIfTI files.
"""
def __init__(self, source_paths: List[str], target_paths: List[str], samples: List[Sample],
patch_size: Tuple[int, int, int, int], step: Tuple[int, int, int, int], modalities: List[Modality],
dataset_id: int = None, transforms: Optional[Callable] = None,
augment: DataAugmentationStrategy = None) -> None:
"""
Dataset initializer.
Args:
source_paths (List of str): Path to source images.
target_paths (List of str): Path to target (labels) images.
samples (list of :obj:`samitorch.inputs.sample.Sample`): A list of Sample objects.
            modalities (List of :obj:`samitorch.inputs.images.Modalities`): The list of modalities of the data set.
dataset_id (int): An integer representing the ID of the data set.
transforms (Callable): transform to apply to both source and target images.
augment (Callable): transform to apply as data augmentation on source image.
"""
super(MultimodalPatchDataset, self).__init__(source_paths, target_paths, samples, modalities, dataset_id,
transforms, augment)
self._pre_transform = Compose([ToNumpyArray(), PadToPatchShape(patch_size=patch_size, step=step)])
def __len__(self):
return len(self._samples)
def __getitem__(self, idx: int):
# Get the sample according to the id.
sample = self._samples[idx]
image_id = sample.x.image_id
image_modality_1 = self._source_paths[image_id][0]
image_modality_2 = self._source_paths[image_id][1]
target = self._target_paths[image_id]
image_modality_1 = self._pre_transform(image_modality_1)
image_modality_2 = self._pre_transform(image_modality_2)
target = self._pre_transform(target)
# Get the patch and its slice out of this sample.
patch_x, patch_y = sample.x, sample.y
slice_x, slice_y = patch_x.slice, patch_y.slice
# Get the real image data for each modality and target.
x_modality_1 = image_modality_1[tuple(slice_x)]
x_modality_2 = image_modality_2[tuple(slice_x)]
y = target[tuple(slice_y)]
# Concatenate on channel axis both modalities.
slice_x = np.concatenate((x_modality_1, x_modality_2), axis=0)
slice_y = y
center_coordinate = CenterCoordinate(slice_x, slice_y)
transformed_patch_x = Patch(slice_x, image_id, center_coordinate)
transformed_patch_y = Patch(slice_y, image_id, center_coordinate)
sample.x = transformed_patch_x
sample.y = transformed_patch_y
if self._transform is not None:
sample = self._transform(sample)
if self._augment is not None:
sample = self._augment(sample)
return sample
class PatchDatasetFactory(AbstractDatasetFactory):
@staticmethod
def create_train_test(source_dir: str, target_dir: str, modalities: Union[Modality, List[Modality]],
patch_size: Tuple[int, int, int, int],
step: Tuple[int, int, int, int], dataset_id: int, test_size: float,
keep_centered_on_foreground: bool = True,
augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a SegmentationDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
modalities (List of :obj:`samitorch.inputs.images.Modalities`): The first modality of the data set.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
if isinstance(modalities, list):
return PatchDatasetFactory._create_multimodal_train_test(source_dir, target_dir, modalities,
patch_size, step, dataset_id, test_size,
keep_centered_on_foreground,
augmentation_strategy)
else:
return PatchDatasetFactory._create_single_modality_train_test(source_dir, target_dir, modalities,
patch_size, step, dataset_id, test_size,
keep_centered_on_foreground,
augmentation_strategy)
@staticmethod
def _create_single_modality_train_test(source_dir: str, target_dir: str, modality: Modality,
patch_size: Tuple[int, int, int, int],
step: Tuple[int, int, int, int], dataset_id: int, test_size: float,
keep_centered_on_foreground: bool = True,
augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a PatchDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
modality (:obj:`samitorch.inputs.images.Modalities`): The modality of the data set.
patch_size (Tuple of int): A tuple representing the desired patch size.
step (Tuple of int): A tuple representing the desired step between two patches.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
keep_centered_on_foreground (bool): Keep only patches which center coordinates belongs to a foreground class.
augmentation_strategy (:obj:`samitorch.inputs.augmentation.strategies.DataAugmentationStrategy`): Data
augmentation strategy to apply on inputs.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
source_dir = os.path.join(source_dir, str(modality))
source_paths, target_paths = np.array(extract_file_paths(source_dir)), np.array(extract_file_paths(target_dir))
transforms = Compose([ToNumpyArray(), PadToPatchShape(patch_size=patch_size, step=step)])
patches = PatchDatasetFactory.get_patches(source_paths, target_paths, patch_size, step, transforms,
keep_centered_on_foreground)
label_patches = copy.deepcopy(patches)
train_ids, test_ids = next(
StratifiedShuffleSplit(n_splits=1, test_size=test_size).split(range(len(patches)), list(
map(lambda patch: patch.class_id, patches))))
train_samples = list(
map(lambda source, target: Sample(source, target, dataset_id=dataset_id, is_labeled=True),
patches[train_ids], label_patches[train_ids]))
test_samples = list(
map(lambda source, target: Sample(source, target, dataset_id=dataset_id, is_labeled=True),
patches[test_ids], label_patches[test_ids]))
training_dataset = PatchDataset(list(source_paths), list(target_paths), train_samples, patch_size, step,
modality, dataset_id, Compose([ToNDTensor()]), augmentation_strategy)
test_dataset = PatchDataset(list(source_paths), list(target_paths), test_samples, patch_size, step, modality,
dataset_id, Compose([ToNDTensor()]), augmentation_strategy)
return training_dataset, test_dataset
@staticmethod
def _create_multimodal_train_test(source_dir: str, target_dir: str, modalities: List[Modality],
patch_size: Tuple[int, int, int, int], step: Tuple[int, int, int, int],
dataset_id: int, test_size: float, keep_centered_on_foreground: bool = False,
augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a MultimodalPatchDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
            modalities (List of :obj:`samitorch.inputs.images.Modalities`): The list of modalities of the data set.
patch_size (Tuple of int): A tuple representing the desired patch size.
step (Tuple of int): A tuple representing the desired step between two patches.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
keep_centered_on_foreground (bool): Keep only patches which center coordinates belongs to a foreground class.
augmentation_strategy (:obj:`samitorch.inputs.augmentation.strategies.DataAugmentationStrategy`): Data
augmentation strategy to apply on inputs.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
source_dirs = list(map(lambda modality: os.path.join(source_dir, str(modality)), modalities))
source_paths = list(map(lambda source_dir: np.array(extract_file_paths(source_dir)), source_dirs))
target_paths = np.array(extract_file_paths(target_dir))
        source_paths = np.stack([modality for modality in source_paths], axis=1)
transforms = Compose([ToNumpyArray(), PadToPatchShape(patch_size=patch_size, step=step)])
patches = PatchDatasetFactory.get_patches(source_paths, target_paths, patch_size, step, transforms,
keep_centered_on_foreground)
label_patches = copy.deepcopy(patches)
train_ids, test_ids = next(
StratifiedShuffleSplit(n_splits=1, test_size=test_size).split(range(len(patches)), list(
map(lambda patch: patch.class_id, patches))))
train_samples = list(
map(lambda source, target: Sample(source, target, dataset_id=dataset_id, is_labeled=True),
patches[train_ids],
label_patches[train_ids]))
test_samples = list(
map(lambda source, target: Sample(source, target, dataset_id=dataset_id, is_labeled=True),
patches[test_ids],
label_patches[test_ids]))
training_dataset = MultimodalPatchDataset(list(source_paths), list(target_paths), train_samples, patch_size,
step, modalities, dataset_id, Compose([ToNDTensor()]),
augmentation_strategy)
test_dataset = MultimodalPatchDataset(list(source_paths), list(target_paths), test_samples, patch_size, step,
modalities, dataset_id, Compose([ToNDTensor()]),
augmentation_strategy)
return training_dataset, test_dataset
@staticmethod
def get_patches(source_paths: np.ndarray, target_paths: np.ndarray, patch_size: Tuple[int, int, int, int],
step: Tuple[int, int, int, int], transforms: Callable, keep_centered_on_foreground: bool = False):
patches = list()
for idx in range(len(source_paths)):
source_path, target_path = source_paths[idx], target_paths[idx]
sample = Sample(x=source_path, y=target_path, dataset_id=None, is_labeled=True)
transformed_sample = transforms(sample)
slices = SliceBuilder(transformed_sample.x.shape, patch_size=patch_size, step=step).build_slices()
for slice in slices:
if np.count_nonzero(transformed_sample.x[slice]) > 0:
center_coordinate = CenterCoordinate(transformed_sample.x[slice], transformed_sample.y[slice])
patches.append(
Patch(slice, idx, center_coordinate))
else:
pass
if keep_centered_on_foreground:
patches = list(filter(lambda patch: patch.center_coordinate.is_foreground, patches))
return np.array(patches)
class SegmentationDatasetFactory(AbstractDatasetFactory):
@staticmethod
def create_train_test(source_dir: str, target_dir: str, modalities: Union[Modality, List[Modality]],
dataset_id: int,
test_size: float, augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a SegmentationDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
modalities (List of :obj:`samitorch.inputs.images.Modalities`): The first modality of the data set.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
if isinstance(modalities, list):
return SegmentationDatasetFactory._create_multimodal_train_test(source_dir, target_dir, modalities,
dataset_id, test_size,
augmentation_strategy)
else:
return SegmentationDatasetFactory._create_single_modality_train_test(source_dir, target_dir, modalities,
dataset_id, test_size,
augmentation_strategy)
@staticmethod
def _create_single_modality_train_test(source_dir: str, target_dir: str, modality: Modality, dataset_id: int,
test_size: float,
augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a SegmentationDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
modality (:obj:`samitorch.inputs.images.Modalities`): The first modality of the data set.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
augmentation_strategy (:obj:`samitorch.inputs.augmentation.strategies.DataAugmentationStrategy`): Data
augmentation strategy to apply on inputs.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
source_dir = os.path.join(source_dir, str(modality))
source_paths, target_paths = np.array(extract_file_paths(source_dir)), np.array(extract_file_paths(target_dir))
train_ids, test_ids = next(ShuffleSplit(n_splits=1, test_size=test_size).split(source_paths, target_paths))
train_samples = list(
map(lambda source, target: Sample(source, target, is_labeled=True), source_paths[train_ids],
target_paths[train_ids]))
test_samples = list(
map(lambda source, target: Sample(source, target, is_labeled=True), source_paths[test_ids],
target_paths[test_ids]))
training_dataset = SegmentationDataset(list(source_paths), list(target_paths), train_samples, modality,
dataset_id, Compose([ToNumpyArray(), ToNDTensor()]),
augmentation_strategy)
test_dataset = SegmentationDataset(list(source_paths), list(target_paths), test_samples, modality, dataset_id,
Compose([ToNumpyArray(), ToNDTensor()]), augmentation_strategy)
return training_dataset, test_dataset
@staticmethod
def _create_multimodal_train_test(source_dir: str, target_dir: str, modalities: List[Modality],
dataset_id: int, test_size: float,
augmentation_strategy: DataAugmentationStrategy = None):
"""
Create a MultimodalDataset object for both training and validation.
Args:
source_dir (str): Path to source directory.
target_dir (str): Path to target directory.
            modalities (List of :obj:`samitorch.inputs.images.Modalities`): The list of modalities of the data set.
dataset_id (int): An integer representing the ID of the data set.
test_size (float): The size in percentage of the validation set over total number of samples.
augmentation_strategy (:obj:`samitorch.inputs.augmentation.strategies.DataAugmentationStrategy`): Data
augmentation strategy to apply on inputs.
Returns:
Tuple of :obj:`torch.utils.data.dataset`: A tuple containing both training and validation dataset.
"""
source_dirs = list(map(lambda modality: os.path.join(source_dir, str(modality)), modalities))
source_paths = list(map(lambda source_dir: np.array(extract_file_paths(source_dir)), source_dirs))
target_paths = np.array(extract_file_paths(target_dir))
        source_paths = np.stack([modality for modality in source_paths], axis=1)
train_ids, test_ids = next(ShuffleSplit(n_splits=1, test_size=test_size).split(source_paths, target_paths))
train_samples = list(
map(lambda source, target: Sample(source, target, is_labeled=True), source_paths[train_ids],
target_paths[train_ids]))
test_samples = list(
map(lambda source, target: Sample(source, target, is_labeled=True), source_paths[test_ids],
target_paths[test_ids]))
training_dataset = MultimodalSegmentationDataset(list(source_paths), list(target_paths), train_samples,
modalities, dataset_id,
Compose([ToNumpyArray(), ToNDTensor()]), augmentation_strategy)
test_dataset = MultimodalSegmentationDataset(list(source_paths), list(target_paths), test_samples,
modalities, dataset_id, Compose([ToNumpyArray(), ToNDTensor()]),
augmentation_strategy)
return training_dataset, test_dataset
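A hedged end-to-end sketch of the factory entry point (the directory layout and the Modality.T1 member are illustrative assumptions; consult the samitorch Modality enum for the real member names):
# Hypothetical call, not part of datasets.py. "data/source" is expected to
# contain one sub-folder per modality and "data/labels" the matching labels.
from samitorch.inputs.images import Modality

train_dataset, test_dataset = SegmentationDatasetFactory.create_train_test(
    source_dir="data/source",
    target_dir="data/labels",
    modalities=Modality.T1,  # assumed enum member (single-modality case)
    dataset_id=0,
    test_size=0.2,
)
print(len(train_dataset), len(test_dataset))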
| 49.291005
| 121
| 0.631315
| 3,136
| 27,948
| 5.415179
| 0.079082
| 0.024909
| 0.012719
| 0.016253
| 0.845955
| 0.834177
| 0.818867
| 0.810623
| 0.790778
| 0.773878
| 0
| 0.00166
| 0.288858
| 27,948
| 566
| 122
| 49.378092
| 0.85278
| 0.297732
| 0
| 0.65411
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0.003425
| 0.05137
| 0.020548
| 0.222603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2b1628a48b2705c10e658d825951d894bf2a0132
| 37
|
py
|
Python
|
unique/__init__.py
|
amarinelli/unique-value-domain
|
cab781736ffa3ce193edd21c7fed475265ba9781
|
[
"MIT"
] | 1
|
2020-05-20T19:40:49.000Z
|
2020-05-20T19:40:49.000Z
|
unique/__init__.py
|
amarinelli/unique-value-domain
|
cab781736ffa3ce193edd21c7fed475265ba9781
|
[
"MIT"
] | null | null | null |
unique/__init__.py
|
amarinelli/unique-value-domain
|
cab781736ffa3ce193edd21c7fed475265ba9781
|
[
"MIT"
] | null | null | null |
from .core import get_unique_feature
| 18.5
| 36
| 0.864865
| 6
| 37
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 37
| 1
| 37
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2b565319e48bbc05e0a9d26f84b4e2327398bd35
| 27
|
py
|
Python
|
ham_package/ham_module.py
|
thinkAmi-sandbox/python_modulefinder-sample
|
564f337f05656c0a196fea39367a310c38bef02d
|
[
"Unlicense"
] | null | null | null |
ham_package/ham_module.py
|
thinkAmi-sandbox/python_modulefinder-sample
|
564f337f05656c0a196fea39367a310c38bef02d
|
[
"Unlicense"
] | null | null | null |
ham_package/ham_module.py
|
thinkAmi-sandbox/python_modulefinder-sample
|
564f337f05656c0a196fea39367a310c38bef02d
|
[
"Unlicense"
] | null | null | null |
def ham():
    print('ham')
| 13.5
| 16
| 0.518519
| 4
| 27
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 27
| 2
| 16
| 13.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.107143
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
2b5d1d95f862e6718444f43838f118a934572db6
| 21
|
py
|
Python
|
python/fasttext_module/fasttext/numpy/__init__.py
|
telescout/fastText
|
8b657bbc8edd2fe200623f952be0088ff790d4b9
|
[
"MIT"
] | null | null | null |
python/fasttext_module/fasttext/numpy/__init__.py
|
telescout/fastText
|
8b657bbc8edd2fe200623f952be0088ff790d4b9
|
[
"MIT"
] | null | null | null |
python/fasttext_module/fasttext/numpy/__init__.py
|
telescout/fastText
|
8b657bbc8edd2fe200623f952be0088ff790d4b9
|
[
"MIT"
] | null | null | null |
from .numpy import *
| 10.5
| 20
| 0.714286
| 3
| 21
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
995dd9616d90ac26230301e47f49a18e2d451805
| 20
|
py
|
Python
|
pip/c19/countries/__init__.py
|
siphr/c19
|
1acf258668c0c5ede13f579c06aec73eee443c7b
|
[
"MIT"
] | null | null | null |
pip/c19/countries/__init__.py
|
siphr/c19
|
1acf258668c0c5ede13f579c06aec73eee443c7b
|
[
"MIT"
] | null | null | null |
pip/c19/countries/__init__.py
|
siphr/c19
|
1acf258668c0c5ede13f579c06aec73eee443c7b
|
[
"MIT"
] | null | null | null |
from .pk import pk
| 6.666667
| 18
| 0.7
| 4
| 20
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 20
| 2
| 19
| 10
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
510b017f4477220c885d5ee9b0de9dc852fc5650
| 84
|
py
|
Python
|
tests/test_lola.py
|
arghyagod-coder/lolaaur
|
c5efa29f183e4803f43a0387ada9bb5696beda4b
|
[
"MIT"
] | 1
|
2021-06-29T06:59:25.000Z
|
2021-06-29T06:59:25.000Z
|
tests/test_lola.py
|
arghyagod-coder/lolaaur
|
c5efa29f183e4803f43a0387ada9bb5696beda4b
|
[
"MIT"
] | null | null | null |
tests/test_lola.py
|
arghyagod-coder/lolaaur
|
c5efa29f183e4803f43a0387ada9bb5696beda4b
|
[
"MIT"
] | null | null | null |
from lolacli import __version__
def test_version():
    assert __version__ == '0.1.4'
| 21
| 31
| 0.75
| 12
| 84
| 4.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 0.142857
| 84
| 4
| 32
| 21
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5a9208ec4e5dcd5d02b11b43ad734388025cb3cb
| 373
|
py
|
Python
|
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Center_of_Gravity/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Center_of_Gravity/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
SUAVE/SUAVE-2.5.0/trunk/SUAVE/Methods/Center_of_Gravity/__init__.py
|
Vinicius-Tanigawa/Undergraduate-Research-Project
|
e92372f07882484b127d7affe305eeec2238b8a9
|
[
"MIT"
] | null | null | null |
## @defgroup Methods-Center_of_Gravity Center_of_Gravity
# Description
# @ingroup Methods
from .compute_component_centers_of_gravity import compute_component_centers_of_gravity
from .compute_mission_center_of_gravity import compute_mission_center_of_gravity
from .compute_fuel_center_of_gravity_longitudinal_range import compute_fuel_center_of_gravity_longitudinal_range
| 41.444444
| 112
| 0.911528
| 51
| 373
| 6.039216
| 0.313725
| 0.233766
| 0.292208
| 0.162338
| 0.675325
| 0.279221
| 0.279221
| 0
| 0
| 0
| 0
| 0
| 0.061662
| 373
| 8
| 113
| 46.625
| 0.88
| 0.219839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5ae4452bc1f2fe79d8b631f61bc01d9888d6302b
| 6,094
|
py
|
Python
|
analysis.py
|
fanta-mnix/speech-to-text-comparison
|
1e43b55edea2dd943f08b9277bd63e2551299e0c
|
[
"MIT"
] | null | null | null |
analysis.py
|
fanta-mnix/speech-to-text-comparison
|
1e43b55edea2dd943f08b9277bd63e2551299e0c
|
[
"MIT"
] | null | null | null |
analysis.py
|
fanta-mnix/speech-to-text-comparison
|
1e43b55edea2dd943f08b9277bd63e2551299e0c
|
[
"MIT"
] | null | null | null |
from Levenshtein import distance
def ratio(t1, t2):
    return 1 - float(distance(t1, t2)) / max(len(t1), len(t2))
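# Worked example of the normalized similarity above (a standard Levenshtein fact, not taken from
# the original file): distance('kitten', 'sitting') == 3 and max(len(t1), len(t2)) == 7, so
# ratio('kitten', 'sitting') == 1 - 3/7, roughly 0.571.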
audio1 = '''amigos são poucos e escassos ao longo da vida e esta é uma lição que eu duramente aprendi nós temos centenas de conhecidos mas temos poucos amigos e eu já lancei uma vez e volta a dizer que é um teste eficaz infalível para vocês aplicarem sobre a amizade ao retornarem às suas casas ao longo dos próximos dias reunam os amigos e digam sobre o sucesso de vocês não falem de fracasso o fracasso provoca solidariedade entre todas as pessoas o fracasso provoca proximidade entre as pessoas se eu disser que tem um câncer receberei abraços todas as pessoas'''.lower()
bing1 = '''amigos são poucos e escassos ao longo da vida eh essa é uma lição que eu duramente aprende nós temos centenas de conhecidos mas temos poucos amigos e eu já lancei uma vez de volta dizer que é um teste e ficas infalível pra vocês aplicarem sobre amizade ao retornarem às suas casas ao longo dos próximos dias reuniu os amigos e digam sobre o sucesso de vocês não falem de fracasso em fracasso provoca solidariedade entre todas as pessoas ou fracasso provoca proximidade entre as pessoas se eu disser que tem um câncer receberia braços todas as pessoas'''.lower()
google1 = '''amigos são poucos e escassos ao longo da vida e esta é uma lição que eu duramente aprende nós temos centenas de conhecidos mas temos poucos amigos e eu já lancei uma vez de volta dizer que é um teste eficaz alívio para vocês aplicar em sobre amizade ao retornarem às suas casas ao longo dos próximos dias eu não os amigos e digam sobre o sucesso de você não falem de fracasso em fracasso provoca solidariedade entre todas as pessoas o fracasso provoca proximidade entre as pessoas se eu disser que tem um câncer receberei abraços todas as pessoas'''.lower()
watson1 = '''amigo são poucos e escassos ao longo da vida e esta é uma lição que o dura mente aprende aos termos centenas de conhecidos mas temos poucos amigos e os balanceio uma vez e volta a dizer que é um teste e fica às infalível para vocês aplicarem sobre amizade ao retornarem a suas que reúnam os amigos digam sobre o sucesso não falem os fracassos o fracasso provoca solidariedade de todas as pessoas o fracasso provoca proximidade entre as se eu disse é que tem um câncer receberia braços todas as pessoas'''.lower()
audio2 = '''Além disso você quiser usar o microfone de lapela ligado diretamente na câmera o fio desse microfone vai ter que ser bem comprido um fio bem comprido por onde tá passando o sinal fraco como de um microfone vira uma bela antena e é muito fácil pegar a interferência eletromagnética com um fio de microfone que é Longe de mais além disso quanto maior o fio maior a chance de alguém tropeçar no fio e mandar sua câmera para o chão se esses problemas não te incomoda usar o microfone ligado diretamente na câmera na verdade é uma boa alternativa e mais uma coisa essas informações não se aplicam no caso de câmeras profissionais e a câmera de microfones com conexão XLR essas conexões são praticamente a prova de interferência e se uma câmera tem entrada XLR a chance dela tem um pré-amplificador de baixo qualidade é muito pequena'''.lower()
bing2 = '''além disso se você quiser usar o microfone de lapela ligado diretamente na camera ou field's microfone ptc bem cumprido um filme comprido porque onde tá passando o sinal fraco como de o microfone numa bela antenatal muito fácil pegar interferência eletromagnética um filme microfone que é longo demais além disso quanto maior o phil maiores chances de alguém tropeçou no fio de mandar sua camera pro chão seus problemas não te incomodam usar o microfone ligado diretamente na camera na verdade é uma boa tentativa ah mais uma coisa essas informações não se aplicam no caso de câmeras profissionais e câmeras microfones com conexão xlr essas conexões são praticamente a prova de interferência esse uma camera temporada salieri a chance dela ter um pré amplificador de baixa qualidade é muito pequena'''.lower()
google2 = '''Além disso você quiser usar o microfone de lapela ligado diretamente na câmera o seu desse microfone vai ter que ser bem Comprido um fio bem comprido para Onde tá passando o sinal fraco como de um microfone vir uma bela antena que é muito fácil pegar a interferência eletromagnética com um fio de microfone que é Longe de mais além disso quanto maior o fio maior a chance de alguém tropeçar no fio e mandar sua câmera para o chão problemas não te incomoda usar o microfone ligado diretamente na câmera na verdade é uma boa alternativa e mais uma coisa essas informações não se aplicam no caso de câmeras profissionais e a câmera de microfones com conexão XLR essas conexões são praticamente a prova de interferência e se uma câmera tem entrada XLR a chance dela tem um pré-amplificador de baixo qualidade é muito pequeno'''.lower()
watson2 = '''além disso se você quiser usar o microfone de lapela ligado diretamente na câmara o fio das microfone pertencer bem cumprido um fio bem cumprido por prós tapação num sinal fraco como de um microfone vira uma bela antena é muito fácil pegar interferência das magnética com que os microfones é longo demais além disso quanto maior o fio maiores chances de alguém tropeçar no filme e mandasse canetas chão se os problemas não se incomodam os ao microfone ligado diretamente na câmara na verdade é uma boa alternativa a e mais uma coisa essas informações não se aplicam no caso de câmeras profissionais e para câmeras microfones com conexão gisele é essas conexões são praticamente a prova de interferências existe uma câmera tem cargos aliás as chances da otan pré-amplificador de baixa qualidade é muito pequena'''.lower()
summary = '''
| Empresa | Áudio 1 | Áudio 2 |
|---------|---------|---------|
| Bing | {:.3f} | {:.3f} |
| Google | {:.3f} | {:.3f} |
| Watson | {:.3f} | {:.3f} |
'''.format(
    ratio(audio1, bing1),
    ratio(audio2, bing2),
    ratio(audio1, google1),
    ratio(audio2, google2),
    ratio(audio1, watson1),
    ratio(audio2, watson2))
print(summary)
| 152.35
| 850
| 0.786511
| 1,032
| 6,094
| 4.64438
| 0.225775
| 0.020655
| 0.023367
| 0.015022
| 0.740872
| 0.725642
| 0.698727
| 0.691216
| 0.656374
| 0.618193
| 0
| 0.006999
| 0.179357
| 6,094
| 39
| 851
| 156.25641
| 0.95141
| 0
| 0
| 0
| 0
| 0.32
| 0.91385
| 0.005087
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0.12
| 0.04
| 0.04
| 0.12
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
85268c9adcf8e3b7ce2874dc264c4884245fda1b
| 35
|
py
|
Python
|
co_cn/net/__init__.py
|
zhujiagang/co-cn
|
4ea18441fa1842eb5d87a90ab8f28f3d803ef1a5
|
[
"BSD-2-Clause"
] | 7
|
2018-04-18T13:27:22.000Z
|
2020-05-27T14:43:49.000Z
|
co_cn/net/__init__.py
|
zhujiagang/co-cn
|
4ea18441fa1842eb5d87a90ab8f28f3d803ef1a5
|
[
"BSD-2-Clause"
] | null | null | null |
co_cn/net/__init__.py
|
zhujiagang/co-cn
|
4ea18441fa1842eb5d87a90ab8f28f3d803ef1a5
|
[
"BSD-2-Clause"
] | 1
|
2019-02-14T08:53:24.000Z
|
2019-02-14T08:53:24.000Z
|
from .co_occ import Model as CO_CN
| 17.5
| 34
| 0.8
| 8
| 35
| 3.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171429
| 35
| 1
| 35
| 35
| 0.896552
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
51f65e66e62b166c461efa2b74952bf4217885d6
| 46
|
py
|
Python
|
lexos/io/__init__.py
|
scottkleinman/lexos
|
d362ddd05ef23b5173ce303eb7b08ff3583ac709
|
[
"MIT"
] | null | null | null |
lexos/io/__init__.py
|
scottkleinman/lexos
|
d362ddd05ef23b5173ce303eb7b08ff3583ac709
|
[
"MIT"
] | null | null | null |
lexos/io/__init__.py
|
scottkleinman/lexos
|
d362ddd05ef23b5173ce303eb7b08ff3583ac709
|
[
"MIT"
] | null | null | null |
"""__init__.py."""
from .basic import Loader
| 11.5
| 25
| 0.673913
| 6
| 46
| 4.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 46
| 3
| 26
| 15.333333
| 0.675
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
cfe68ed15267082f11c7543cbef77bcc0d7414e3
| 4,818
|
py
|
Python
|
test/integration/replicasets/test_list_replicasets.py
|
RunsFor/cartridge-cli
|
10ffc64450ca96452b0bd17a9000071382f34118
|
[
"BSD-2-Clause"
] | 48
|
2019-08-29T10:05:01.000Z
|
2022-01-25T16:41:59.000Z
|
test/integration/replicasets/test_list_replicasets.py
|
RunsFor/cartridge-cli
|
10ffc64450ca96452b0bd17a9000071382f34118
|
[
"BSD-2-Clause"
] | 306
|
2019-08-30T13:41:14.000Z
|
2022-03-25T09:45:03.000Z
|
test/integration/replicasets/test_list_replicasets.py
|
RunsFor/cartridge-cli
|
10ffc64450ca96452b0bd17a9000071382f34118
|
[
"BSD-2-Clause"
] | 20
|
2019-08-29T14:30:21.000Z
|
2022-03-18T12:11:24.000Z
|
import pytest
from integration.replicasets.utils import set_instance_zone
from utils import run_command_and_get_output, write_conf
def test_default_application(cartridge_cmd, default_project_with_instances):
project = default_project_with_instances.project
# setup replicasets
cmd = [
cartridge_cmd, 'replicasets', 'setup',
'--bootstrap-vshard',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
# list replicasets
cmd = [
cartridge_cmd, 'replicasets', 'list',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
assert output.strip() == """• Current replica sets:
• router
Role: failover-coordinator | vshard-router | app.roles.custom
★ router localhost:3301
• s-1 default | 1
Role: vshard-storage
★ s1-master localhost:3302
• s1-replica localhost:3303
• s-2 default | 1
Role: vshard-storage
★ s2-master localhost:3304
• s2-replica localhost:3305"""
def test_no_joined_instances(cartridge_cmd, project_with_instances):
project = project_with_instances.project
# list replicasets
cmd = [
cartridge_cmd, 'replicasets', 'list',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 1
assert "No instances joined to cluster found" in output
def test_list(cartridge_cmd, project_with_instances):
project = project_with_instances.project
instances = project_with_instances.instances
router = instances['router']
s1_master = instances['s1-master']
s1_replica = instances['s1-replica']
s1_replica2 = instances['s1-replica-2']
# setup replicasets
rpl_cfg_path = project.get_replicasets_cfg_path()
rpl_cfg = {
'router': {
'roles': ['vshard-router', 'app.roles.custom', 'failover-coordinator'],
'instances': [router.name],
},
's-1': {
'roles': ['vshard-storage'],
'instances': [s1_master.name, s1_replica.name, s1_replica2.name],
'weight': 1.234,
'vshard_group': 'hot',
'all_rw': True,
},
}
write_conf(rpl_cfg_path, rpl_cfg)
cmd = [
cartridge_cmd, 'replicasets', 'setup',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
# get current topology
cmd = [
cartridge_cmd, 'replicasets', 'list',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
assert output.strip() == """• Current replica sets:
• router
Role: failover-coordinator | vshard-router | app.roles.custom
★ router localhost:3301
• s-1 hot | 1.234 | ALL RW
Role: vshard-storage
★ s1-master localhost:3302
• s1-replica localhost:3303
• s1-replica-2 localhost:3304"""
def test_list_with_zones(cartridge_cmd, project_with_instances):
project = project_with_instances.project
instances = project_with_instances.instances
if project.name == 'my-old-project':
pytest.skip("Old cartridge doesn't support zones")
router = instances['router']
s1_master = instances['s1-master']
s1_replica = instances['s1-replica']
s1_replica2 = instances['s1-replica-2']
# setup replicasets
rpl_cfg_path = project.get_replicasets_cfg_path()
rpl_cfg = {
'router': {
'roles': ['vshard-router', 'app.roles.custom', 'failover-coordinator'],
'instances': [router.name],
},
's-1': {
'roles': ['vshard-storage'],
'instances': [s1_master.name, s1_replica.name, s1_replica2.name],
'vshard_group': 'hot',
},
}
write_conf(rpl_cfg_path, rpl_cfg)
cmd = [
cartridge_cmd, 'replicasets', 'setup',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
# set storages zones
admin_api_url = router.get_admin_api_url()
set_instance_zone(admin_api_url, router.name, "Mordor")
set_instance_zone(admin_api_url, s1_master.name, "Hogwarts")
set_instance_zone(admin_api_url, s1_replica.name, "Narnia")
# get current topology
cmd = [
cartridge_cmd, 'replicasets', 'list',
]
rc, output = run_command_and_get_output(cmd, cwd=project.path)
assert rc == 0
assert output.strip() == """• Current replica sets:
• router
Role: failover-coordinator | vshard-router | app.roles.custom
★ router localhost:3301 Mordor
• s-1 hot | 1
Role: vshard-storage
★ s1-master localhost:3302 Hogwarts
• s1-replica localhost:3303 Narnia
• s1-replica-2 localhost:3304"""
| 29.378049
| 83
| 0.63242
| 594
| 4,818
| 4.947811
| 0.156566
| 0.042872
| 0.06805
| 0.043552
| 0.819667
| 0.763525
| 0.727458
| 0.708404
| 0.708404
| 0.694454
| 0
| 0.03172
| 0.254047
| 4,818
| 163
| 84
| 29.558282
| 0.779633
| 0.030718
| 0
| 0.663934
| 0
| 0
| 0.341772
| 0
| 0
| 0
| 0
| 0
| 0.090164
| 1
| 0.032787
| false
| 0
| 0.02459
| 0
| 0.057377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c9282737c7d45d0c39f8985609f52d92a3a0217
| 4,272
|
py
|
Python
|
tests/integration/infra/api/devices/http_endpoints/test_index_controller/test_list_devices.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 19
|
2020-01-25T22:52:09.000Z
|
2022-03-20T13:45:10.000Z
|
tests/integration/infra/api/devices/http_endpoints/test_index_controller/test_list_devices.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 15
|
2019-10-07T18:18:40.000Z
|
2020-10-17T15:47:39.000Z
|
tests/integration/infra/api/devices/http_endpoints/test_index_controller/test_list_devices.py
|
mirumon/mirumon-backend
|
9b4d914b67dcc839ed8264f470e822dc22c98ad7
|
[
"MIT"
] | 1
|
2020-01-20T14:16:29.000Z
|
2020-01-20T14:16:29.000Z
|
import uuid
import pytest
from mirumon.domain.devices.entities import Device
pytestmark = [pytest.mark.asyncio]
async def test_devices_list_without_registered_devices(app, client):
url = app.url_path_for("devices:list")
response = await client.get(url)
assert response.status_code == 200
assert response.json() == []
async def test_list_with_one_offline_device(app, client, devices_repo) -> None:
device_id = uuid.uuid4()
await devices_repo.create(Device(id=device_id, name="nick-laptop", properties={}))
expected_device = {
"id": str(device_id),
"online": False,
"name": "nick-laptop",
}
url = app.url_path_for("devices:list")
response = await client.get(url)
items = response.json()
assert response.status_code == 200
assert [expected_device] == items
async def test_list_with_two_offline_device(app, client, devices_repo) -> None:
device_id = uuid.uuid4()
device_id2 = uuid.uuid4()
await devices_repo.create(Device(id=device_id, name="my device 1", properties={}))
await devices_repo.create(Device(id=device_id2, name="my device 2", properties={}))
expected_device = {
"id": str(device_id),
"online": False,
"name": "my device 1",
}
expected_device2 = {
"id": str(device_id2),
"online": False,
"name": "my device 2",
}
url = app.url_path_for("devices:list")
response = await client.get(url)
items = response.json()
assert response.status_code == 200
assert [expected_device, expected_device2] == items
async def test_list_with_one_online_device_and_one_offline_device(
app, client, devices_repo, device_factory
) -> None:
online_device_id = uuid.uuid4()
offline_device_id = uuid.uuid4()
await devices_repo.create(
Device(id=offline_device_id, name="laptop", properties={})
)
expected_online_device = {
"id": str(online_device_id),
"online": True,
"name": "desktop",
}
expected_offline_device = {
"id": str(offline_device_id),
"online": False,
"name": "laptop",
}
async with device_factory(
device_id=online_device_id, name=expected_online_device["name"]
):
url = app.url_path_for("devices:list")
response = await client.get(url)
items = response.json()
assert response.status_code == 200
assert [expected_offline_device, expected_online_device] == items
async def test_list_with_one_online_device_only(app, client, device_factory) -> None:
online_device_id = uuid.uuid4()
expected_online_device = {
"id": str(online_device_id),
"online": True,
"name": f"Device-{online_device_id}",
}
async with device_factory(device_id=online_device_id):
url = app.url_path_for("devices:list")
response = await client.get(url)
items = response.json()
assert response.status_code == 200
assert [expected_online_device] == items
async def test_list_with_two_online_device_and_one_offline_device(
app, client, devices_repo, device_factory
) -> None:
online_device_id = uuid.uuid4()
online_device_id2 = uuid.uuid4()
offline_device_id = uuid.uuid4()
await devices_repo.create(
Device(id=offline_device_id, name="old pc", properties={})
)
expected_online_device = {
"id": str(online_device_id),
"online": True,
"name": f"Device-{online_device_id}",
}
expected_online_device2 = {
"id": str(online_device_id2),
"online": True,
"name": f"Device-{online_device_id2}",
}
expected_offline_device = {
"id": str(offline_device_id),
"online": False,
"name": "old pc",
}
async with device_factory(device_id=online_device_id):
async with device_factory(device_id=online_device_id2):
url = app.url_path_for("devices:list")
response = await client.get(url)
items = response.json()
assert response.status_code == 200
assert [
expected_offline_device,
expected_online_device,
expected_online_device2,
] == items
| 29.061224
| 87
| 0.643727
| 523
| 4,272
| 4.952199
| 0.118547
| 0.120463
| 0.075676
| 0.045946
| 0.834363
| 0.822394
| 0.801931
| 0.774131
| 0.746718
| 0.676834
| 0
| 0.012943
| 0.240403
| 4,272
| 146
| 88
| 29.260274
| 0.785208
| 0
| 0
| 0.543103
| 0
| 0
| 0.083567
| 0.01779
| 0
| 0
| 0
| 0
| 0.103448
| 1
| 0
| false
| 0
| 0.025862
| 0
| 0.025862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7a2912671dd522c4bc0cd8edc8f612b7edbc0ad0
| 44
|
py
|
Python
|
functions/villain/post/postVillain.py
|
jeffpignataro/hero-villan-serverless-api
|
5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df
|
[
"MIT"
] | null | null | null |
functions/villain/post/postVillain.py
|
jeffpignataro/hero-villan-serverless-api
|
5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df
|
[
"MIT"
] | null | null | null |
functions/villain/post/postVillain.py
|
jeffpignataro/hero-villan-serverless-api
|
5c28e03f1a8b5c4de6e72c9cf718256b34e4a2df
|
[
"MIT"
] | null | null | null |
from models.villain.villain import villain
| 22
| 43
| 0.840909
| 6
| 44
| 6.166667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 1
| 44
| 44
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7a50044d65a0e32345e057984558059246ba2672
| 32
|
py
|
Python
|
python_tests/subprocess.py
|
qihqi/custom_loader
|
aaf54c8465a48ef3162dd5cbf9ac3185c44b3e30
|
[
"BSD-3-Clause"
] | null | null | null |
python_tests/subprocess.py
|
qihqi/custom_loader
|
aaf54c8465a48ef3162dd5cbf9ac3185c44b3e30
|
[
"BSD-3-Clause"
] | null | null | null |
python_tests/subprocess.py
|
qihqi/custom_loader
|
aaf54c8465a48ef3162dd5cbf9ac3185c44b3e30
|
[
"BSD-3-Clause"
] | null | null | null |
import subprocess
print('done')
| 10.666667
| 17
| 0.78125
| 4
| 32
| 6.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09375
| 32
| 2
| 18
| 16
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8fffe9d25be75046fdde804fd31b0cef84183863
| 2,365
|
py
|
Python
|
gen_ts_frames_from_video.py
|
GuilinZ/conditional-lane-detection
|
7a98dcc0e0e7f48773cf97e9a0d9fd05c0f0e083
|
[
"Apache-2.0"
] | null | null | null |
gen_ts_frames_from_video.py
|
GuilinZ/conditional-lane-detection
|
7a98dcc0e0e7f48773cf97e9a0d9fd05c0f0e083
|
[
"Apache-2.0"
] | null | null | null |
gen_ts_frames_from_video.py
|
GuilinZ/conditional-lane-detection
|
7a98dcc0e0e7f48773cf97e9a0d9fd05c0f0e083
|
[
"Apache-2.0"
] | null | null | null |
import cv2
import numpy as np
import json
path = './freeway_drive.MP4'
cap = cv2.VideoCapture(path)
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
js_line = '{"lanes": [[-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 410, 468, 483, 477, 460, 437, 412, 387, 356, 322, 288, 254, 220, 186, 152, 118, 84, 50, 16, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2], [-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 546, 569, 577, 577, 574, 571, 560, 549, 538, 527, 516, 504, 493, 482, 471, 460, 448, 437, 426, 415, 404, 393, 381, 370, 359, 348, 337, 326, 314, 303, 292, 281, 270, 258, 247, 236, 225, 214, 203, 191, 180, 169, 158, 147, 136], [-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 588, 641, 684, 705, 722, 740, 757, 769, 780, 792, 803, 815, 827, 838, 850, 861, 873, 885, 896, 908, 919, 931, 943, 954, 966, 977, 989, 1001, 1012, 1024, 1035, 1047, 1059, 1070, 1082, 1093, 1105, 1116, 1128, 1140, 1151, 1163, 1174, 1186, 1198], [-2, -2, -2, -2, -2, -2, -2, -2, -2, -2, 559, 676, 742, 799, 838, 877, 916, 955, 991, 1022, 1053, 1085, 1116, 1147, 1178, 1210, 1241, 1272, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2, -2]], "h_samples": [160, 170, 180, 190, 200, 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, 410, 420, 430, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 640, 650, 660, 670, 680, 690, 700, 710], '
file_line = '"raw_file": "clips/0530/1492627171538356342_0/20.jpg"}\n'
fp = open('./datasets/video_frames/test_label.json','w')
count = 0
for i in range(num_frames):
    _, frame = cap.read()
    if i % 100 == 0:
        print(i)
    # if i % 2 == 0:
    #     continue
    if i < 600:
        continue
    if i > 1200:
        break
    # if frame is None:
    #     count += 1
    #     print('None here:', i)
    if frame is not None:
        frame = frame[:720, 320:1600, :]
        # cv2.imwrite('./datasets/video_frames/clips/{:0>4d}.jpg'.format(i), frame)
        # fp.write(js_line + '"raw_file": "clips/{:0>4d}.jpg"'.format(i) + '}\n')
        cv2.imwrite('./datasets/video_frames/720_60fps_clips/{:0>4d}.jpg'.format(i), frame)
        fp.write(js_line + '"raw_file": "720_60fps_clips/{:0>4d}.jpg"'.format(i) + '}\n')
print('Done')
| 67.571429
| 1,374
| 0.541649
| 437
| 2,365
| 2.87643
| 0.540046
| 0.14638
| 0.207637
| 0.260939
| 0.234686
| 0.188544
| 0.188544
| 0.172633
| 0.145585
| 0.145585
| 0
| 0.399678
| 0.211839
| 2,365
| 35
| 1,375
| 67.571429
| 0.274678
| 0.097252
| 0
| 0
| 0
| 0.043478
| 0.740602
| 0.076598
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.130435
| 0
| 0.130435
| 0.086957
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
64fbc34b75a706e1799a524bc9146853351c8aa2
| 39,364
|
py
|
Python
|
msksidekick.py
|
delphix/msksidekick
|
d8e06200ce7a5e6777bfd5286f8df63cfee39f30
|
[
"Apache-2.0"
] | 4
|
2021-02-23T19:23:09.000Z
|
2021-08-17T12:56:00.000Z
|
msksidekick.py
|
delphix/msksidekick
|
d8e06200ce7a5e6777bfd5286f8df63cfee39f30
|
[
"Apache-2.0"
] | 3
|
2021-03-05T21:49:47.000Z
|
2021-04-29T20:59:03.000Z
|
msksidekick.py
|
delphix/msksidekick
|
d8e06200ce7a5e6777bfd5286f8df63cfee39f30
|
[
"Apache-2.0"
] | 1
|
2021-10-13T00:40:04.000Z
|
2021-10-13T00:40:04.000Z
|
# Author : Ajay Thotangare
# Created : 05/11/2020
# Purpose : Sidekick for masking server.It heps to
# 1) Load balance masking job
# 2) Sync job/env/global objects/engines
# 3) Backup / Recover metadata to/from filesystem
############################################################################
# Copyright and license:
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2020 by Delphix. All rights reserved.
#
# Description:
#
# Call this tool to run masking job,backup metadata, sync engines manually or via scheduler.
#
# ================================================================================
import collections
import os
import sys
import traceback
import click
import mskpkg.globals as globals
# import sqlite3
# import atexit
from mskpkg.DxLogging import print_debug
from mskpkg.banner import banner
from mskpkg.masking import masking
from mskpkg.virtualization import virtualization
from pathlib import Path
# atexit.register(print, "Program exited successfully!")
VERSION = "2.0.5-rc3"
# con = sqlite3.connect('msksidekick.db')
# cur = con.cursor()
# script_dir = os.path.dirname(os.path.realpath(__file__))
# script_dir = getattr(
# sys, "_MEIPASS", os.path.dirname(os.path.abspath(__file__))
# )
# script_dir = Path(__file__).resolve().parent
if getattr(sys, 'frozen', False):
# If the application is run as a bundle, the PyInstaller bootloader
# extends the sys module by a flag frozen=True and sets the app
# path into variable _MEIPASS'.
# script_dir = sys._MEIPASS
script_dir = os.path.dirname(sys.executable)
else:
script_dir = os.path.dirname(os.path.abspath(__file__))
output_dir = "{}/output".format(script_dir)
# print(script_dir)
# print(output_dir)
try:
# print("output_dir = {}".format(output_dir))
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
except Exception as e:
print("Unable to create {} directory in current folder".format(output_dir))
print(str(e))
raise
class Config(object):
def __init__(self):
self.verbose = False
self.debug = False
pass_config = click.make_pass_decorator(Config, ensure=True)
class OrderedGroup(click.Group):
def __init__(self, name=None, commands=None, **attrs):
super(OrderedGroup, self).__init__(name, commands, **attrs)
#: the registered subcommands by their exported names.
self.commands = commands or collections.OrderedDict()
def list_commands(self, ctx):
return self.commands
def print_banner():
bannertext = banner()
mybannero = bannertext.banner_sl_box_open(text=" ")
mybannera = bannertext.banner_sl_box_addline(
text="Masking Sidekick - {}".format(VERSION)
)
mybannerc = bannertext.banner_sl_box_close()
# print(mybannero)
print(" ")
print(mybannera)
print(mybannerc)
def print_debug_banner(txtmsg):
bannertext = banner()
mybannero = bannertext.banner_sl_box_open(text=" ")
mybannera = bannertext.banner_sl_box_addline(txtmsg)
mybannerc = bannertext.banner_sl_box_close()
print_debug(" ")
print_debug(mybannero)
print_debug(mybannera)
print_debug(mybannerc)
print_debug(" ")
def print_exception_exit1():
message_String = "\nERROR: Exit Code:1\n"
type_, value_, traceback_ = sys.exc_info()
whole_message = traceback.format_exception(type_, value_, traceback_)
res = []
for sub in whole_message:
if "raise Exception" not in sub:
sub = sub.strip()
res.append(sub.replace("\n", ""))
message_String = message_String + "\n".join(res[-2:])
print(message_String, file=sys.stderr)
sys.exit(1)
# Common Options
# @click.group()
@click.group(cls=OrderedGroup)
@click.option("--verbose", "-v", is_flag=True)
@click.option("--debug", "-d", is_flag=True)
@pass_config
def cli(config, verbose, debug):
if verbose:
config.verbose = verbose
if debug:
config.debug = debug
# gen-dxtoolsconf
@cli.command()
@pass_config
def version(config):
"""Script Version"""
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print_banner()
click.echo("Script Version : {}".format(VERSION))
# add_engine
@cli.command()
@click.option(
"--mskengname",
"-m",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--totalgb",
"-t",
default="",
prompt="Enter total memory in GB for masking engine",
help="Total memory in GB for masking engine",
)
@click.option(
"--systemgb",
"-s",
default="",
prompt="Enter system memory in GB for masking engine",
help="System memory in GB for masking engine",
)
@click.option(
"--poolname",
"-p",
default="Default",
prompt="Enter Pool Name for Engine",
help="Pool name to assign engine",
)
# @click.option('--enabled','-e', default='Y', prompt='Enable Masking Engine for pooling',
# type=click.Choice(['Y', 'N'], case_sensitive=True),
# help='Add Engine to Pool')
@pass_config
# def add_engine(config, mskengname, totalgb, systemgb, mskaiagntuser, enabled):
def add_engine(config, mskengname, totalgb, systemgb, poolname):
"""This module will add engine to pool"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
globals.arguments["--debug"] = config.debug
globals.arguments["--config"] = "./dxtools.conf"
mskai = masking(
config,
mskengname=mskengname,
totalgb=totalgb,
systemgb=systemgb,
poolname=poolname,
)
mskai.add_engine()
sys.exit(0)
# list_engine
@cli.command()
@pass_config
def list_engine(config):
"""This module will list engine from pool"""
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
try:
mskai = masking(config, noparam="noparam")
mskai.list_engine()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# del_engine
@cli.command()
@click.option(
"--mskengname",
"-m",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@pass_config
def del_engine(config, mskengname):
"""This module will remove engine from pool"""
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
click.echo("mskengname = {0}".format(mskengname))
mskai = masking(config, mskengname=mskengname)
mskai.del_engine()
sys.exit(0)
# pulljoblist
@cli.command()
@click.option(
"--mskengname",
"-m",
default="all",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def pull_joblist(config, mskengname, username, password, protocol):
"""This module will pull joblist from engine"""
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
click.echo("mskengname = {0}".format(mskengname))
click.echo("username = {0}".format(username))
click.echo("protocol = {0}".format(protocol))
print_banner()
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
)
mskai.pull_joblist()
sys.exit(0)
# pull_currjoblist
@cli.command()
@click.option(
"--jobname", "-j", default="", help="Masking Job name from Masking Engine"
)
@click.option(
"--envname", "-e", default="mskenv", help="Environment Name of Masking Job"
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
default="mskenv",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option("--poolname", default="Default", help="Pool name of engine")
@pass_config
def pull_currjoblist(
config, jobname, envname, username, password, protocol, poolname
):
"""This module will pull current job execution list from all engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" jobname = {0}".format(jobname))
print(" envname = {0}".format(envname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
try:
mskai = masking(
config,
jobname=jobname,
envname=envname,
username=username,
password=password,
protocol=protocol,
poolname=poolname,
)
mskai.pull_currjoblist()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# gen-dxtoolsconf
@cli.command()
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def gen_dxtools_conf(config, protocol):
"""This module will generate dxtools conf file for engine"""
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print_banner()
mskai = masking(config, protocol=protocol)
mskai.gen_dxtools_conf()
sys.exit(0)
# syncjob
@cli.command()
@click.option(
"--srcmskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--srcenvname",
default="",
prompt="Enter Source Masking Engine env name",
help="Source Masking Engine Environment name",
)
@click.option(
"--srcjobname",
default="",
prompt="Enter Source Masking Engine job name",
help="Source Masking Engine Job name",
)
@click.option(
"--tgtmskengname",
default="",
prompt="Enter Target Masking Engine name",
help="Target Masking Engine name",
)
@click.option(
"--tgtenvname",
default="",
prompt="Enter Target Masking Engine env name",
help="Target Masking Engine Environment name",
)
@click.option(
"--globalobjsync",
"-g",
default=False,
is_flag=True,
prompt="Sync global Objects",
help="Sync global Objects",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def sync_job(
config,
srcmskengname,
srcenvname,
srcjobname,
tgtmskengname,
tgtenvname,
globalobjsync,
username,
password,
protocol,
):
"""This module will sync particular job between 2 engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" srcmskengname = {0}".format(srcmskengname))
print(" srcenvname = {0}".format(srcenvname))
print(" srcjobname = {0}".format(srcjobname))
print(" tgtmskengname = {0}".format(tgtmskengname))
print(" globalobjsync = {0}".format(globalobjsync))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
try:
mskai = masking(
config,
srcmskengname=srcmskengname,
srcenvname=srcenvname,
srcjobname=srcjobname,
tgtmskengname=tgtmskengname,
tgtenvname=tgtenvname,
globalobjsync=globalobjsync,
username=username,
password=password,
protocol=protocol,
)
mskai.sync_job()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# syncenv
@cli.command()
@click.option(
"--srcmskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--srcenvname",
default="",
prompt="Enter Source Masking Engine env name",
help="Source Masking Engine Environment name",
)
@click.option(
"--tgtmskengname",
default="",
prompt="Enter Target Masking Engine name",
help="Target Masking Engine name",
)
@click.option(
"--tgtenvname",
default="",
prompt="Enter Target Masking Engine env name",
help="Target Masking Engine Environment name",
)
@click.option(
"--globalobjsync",
"-g",
default=False,
is_flag=True,
prompt="Sync global Objects",
help="Sync global Objects",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def sync_env(
config,
srcmskengname,
srcenvname,
tgtmskengname,
tgtenvname,
globalobjsync,
username,
password,
protocol,
):
"""This module will sync particular env between 2 engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" srcmskengname = {0}".format(srcmskengname))
print(" srcenvname = {0}".format(srcenvname))
print(" tgtmskengname = {0}".format(tgtmskengname))
print(" tgtenvname = {0}".format(tgtenvname))
print(" globalobjsync = {0}".format(globalobjsync))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
try:
mskai = masking(
config,
srcmskengname=srcmskengname,
srcenvname=srcenvname,
tgtmskengname=tgtmskengname,
tgtenvname=tgtenvname,
globalobjsync=globalobjsync,
username=username,
password=password,
protocol=protocol,
)
mskai.sync_env()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# synceng
@cli.command()
@click.option(
"--srcmskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--tgtmskengname",
default="",
prompt="Enter Target Masking Engine name",
help="Target Masking Engine name",
)
@click.option(
"--globalobjsync",
"-g",
default=True,
is_flag=True,
prompt="Sync global Objects",
help="Sync global Objects",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option(
"--delextra",
default=False,
is_flag=True,
help="Delete extra objects from target",
)
@click.option(
"--excludenonadmin",
default="Y",
type=click.Choice(["Y", "N"], case_sensitive=False),
help="Exclude to sync non admin users. Supported values Y|N",
)
@pass_config
def sync_eng(
config,
srcmskengname,
tgtmskengname,
globalobjsync,
username,
password,
protocol,
delextra,
excludenonadmin,
):
"""This module will complete sync 2 engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" srcmskengname = {0}".format(srcmskengname))
print(" tgtmskengname = {0}".format(tgtmskengname))
print(" globalobjsync = {0}".format(globalobjsync))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" delextra = {0}".format(delextra))
print(" excludenonadmin = {0}".format(excludenonadmin))
print(" ")
globalobjsync = True
try:
mskai = masking(
config,
srcmskengname=srcmskengname,
tgtmskengname=tgtmskengname,
globalobjsync=globalobjsync,
username=username,
password=password,
protocol=protocol,
delextra=delextra,
excludenonadmin=excludenonadmin,
)
mskai.sync_eng()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# sync_globalobj
@cli.command()
@click.option(
"--srcmskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--tgtmskengname",
default="",
prompt="Enter Target Masking Engine name",
help="Target Masking Engine name",
)
@click.option(
"--globalobjsync",
"-g",
default=False,
is_flag=True,
prompt="Sync global Objects",
help="Sync global Objects",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def sync_globalobj(
config,
srcmskengname,
tgtmskengname,
globalobjsync,
username,
password,
protocol,
):
"""This module will sync global objects between 2 engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" srcmskengname = {0}".format(srcmskengname))
print(" tgtmskengname = {0}".format(tgtmskengname))
print(" globalobjsync = {0}".format(globalobjsync))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
try:
mskai = masking(
config,
srcmskengname=srcmskengname,
tgtmskengname=tgtmskengname,
globalobjsync=globalobjsync,
username=username,
password=password,
protocol=protocol,
)
mskai.sync_globalobj()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# cleanup-eng
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option(
"--includeadmin",
default=False,
is_flag=True,
help="Include to delete admin users",
)
@pass_config
def cleanup_eng(
config, mskengname, username, password, protocol, includeadmin
):
"""This module will complete cleanup engine for fresh start"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" includeadmin = {0}".format(includeadmin))
try:
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
includeadmin=includeadmin,
)
mskai.cleanup_eng()
sys.exit(0)
except Exception as e:
print_exception_exit1()
# runjob
@cli.command()
@click.option(
"--jobname",
"-j",
default="",
prompt="Enter Masking Job Name",
help="Masking Job name from Masking Engine",
)
@click.option(
"--envname",
"-e",
default="mskenv",
prompt="Enter Environment Name of Masking Job",
help="Environment Name of Masking Job",
)
@click.option(
"--run",
"-r",
default=False,
is_flag=True,
help="Execute Job. In Absence display only decision",
)
@click.option(
"--mock",
"-m",
default=False,
is_flag=True,
help="Mock run - just for demos",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option(
"--dxtoolkit_path",
default="",
prompt="Enter dxtoolkit path",
help="dxtoolkit full path",
)
@click.option(
"--poolname", "-p", default="Default", help="Pool name to assign engine"
)
@pass_config
def run_job(
config,
jobname,
envname,
run,
mock,
username,
password,
protocol,
dxtoolkit_path,
poolname,
):
"""This module will execute masking job on best candidate engine"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" jobname = {0}".format(jobname))
print(" envname = {0}".format(envname))
print(" run = {0}".format(run))
print(" mock = {0}".format(mock))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" dxtoolkit_path = {0}".format(dxtoolkit_path))
print(" poolname = {0}".format(poolname))
globals.arguments["--debug"] = config.debug
globals.arguments["--config"] = "./dxtools.conf"
globals.arguments["--all"] = True
globals.arguments["--engine"] = None
globals.arguments["--logdir"] = "./dx_skel.log"
globals.arguments["--parallel"] = None
globals.arguments["--poll"] = "10"
globals.arguments["--version"] = False
globals.arguments["--single_thread"] = True
globals.arguments["--dxtoolkit_path"] = dxtoolkit_path
try:
mskai = masking(
config,
jobname=jobname,
envname=envname,
run=run,
mock=mock,
username=username,
password=password,
protocol=protocol,
poolname=poolname,
)
if not mock:
mskai.pull_jobexeclist()
chk_status = mskai.chk_job_running()
# print("chk_status={}".format(chk_status))
if chk_status != 0:
# print(
# " Job {} on Env {} is already running on engine {}. Please retry later".format(
# jobname, envname, chk_status
# )
# )
# sys.exit(1)
raise Exception(
"ERROR: Job {} on Env {} is already running on engine {}. Please retry later".format(jobname, envname,
chk_status))
except Exception as e:
print_exception_exit1()
try:
print_debug(" ")
print_debug(" ")
print_debug(" ")
print_debug(" ")
print_debug_banner("Capture CPU usage data...")
scriptdir = os.path.dirname(os.path.abspath(__file__))
outputdir = os.path.join(scriptdir, "output")
print_debug("dxtoolkit_path: {}".format(dxtoolkit_path))
aive = virtualization(
config,
config_file_path="{}/dxtools.conf".format(scriptdir),
scriptdir=scriptdir,
outputdir=outputdir,
protocol=protocol,
dxtoolkit_path=dxtoolkit_path,
)
print_debug("dxtoolkit_path: {}".format(dxtoolkit_path))
aive.gen_cpu_file()
print_debug("Capture CPU usage data : done")
print_debug(" ")
print_debug(" ")
print_debug(" ")
print_debug(" ")
except Exception as e:
print_exception_exit1()
print_debug_banner("Execute Job run module...")
try:
mskai = masking(
config,
jobname=jobname,
envname=envname,
run=run,
mock=mock,
username=username,
password=password,
protocol=protocol,
poolname=poolname,
)
mskai.run_job()
except Exception as e:
print_exception_exit1()
sys.exit(0)
# test-connectors
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Source Masking Engine name",
help="Source Masking Engine name",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def test_connectors(config, mskengname, username, password, protocol):
"""This module will help to test all connectors"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
try:
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
)
mskai.test_all_connectors()
except Exception as e:
print_exception_exit1()
sys.exit(0)
# list_green_eng
@cli.command()
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--mock",
"-m",
default=False,
is_flag=True,
help="Mock run - just for demos",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option(
"--dxtoolkit_path",
default="",
prompt="Enter dxtoolkit path",
help="dxtoolkit full path",
)
@pass_config
def list_eng_usage(config, username, password, protocol, mock, dxtoolkit_path):
"""This module will find green engines"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mock = {0}".format(mock))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" dxtoolkit_path = {0}".format(dxtoolkit_path))
globals.arguments["--debug"] = config.debug
globals.arguments["--config"] = "{}/dxtools.conf".format(script_dir)
globals.arguments["--all"] = True
globals.arguments["--engine"] = None
globals.arguments["--logdir"] = "{}/dx_skel.log".format(output_dir)
globals.arguments["--parallel"] = None
globals.arguments["--poll"] = "10"
globals.arguments["--version"] = False
globals.arguments["--single_thread"] = True
globals.arguments["--dxtoolkit_path"] = dxtoolkit_path
try:
mskai = masking(
config,
mock=mock,
username=username,
password=password,
protocol=protocol,
)
if not mock:
mskai.pull_jobexeclist()
except Exception as e:
print_exception_exit1()
try:
print_debug(" ")
print_debug("Capture CPU usage data...")
scriptdir = os.path.dirname(os.path.abspath(__file__))
outputdir = os.path.join(scriptdir, "output")
print_debug("dxtoolkit_path: {}".format(dxtoolkit_path))
aive = virtualization(
config,
config_file_path="{}/dxtools.conf".format(script_dir),
scriptdir=scriptdir,
outputdir=outputdir,
protocol=protocol,
dxtoolkit_path=dxtoolkit_path,
)
print_debug("dxtoolkit_path: {}".format(dxtoolkit_path))
aive.gen_cpu_file()
print_debug("Capture CPU usage data : done")
print_debug(" ")
except Exception as e:
print("Error in VE module")
# sys.exit(1)
# raise Exception("ERROR: Error in VE module")
print_exception_exit1()
try:
mskai = masking(
config,
mock=mock,
username=username,
password=password,
protocol=protocol,
)
mskai.list_eng_usage()
except Exception as e:
print_exception_exit1()
sys.exit(0)
# offline_backup_eng
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--backup_dir", default="", prompt="Enter Backup Path", help="Backup Path"
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def offline_backup_eng(
config, mskengname, username, password, protocol, backup_dir
):
"""This module will offline backup engine"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" backup_dir = {0}".format(backup_dir))
try:
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
backup_dir=backup_dir,
)
mskai.offline_backup_eng()
sys.exit(0)
except Exception as e:
print_exception_exit1()
sys.exit(0)
# offline_restore_eng
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--backup_dir", default="", prompt="Enter Backup Path", help="Backup Path"
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def offline_restore_eng(
config, mskengname, username, password, protocol, backup_dir
):
"""This module will offline restore engine from backups"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" backup_dir = {0}".format(backup_dir))
try:
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
backup_dir=backup_dir,
)
mskai.offline_restore_eng()
except Exception as e:
print_exception_exit1()
sys.exit(0)
# offline_restore_env
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--backup_dir", default="", prompt="Enter Backup Path", help="Backup Path"
)
@click.option(
"--envname",
"-e",
default="mskenv",
prompt="Enter name of Environment to be restored",
help="Name of Environment to be restored",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking sidekick username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@pass_config
def offline_restore_env(
config, mskengname, envname, username, password, protocol, backup_dir
):
"""This module will offline restore engine from backups"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" envname = {0}".format(envname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" backup_dir = {0}".format(backup_dir))
try:
mskai = masking(
config,
mskengname=mskengname,
envname=envname,
username=username,
password=password,
protocol=protocol,
backup_dir=backup_dir,
)
mskai.offline_restore_env()
except Exception as e:
print_exception_exit1()
sys.exit(0)
# duplicate_connectors
@cli.command()
@click.option(
"--mskengname",
default="",
prompt="Enter Masking Engine name",
help="Masking Engine name",
)
@click.option(
"--username",
"-u",
prompt="Enter Masking username",
help="Masking mskaiagnt username to connect masking engines",
)
@click.password_option(
"--password",
"-p",
help="Masking mskaiagnt password to connect masking engines",
)
@click.option(
"--protocol",
default="https",
help="Enter protocol http|https to access Masking Engines",
)
@click.option(
"--action",
type=click.Choice(['list', 'resolve']),
default="list",
help="List Connector | Rename conflicting connector names ( All conflicting connector names will be renamed )",
)
@pass_config
def duplicate_connectors(
config, mskengname, username, password, protocol, action
):
"""This module will offline backup engine"""
print_banner()
globals.initialize(config.debug, config.verbose, script_dir)
if config.verbose or config.debug:
click.echo("Verbose mode enabled")
print(" mskengname = {0}".format(mskengname))
print(" username = {0}".format(username))
print(" protocol = {0}".format(protocol))
print(" action = {0}".format(action))
print(" ")
try:
mskai = masking(
config,
mskengname=mskengname,
username=username,
password=password,
protocol=protocol,
action=action,
)
mskai.duplicate_connectors()
sys.exit(0)
except Exception as e:
print_exception_exit1()
sys.exit(0)
if __name__ == "__main__":
cli()
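# Example invocations (an illustrative sketch: the engine name, username and
# paths below are placeholders, and the "mskaiagnt" entry point is assumed
# from the help text; depending on the Click version, command names may be
# shown with hyphens instead of underscores):
#   mskaiagnt offline_backup_eng --mskengname mskeng01 -u admin --backup_dir /backups/mskeng01
#   mskaiagnt duplicate_connectors --mskengname mskeng01 -u admin --action list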
# ==== File: src/internals/errors/invalid_transition_error.py | Repo: mabel-dev/salmon | License: MIT ====
from .base_exception import BaseException
class InvalidTransitionError(BaseException):
pass
# ==== File: dashboard/__init__.py | Repo: mueller-stephan/car-classification | License: MIT ====
from .app import app, game_data, server
__all__ = ['app', 'game_data', 'server']
# ==== File: game/optimize_dirty_rects.py | Repo: mouton5000/DiscreteEventApplicationEditor | License: MIT ====
# coding: ascii
# optimize_dirty_rects
"""Optimize a list of dirty rects so that there are no overlapping rects
File version: 1.1
Minimum Python version: 2.4
Inspirations:
"It turns out, that there are quite many combinations...
To represent all cases we use a table."
Detect Overlapping Subrectangles by Herbert Glarner
http://gandraxa.com/detect_overlapping_subrectangles.xml
A way to process rectangle collisions is to categorize them
based on edge inclusion.
"Use wide, shallow graphic elements in preference to tall,
narrow ones. (There are more raster lines, and therefore more
pointer arithmetic, in tall graphics.)"
Fast Blit Strategies: A Mac Programmer's Guide by Kas Thomas
http://www.mactech.com/articles/mactech/Vol.15/15.06/FastBlitStrategies/index.html
The optimized rectangles will tend to be wide and shallow.
Return: new list
The MIT License (MIT)
Copyright (c) 2013 Jason Marshall
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from pygame import Rect
from bisect import bisect_left, insort
from collections import deque
#####import os
import sys
PYTHON_VERSION = sys.version_info[0]
#####DEBUG = int(os.environ.get('DEBUG', '1'))
#####if DEBUG:
##### debug_output = sys.stderr.write
##### debug = lambda x: debug_output(x + '\n')
#####else:
##### debug = lambda x: None
try:
Inf = float('inf')
except ValueError:
#####debug('using fallback infinity')
import decimal
Inf = decimal.Decimal('Infinity')
negInf = -Inf
def optimize_dirty_rects(dirty_rects):
"""remove overlapping areas from a list of dirty rectangles"""
#####debug("INPUT: " + str(dirty_rects))
# Put good rects in a queue
queue = deque()
append_to_queue = queue.append
for r in dirty_rects:
if r:
r.normalize()
append_to_queue(r)
# If there aren't multiple rectangles in the input queue, then more
# optimization is not possible.
if len(queue) <= 1:
#####debug('shortcut 0\n')
return list(queue)
# Get the first rect
append_to_queue = queue.appendleft
pop_from_queue = queue.popleft
r = pop_from_queue()
# Determine the maximum possible size of all the rects combined
extent_of_all_r = r.unionall(queue)
# If any input rectangle covers the maximum possible size of all the rects
# combined, then that rectangle is optimal.
if r == extent_of_all_r:
#####debug('shortcut 1\n')
return [r]
for r2 in queue:
if r2 == extent_of_all_r:
#####debug('shortcut 2\n')
return [r2]
# Create lists that will always be sorted by left edge position,
# right edge position, top edge position and bottom edge position.
# Rects are referenced by unique IDs for compatibility with set.
# These unique IDs are mapped to rects by r_dict.
r_id = id(r)
r_dict = {r_id: r}
edges_l = [(r.left, r_id)]
edges_r = [(r.right, r_id)]
edges_t = [(r.top, r_id)]
edges_b = [(r.bottom, r_id)]
while queue:
r = pop_from_queue()
r_id = id(r)
### Use bisect_left and set to get potential collisions ###
# Get rect IDs with left edges <= r's right edge
collisions = set(_get_rects_left_of_right_edge_inclusive(r, edges_l))
collisions_update = collisions.intersection_update
# Get rect IDs with right edges >= r's left edge
collisions_update(_get_rects_right_of_left_edge_inclusive(r, edges_r))
# Get rect IDs with top edges < rect's bottom edge
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
# Get rect IDs with bottom edges > rect's top edge
collisions_update(_get_rects_below_top_edge_exclusive(r, edges_b))
# Handle collisions with existing optimized rects
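# In the numbered case comments below, the 8-letter codes (e.g. RBTLRBTL)
# index the edge-inclusion cases from Glarner's table cited in the module
# docstring. In the before/after diagrams, '|' marks r, '-' marks r2,
# '+' marks their overlap, '=' marks a newly created rect (r3), and '.'
# is empty space.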
while collisions:
r2_id = collisions.pop()
r2 = r_dict[r2_id]
if r.top == r2.top:
if r.bottom == r2.bottom:
if r.left == r2.left:
if r.right <= r2.right:
#####debug('p001|p002')
# 1: RBTLRBTL: r is not outside of r2; forget r
# ..... .....
# .+++. .---.
# .+++. -> .---.
# .+++. .---.
# ..... .....
# 2: RBTL-BTL:
# ..... .....
# .++-. .---.
# .++-. -> .---.
# .++-. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p003')
# 3: -BTLRBTL: r2 is not outside of r; delete r2
# ..... .....
# .++|. .|||.
# .++|. -> .|||.
# .++|. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
#r.top == r2.top:
#r.bottom == r2.bottom:
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p004|p005')
# 4: RBT-RBTL: r2 is not outside of r; delete r2
# ..... .....
# .|++. .|||.
# .|++. -> .|||.
# .|++. .|||.
# ..... .....
# 5: -BT-RBTL:
# ..... .....
# .|+|. .|||.
# .|+|. -> .|||.
# .|+|. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: #r.right < r2.right:
#####debug('p006|p007')
# 6: RBT--BTL: expand r to encompass r2; delete r2
# ..... .....
# .|+-. .|||.
# .|+-. -> .|||.
# .|+-. .|||.
# ..... .....
# 7: -BT--BT-a:
# ..... .....
# .|--. .|||.
# .|--. -> .|||.
# .|--. .|||.
# ..... .....
r.union_ip(r2)
if r == extent_of_all_r:
#####debug('shortcut 2a\n')
return [r]
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
#r.top == r2.top:
#r.bottom == r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p008|p009')
# 8: RBTLRBT-: r is not outside of r2; forget r
# ..... .....
# .-++. .---.
# .-++. -> .---.
# .-++. .---.
# ..... .....
# 9: RBTL-BT-:
# ..... .....
# .-+-. .---.
# .-+-. -> .---.
# .-+-. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p010|p011')
# 10: -BTLRBT-: expand r to encompass r2; delete r2
# ..... .....
# .-+|. .|||.
# .-+|. -> .|||.
# .-+|. .|||.
# ..... .....
# 11: -BT--BT-b:
# ..... .....
# .--|. .|||.
# .--|. -> .|||.
# .--|. .|||.
# ..... .....
r.union_ip(r2)
if r == extent_of_all_r:
#####debug('shortcut 2b\n')
return [r]
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
#r.top == r2.top:
elif r.bottom < r2.bottom:
if r.left == r2.left:
if r.right <= r2.right:
#####debug('p012|p013')
# 12: RBTLR-TL: r is not outside of r2; forget r
# ..... .....
# .+++. .---.
# .---. -> .---.
# .---. .---.
# ..... .....
# 13: RBTL--TL:
# ..... .....
# .++-. .---.
# .++-. -> .---.
# .---. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p014')
# 14: -BTLR-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .+||. .|||.
# .-... -> .-...
# .-... .-...
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r.height
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
#r.top == r2.top:
#r.bottom < r2.bottom:
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p015|p018')
# 15: RBT-R-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .|++. .|||.
# .|++. -> .|||.
# ..--. ..--.
# ..... .....
# 18: -BT-R-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .|+|. .|||.
# .|+|. -> .|||.
# ..-.. ..-..
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r.height
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
else: #r.right < r2.right
#####debug('p016|p017')
# 16: RBT---TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# Expand r to reach r2's right edge.
# ..... .....
# .|+-. .|||.
# .|+-. -> .|||.
# ..--. ..--.
# ..... .....
# 17: -BT---T-a:
# ..... .....
# .|--. .|||.
# .|--. -> .|||.
# ..--. ..--.
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r.height
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
r.width = r2.right - r.left
#r.top == r2.top:
#r.bottom < r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p019|p020')
# 19: RBTLR-T-: r is not outside of r2; forget r
# ..... .....
# .-++. .---.
# .-++. -> .---.
# .---. .---.
# ..... .....
# 20: RBTL--T-:
# ..... .....
# .-+-. .---.
# .-+-. -> .---.
# .---. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p021|p022')
# 21: -BTLR-T-:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# Expand r to reach r2's left edge.
# ..... .....
# .-+|. .|||.
# .-+|. -> .|||.
# .--.. .--..
# ..... .....
# 22: -BT---T-b:
# ..... .....
# .--|. .|||.
# .--|. -> .|||.
# .--.. .--..
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r.height
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
r.width = r.right - r2.left
r.left = r2.left
#r.top == r2.top:
else: #r.bottom > r2.bottom
if r.left == r2.left:
if r.right >= r2.right:
#####debug('p023|p025')
# 23: R-TLRBTL: r2 is not outside of r; delete r2
# ..... .....
# .+++. .|||.
# .+++. -> .|||.
# .|||. .|||.
# ..... .....
# 25: --TLRBTL:
# ..... .....
# .++|. .|||.
# .++|. -> .|||.
# .|||. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: #r.right < r2.right:
#####debug('p024')
# 24: R-TL-BTL: crop r to the part that is below r2
# ..... .....
# .++-. .---.
# .++-. -> .---.
# .||.. .||..
# ..... .....
r.height -= r2.height
r.top = r2.bottom
#r.top == r2.top:
#r.bottom > r2.bottom
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p026|p029')
# 26: R-T-RBTL: r2 is not outside of r; delete r2
# ..... .....
# .|++. .|||.
# .|++. -> .|||.
# .|||. .|||.
# ..... .....
# 29: --T-RBTL:
# ..... .....
# .|+|. .|||.
# .|+|. -> .|||.
# .|||. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: #r.right < r2.right
#####debug('p027|p028')
# 27: R-T--BTL:
# Split r into r & r3 at the bottom of r2.
# Expand r to encompass r2; delete r2.
# Enqueue the on-bottom parts. Remove r_id
# numbers from the collisions set if their
# rects collide with only r3 or are beside
# only r3.
# ..... .....
# .|+-. .|||.
# .|+-. -> .|||.
# .||.. .==..
# ..... .....
# 28: --T--BT-b:
# ..... .....
# .||-. .|||.
# .||-. -> .|||.
# .||.. .==..
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.height - r2.height)
append_to_queue(r3)
r.height = r2.height
r.union_ip(r2)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
#r.top == r2.top:
#r.bottom > r2.bottom
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p030|p031')
# 30: R-TLRBT-: crop r to the part that is below r2
# ..... .....
# .-++. .---.
# .-++. -> .---.
# ..||. ..||.
# ..... .....
# 31: R-TL-BT-:
# ..... .....
# .-+-. .---.
# .-+-. -> .---.
# ..|.. ..|..
# ..... .....
r.height -= r2.height
r.top = r2.bottom
else: #r.right > r2.right
#####debug('p032|p033')
# 32: --TLRBT-
# Split r into r & r3 at the bottom of r2.
# Expand r to encompass r2; delete r2.
# Enqueue the on-bottom parts. Remove r_id
# numbers from the collisions set if their
# rects collide with only r3 or are beside
# only r3.
# ..... .....
# .-+|. .|||.
# .-+|. -> .|||.
# ..||. ..==.
# ..... .....
# 33: --T--BT-a:
# ..... .....
# .-||. .|||.
# .-||. -> .|||.
# ..||. ..==.
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.height - r2.height)
append_to_queue(r3)
r.height = r2.height
r.union_ip(r2)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
elif r.top < r2.top:
if r.bottom == r2.bottom:
if r.left == r2.left:
if r.right >= r2.right:
#####debug('p034|p036')
# 34: RB-LRBTL: r2 is not outside of r; delete r2
# ..... .....
# .|||. .|||.
# .+++. -> .|||.
# .+++. .|||.
# ..... .....
# 36: -B-LRBTL:
# ..... .....
# .|||. .|||.
# .++|. -> .|||.
# .++|. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: # r.right < r2.right:
#####debug('p035')
# 35: RB-L-BTL: crop r to the part that is above r2
# ..... .....
# .||.. .||..
# .++-. -> .---.
# .++-. .---.
# ..... .....
r.height -= r2.height
#r.top < r2.top:
#r.bottom == r2.bottom:
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p037|p040')
# 37: RB--RBTL: r2 is not outside of r; delete r2
# ..... .....
# .|||. .|||.
# .|++. -> .|||.
# .|++. .|||.
# ..... .....
# 40: -B--RBTL:
# ..... .....
# .|||. .|||.
# .|+|. -> .|||.
# .|+|. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: # r.right < r2.right:
#####debug('p038|p039')
# 38: RB---BTL:
# Split r into r & r3 at the top of r2.
# Expand r to encompass r2; delete r2.
# Enqueue the on-top parts. Remove r_id
# numbers from the collisions set if their
# rects collide with only r3 or are beside
# only r3.
# ..... .....
# .||.. .==..
# .|+-. -> .|||.
# .|+-. .|||.
# ..... .....
# 39: -B---BT-b:
# ..... .....
# .||.. .==..
# .||-. -> .|||.
# .||-. .|||.
# ..... .....
r3 = Rect(r.left, r.top, r.width, r.height - r2.height)
append_to_queue(r3)
r.height = r2.height
r.top = r2.top
r.union_ip(r2)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
collisions_update(_get_rects_below_top_edge_exclusive(r, edges_b))
#r.top < r2.top:
#r.bottom == r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p041|p042')
# 41: RB-LRBT-: crop r to the part that is above r2
# ..... .....
# ..||. ..||.
# .-++. -> .---.
# .-++. .---.
# ..... .....
# 42: RB-L-BT-:
# ..... .....
# ..|.. ..|..
# .-+-. -> .---.
# .-+-. .---.
# ..... .....
r.height -= r2.height
else: #r.right > r2.right
#####debug('p043|p044')
# 43: -B-LRBT-:
# Split r into r & r3 at the top of r2.
# Expand r to encompass r2; delete r2.
# Enqueue the on-top parts. Remove r_id
# numbers from the collisions set if their
# rects collide with only r3 or are beside
# only r3.
# ..... .....
# ..||. ..==.
# .-+|. -> .|||.
# .-+|. .|||.
# ..... .....
# 44: -B---BT-a:
# ..... .....
# ..||. ..==.
# .-||. -> .|||.
# .-||. .|||.
# ..... .....
r3 = Rect(r.left, r.top, r.width, r.height - r2.height)
append_to_queue(r3)
r.height = r2.height
r.top = r2.top
r.union_ip(r2)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
collisions_update(_get_rects_below_top_edge_exclusive(r, edges_b))
#r.top < r2.top:
elif r.bottom < r2.bottom:
if r.left == r2.left:
if r.right == r2.right:
#####debug('p045')
# 45: RB-LR-TL: expand r to encompass r2; delete r2
# ..... .....
# .|||. .|||.
# .+++. -> .|||.
# .---. .|||.
# ..... .....
r.union_ip(r2)
if r == extent_of_all_r:
#####debug('shortcut 2b\n')
return [r]
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
elif r.right < r2.right:
#####debug('p046')
# 46: RB-L--TL: crop r to the part that is above r2
# ..... .....
# .||.. .||..
# .++-. -> .---.
# .---. .---.
# ..... .....
r.height = r2.top - r.top
else: #r.right > r2.right
#####debug('p047')
# 47: -B-LR-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .|||. .|||.
# .++|. -> .|||.
# .--.. .--..
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height = r2.bottom - r.bottom
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
#r.top < r2.top:
#r.bottom < r2.bottom:
elif r.left < r2.left:
if r.right == r2.right:
#####debug('p048')
# 48: RB--R-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .|||. .|||.
# .|++. -> .|||.
# ..--. ..--.
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height = r2.bottom - r.bottom
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
elif r.right < r2.right:
#####debug('p049|p050')
# 49: RB----TL:
# Create r3 to cover the area of r that is in
# line with r2 and the area of r2 that is in
# line with r. Enqueue r3. Crop r to the part
# that is above r3. Crop r2 to the part that
# is below r3. Remove r_id numbers from the
# collisions set if their rects no longer either
# collide with r or are beside r. Update r2's
# position in edges_t.
# ..... .....
# .||.. .||..
# .|+-. -> .===.
# ..--. ..--.
# ..... .....
# 50: -B----T-a:
# ..... .....
# .||.. .||..
# .||-. -> .===.
# ...-. ...-.
# ..... .....
r3 = Rect(r.left, r2.top, r2.right - r.left, r.bottom - r2.top)
append_to_queue(r3)
r.height -= r3.height
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r3.height
r2.top = r3.bottom
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
insort(edges_t, (r2.top, r2_id))
else: #r.right > r2.right
#####debug('p051')
# 51: -B--R-TL:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# .|||. .|||.
# .|+|. -> .|||.
# ..-.. ..-..
# ..... .....
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height = r2.bottom - r.bottom
r2.top = r.bottom
insort(edges_t, (r2.top, r2_id))
#r.top < r2.top:
#r.bottom < r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p052|p053')
# 52: RB-LR-T-: crop r to the part that is above r2
# ..... .....
# ..||. ..||.
# .-++. -> .---.
# .---. .---.
# ..... .....
# 53: RB-L--T-:
# ..... .....
# ..|.. ..|..
# .-+-. -> .---.
# .---. .---.
# ..... .....
r.height = r2.top - r.top
else: #r.right > r2.right
#####debug('p054|p055')
# 54: -B-LR-T-:
# Create r3 to cover the area of r that is in
# line with r2 and the area of r2 that is in
# line with r. Enqueue r3. Crop r to the part
# that is above r3. Crop r2 to the part that
# is below r3. Remove r_id numbers from the
# collisions set if their rects no longer either
# collide with r or are beside r. Update r2's
# position in edges_t.
# ..... .....
# ..||. ..||.
# .-+|. -> .===.
# .--.. .--..
# ..... .....
# 55: -B----T-b:
# ..... .....
# ..||. ..||.
# .-||. -> .===.
# .-... .-...
# ..... .....
r3 = Rect(r2.left, r2.top, r.right - r2.left, r.bottom - r2.top)
append_to_queue(r3)
r.height -= r3.height
_remove_r_from_edges_t(r2, r2_id, edges_t)
r2.height -= r3.height
r2.top = r3.bottom
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
insort(edges_t, (r2.top, r2_id))
#r.top < r2.top:
else: #r.bottom > r2.bottom
if r.left == r2.left:
if r.right >= r2.right:
#####debug('p056|p058')
# 56: R--LRBTL: r2 is not outside of r; delete r2
# ..... .....
# .|||. .|||.
# .+++. -> .|||.
# .|||. .|||.
# ..... .....
# 58: ---LRBTL:
# ..... .....
# .|||. .|||.
# .++|. -> .|||.
# .|||. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: #r.right < r2.right:
#####debug('p057')
# 57: R--L-BTL:
# Separate r into on-top & on-bottom parts.
# Lose the middle; it is not outside of r2.
# Enqueue the on-bottom parts. Remove r_id
# numbers from the collisions set if their
# rects no longer either collide with r1 or
# are beside r1.
# ..... .....
# .||.. .||..
# .++-. -> .---.
# .||.. .==..
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.bottom - r2.bottom)
append_to_queue(r3)
r.height = r2.top - r.top
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
#r.top < r2.top:
#r.bottom > r2.bottom
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p059|p061')
# 59: R---RBTL: r2 is not outside of r; delete r2
# ..... .....
# .|||. .|||.
# .|++. -> .|||.
# .|||. .|||.
# ..... .....
# 61: ----RBTL:
# ..... .....
# .|||. .|||.
# .|+|. -> .|||.
# .|||. .|||.
# ..... .....
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
else: #r.right < r2.right:
#####debug('p060|p066')
# 60: R----BTL:
# Separate r into on-top & on-bottom parts.
# Lose the middle. Extend r2 and return it
# to the queue. Enqueue the on-bottom parts.
# Remove r_id numbers from the collisions
# set if their rects no longer either
# collide with r1 or are beside r1.
# ..... .....
# .||.. .||..
# .|+-. -> .---.
# .||.. .==..
# ..... .....
# 66: -----BT-b:
# ..... .....
# .|... .|...
# .|--. -> .---.
# .|... .=...
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.bottom - r2.bottom)
append_to_queue(r3)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
r2.width = r2.right - r.left
r2.left = r.left
append_to_queue(r2)
r.height = r2.top - r.top
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
#r.top < r2.top:
#r.bottom > r2.bottom
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p062|p063')
# 62: R--LRBT-:
# Separate r into on-top & on-bottom parts.
# Lose the middle; it is not outside of r2.
# Enqueue the on-bottom parts. Remove r_id
# numbers from the collisions set if their
# rects no longer either collide with r1 or
# are beside r1.
# ..... .....
# ..||. ..||.
# .-++. -> .---.
# ..||. ..==.
# ..... .....
# 63: R--L-BT-:
# ..... .....
# ..|.. ..|..
# .-+-. -> .---.
# ..|.. ..=..
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.bottom - r2.bottom)
append_to_queue(r3)
r.height = r2.top - r.top
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
else: #r.right > r2.right
#####debug('p064|p065')
# 64: ---LRBT-
# Separate r into on-top & on-bottom parts.
# Lose the middle. Extend r2 and return it
# to the queue. Enqueue the on-bottom parts.
# Remove r_id numbers from the collisions
# set if their rects no longer either
# collide with r1 or are beside r1.
# ..... .....
# ..||. ..||.
# .-+|. -> .---.
# ..||. ..==.
# ..... .....
# 65: -----BT-a:
# ..... .....
# ...|. ...|.
# .--|. -> .---.
# ...|. ...=.
# ..... .....
r3 = Rect(r.left, r2.bottom, r.width, r.bottom - r2.bottom)
append_to_queue(r3)
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
r2.width = r.right - r2.left
append_to_queue(r2)
r.height = r2.top - r.top
collisions_update(_get_rects_above_bottom_edge_exclusive(r, edges_t))
else: #r.top > r2.top
if r.bottom == r2.bottom:
if r.left == r2.left:
if r.right <= r2.right:
#####debug('p067|p068')
# 67: RBTLRB-L: r is not outside of r2; forget r
# ..... .....
# .---. .---.
# .+++. -> .---.
# .+++. .---.
# ..... .....
# 68: RBTL-B-L:
# ..... .....
# .---. .---.
# .++-. -> .---.
# .++-. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p069')
# 69: -BTLRB-L:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# ..... .....
# .-... .-...
# .-... -> .-...
# .+||. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r.height
insort(edges_b, (r2.bottom, r2_id))
#r.top > r2.top
#r.bottom == r2.bottom:
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p070|p073')
# 70: RBT-RB-L:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# ..... .....
# ...-. ...-.
# ...-. -> ...-.
# .||+. .|||.
# ..... .....
# 73: -BT-RB-L:
# ..... .....
# ..-.. ..-..
# ..-.. -> ..-..
# .|+|. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r.height
insort(edges_b, (r2.bottom, r2_id))
else: #r.right < r2.right:
#####debug('p071|p072')
# 71: RBT--B-L:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# Expand r to reach r2's right edge.
# ..... .....
# ..--. ..--.
# .|+-. -> .|||.
# .|+-. .|||.
# ..... .....
# 72: -BT--B--a:
# ..... .....
# ..--. ..--.
# .|--. -> .|||.
# .|--. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r.height
insort(edges_b, (r2.bottom, r2_id))
r.width = r2.right - r.left
#r.top > r2.top
#r.bottom == r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p074|p075')
# 74: RBTLRB--: r is not outside of r2; forget r
# ..... .....
# .---. .---.
# .-++. -> .---.
# .-++. .---.
# ..... .....
# 75: RBTL-B--:
# ..... .....
# .---. .---.
# .-+-. -> .---.
# .-+-. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p076|p077')
# 76: -BTLRB--:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# Expand r to reach r2's left edge.
# ..... .....
# .--.. .--..
# .-+|. -> .|||.
# .-+|. .|||.
# ..... .....
# 77: -BT--B--b:
# ..... .....
# .--.. .--..
# .--|. -> .|||.
# .--|. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r.height
insort(edges_b, (r2.bottom, r2_id))
r.width = r.right - r2.left
r.left = r2.left
#r.top > r2.top
elif r.bottom < r2.bottom:
if r.left == r2.left:
if r.right <= r2.right:
#####debug('p078|p079')
# 78: RBTLR--L: r is not outside of r2; forget r
# ..... .....
# .---. .---.
# .+++. -> .---.
# .---. .---.
# ..... .....
# 79: RBTL---L:
# ..... .....
# .---. .---.
# .+--. -> .---.
# .---. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p080')
# 80: -BTLR--L:
# Separate r2 into on-top & on-bottom parts.
# Lose the middle. Put r3 in edges lists.
# Update r2's location in edges_b.
# ..... .....
# .-... .-...
# .+||. -> .|||.
# .-... .=...
# ..... .....
r3 = Rect(r.left, r.bottom, r2.width, r2.bottom - r.bottom)
_remove_r_from_edges_b(r2, r2_id, edges_b)
_add_r(r3, id(r3), r_dict, edges_l, edges_r, edges_t, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
#r.top > r2.top
#r.bottom < r2.bottom:
elif r.left < r2.left:
if r.right >= r2.right:
#####debug('p081|p084')
# 81: RBT-R--L:
# Separate r2 into on-top & on-bottom parts.
# Lose the middle. Put r3 in edges lists.
# Update r2's location in edges_b.
# ..... .....
# ...-. ...-.
# .||+. -> .|||.
# ...-. ...=.
# ..... .....
# 84: -BT-R--L:
# ..... .....
# ..-.. ..-..
# .|+|. -> .|||.
# ..-.. ..=..
# ..... .....
r3 = Rect(r2.left, r.bottom, r2.width, r2.bottom - r.bottom)
_remove_r_from_edges_b(r2, r2_id, edges_b)
_add_r(r3, id(r3), r_dict, edges_l, edges_r, edges_t, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
else: #r.right < r2.right:
#####debug('p082|p083')
# 82: RBT----L:
# Separate r2 into on-top & on-bottom parts.
# Lose the middle. Put r3 in edges lists.
# Update r2's location in edges_b. Extend r
# to cover what was the middle of r2.
# ..... .....
# ..--. ..--.
# .|+-. -> .|||.
# ..--. ..==.
# ..... .....
# 83: -BT-----a:
# ..... .....
# ..--. ..--.
# .|--. -> .|||.
# ..--. ..==.
# ..... .....
r3 = Rect(r2.left, r.bottom, r2.width, r2.bottom - r.bottom)
_remove_r_from_edges_b(r2, r2_id, edges_b)
_add_r(r3, id(r3), r_dict, edges_l, edges_r, edges_t, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
r.width = r2.right - r.left
#r.top > r2.top
#r.bottom < r2.bottom:
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p085|p086')
# 85: RBTLR---: r is not outside of r2; forget r
# ..... .....
# .---. .---.
# .-++. -> .---.
# .---. .---.
# ..... .....
# 86: RBTL----:
# ..... .....
# .---. .---.
# .-+-. -> .---.
# .---. .---.
# ..... .....
break
else: #r.right > r2.right
#####debug('p087|p088')
# 87: -BTLR---:
# Separate r2 into on-top & on-bottom parts.
# Lose the middle. Put r3 in edges lists.
# Update r2's location in edges_b. Extend r
# to cover what was the middle of r2.
# ..... .....
# .--.. .--..
# .-+|. -> .|||.
# .--.. .==..
# ..... .....
# 88: -BT-----b:
# ..... .....
# .--.. .--..
# .--|. -> .|||.
# .--.. .==..
# ..... .....
r3 = Rect(r2.left, r.bottom, r2.width, r2.bottom - r.bottom)
_remove_r_from_edges_b(r2, r2_id, edges_b)
_add_r(r3, id(r3), r_dict, edges_l, edges_r, edges_t, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
r.width = r.right - r2.left
r.left = r2.left
#r.top > r2.top
else: #r.bottom > r2.bottom
if r.left == r2.left:
if r.right == r2.right:
#####debug('p089')
# 89: R-TLRB-L: expand r to encompass r2; delete r2
# ..... .....
# .---. .|||.
# .+++. -> .|||.
# .|||. .|||.
# ..... .....
r.union_ip(r2)
if r == extent_of_all_r:
#####debug('shortcut 2b\n')
return [r]
_del_r(r2, r2_id, r_dict, edges_l, edges_r, edges_t, edges_b)
elif r.right < r2.right:
#####debug('p090')
# 90: R-TL-B-L: crop r to the part that is below r2
# ..... .....
# .---. .---.
# .++-. -> .---.
# .||.. .||..
# ..... .....
r.height = r.bottom - r2.bottom
r.top = r2.bottom
else: #r.right > r2.right
#####debug('p091')
# 91: --TLRB-L:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# ..... .....
# .--.. .--..
# .++|. -> .|||.
# .|||. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
#r.top > r2.top
#r.bottom > r2.bottom
elif r.left < r2.left:
if r.right == r2.right:
#####debug('p092')
# 92: R-T-RB-L:
# Crop r2 to the part that is below r.
# Update r2's location in edges_t.
# ..... .....
# ..--. ..--.
# .|++. -> .|||.
# .|||. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
elif r.right < r2.right:
#####debug('p093|p094')
# 93: R-T--B-L:
# Create r3 to cover the area of r that is in
# line with r2 and the area of r2 that is in
# line with r. Enqueue r3. Crop r to the part
# that is above r3. Crop r2 to the part that
# is below r3. Remove r_id numbers from the
# collisions set if their rects no longer either
# collide with r or are beside r. Update r2's
# position in edges_b.
# ..... .....
# ..--. ..--.
# .|+-. -> .===.
# .||.. .||..
# ..... .....
# 94: --T--B--b:
# ..... .....
# ...-. ...-.
# .||-. -> .===.
# .||.. .||..
# ..... .....
r3 = Rect(r.left, r.top, r2.right - r.left, r2.bottom - r.top)
append_to_queue(r3)
r.height -= r3.height
r.top = r3.bottom
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r3.height
collisions_update(_get_rects_below_top_edge_exclusive(r, edges_b))
insort(edges_b, (r2.bottom, r2_id))
else: #r.right > r2.right
#####debug('p095')
# 95: --T-RB-L:
# Crop r2 to the part that is above r.
# Update r2's location in edges_b.
# ..... .....
# ..-.. ..-..
# .|+|. -> .|||.
# .|||. .|||.
# ..... .....
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height = r.top - r2.top
insort(edges_b, (r2.bottom, r2_id))
#r.top > r2.top
#r.bottom > r2.bottom
else: #r.left > r2.left
if r.right <= r2.right:
#####debug('p096|p097')
# 96: R-TLRB--: crop r to the part that is below r2
# ..... .....
# .---. .---.
# .-++. -> .---.
# ..||. ..||.
# ..... .....
# 97: R-TL-B--: crop r to the part that is below r2
# ..... .....
# .---. .---.
# .-+-. -> .---.
# ..|.. ..|..
# ..... .....
r.height = r.bottom - r2.bottom
r.top = r2.bottom
else: #r.right > r2.right
#####debug('p098|p099')
# 98: R-T--B-L:
# Create r3 to cover the area of r that is in
# line with r2 and the area of r2 that is in
# line with r. Enqueue r3. Crop r to the part
# that is above r3. Crop r2 to the part that
# is below r3. Remove r_id numbers from the
# collisions set if their rects no longer either
# collide with r or are beside r. Update r2's
# position in edges_b.
# ..... .....
# .--.. .--..
# .-+|. -> .===.
# ..||. ..||.
# ..... .....
# 99: --T--B--b:
# ..... .....
# .-... .-...
# .-||. -> .===.
# ..||. ..||.
# ..... .....
r3 = Rect(r2.left, r.top, r.right - r2.left, r2.bottom - r.top)
append_to_queue(r3)
r.height -= r3.height
r.top = r3.bottom
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.height -= r3.height
collisions_update(_get_rects_below_top_edge_exclusive(r, edges_b))
insort(edges_b, (r2.bottom, r2_id))
# No r2 rectangles either collided with r or were beside r, so
# add r to r_dict and the edge position lists.
else:
# If r is as wide as extent_of_all_r, then try to expand r's
# height to the height of extent_of_all_r. If r is both as
# wide and as tall as extent_of_all_r, then no further
# optimization is possible; return r.
if r.width == extent_of_all_r.width:
# If there is a rect as wide as extent_of_all_r that is
# directly above r, then r2 will refer to it.
r2, r2_id = _get_full_width_r_above(r, extent_of_all_r, r_dict, edges_b)
# If there is a rect as wide as extent_of_all_r that is
# directly below r, then r3 will refer to it.
r3, r3_id = _get_full_width_r_below(r, extent_of_all_r, r_dict, edges_t)
if r2 and r3:
# Expand r2 to encompass r and r3; delete r3; forget r
# ----------
# ||||||||||
# ==========
#####debug('shortcut 3')
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.union_ip(r3)
if r2 == extent_of_all_r:
#####debug('shortcut 3a\n')
return [r2]
insort(edges_b, (r2.bottom, r2_id))
_del_r(r3, r3_id, r_dict, edges_l, edges_r, edges_t, edges_b)
continue
elif r2:
# Expand r2 to encompass r; forget r
# ----------
# ||||||||||
#####debug('shortcut 4')
_remove_r_from_edges_b(r2, r2_id, edges_b)
r2.union_ip(r)
if r2 == extent_of_all_r:
#####debug('shortcut 4a\n')
return [r2]
insort(edges_b, (r2.bottom, r2_id))
continue
elif r3:
# Expand r3 to encompass r; forget r
# ||||||||||
# ==========
#####debug('shortcut 5')
_remove_r_from_edges_t(r3, r3_id, edges_t)
r3.union_ip(r)
if r3 == extent_of_all_r:
#####debug('shortcut 5a\n')
return [r3]
insort(edges_t, (r3.top, r3_id))
continue
# Add r to r_dict and edges lists
_add_r(r, r_id, r_dict, edges_l, edges_r, edges_t, edges_b)
# Done optimizing!
if PYTHON_VERSION >= 3:
return_list = list(r_dict.values())
else:
return_list = r_dict.values()
#####debug('OUTPUT: ' + str(return_list) + '\n')
return return_list
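# The helpers below keep each edges_* list sorted as (coordinate, rect_id)
# tuples. Pairing the query coordinate with Inf or negInf as the second
# element of the bisect key makes the lookup inclusive or exclusive of rects
# whose edge exactly equals the query coordinate, since every real rect id
# compares between -Inf and +Inf.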
def _get_rects_left_of_right_edge_inclusive(r, edges_l):
"""helper for optimize_dirty_rects func"""
index_l = bisect_left(edges_l, (r.right, Inf))
return (t[1] for t in edges_l[:index_l])
def _get_rects_right_of_left_edge_inclusive(r, edges_r):
"""helper for optimize_dirty_rects func"""
index_r = bisect_left(edges_r, (r.left, negInf))
return (t[1] for t in edges_r[index_r:])
def _get_rects_above_bottom_edge_exclusive(r, edges_t):
"""helper for optimize_dirty_rects func"""
index_t = bisect_left(edges_t, (r.bottom, negInf))
return (t[1] for t in edges_t[:index_t])
def _get_rects_below_top_edge_exclusive(r, edges_b):
"""helper for optimize_dirty_rects func"""
index_b = bisect_left(edges_b, (r.top, Inf))
return (t[1] for t in edges_b[index_b:])
def _get_full_width_r_above(r, extent_of_all_r, r_dict, edges_b):
"""helper for optimize_dirty_rects func"""
if r.top != extent_of_all_r.top:
index_b = bisect_left(edges_b, (r.top, negInf))
if index_b < len(edges_b):
r2_id = edges_b[index_b][1]
r2 = r_dict[r2_id]
if r2.bottom == r.top and r2.width == extent_of_all_r.width:
return r2, r2_id
return None, None
def _get_full_width_r_below(r, extent_of_all_r, r_dict, edges_t):
"""helper for optimize_dirty_rects func"""
if r.bottom != extent_of_all_r.bottom:
index_t = bisect_left(edges_t, (r.bottom, negInf))
if index_t < len(edges_t):
r2_id = edges_t[index_t][1]
r2 = r_dict[r2_id]
if r2.top == r.bottom and r2.width == extent_of_all_r.width:
return r2, r2_id
return None, None
def _add_r(r, r_id, r_dict, edges_l, edges_r, edges_t, edges_b):
"""helper for optimize_dirty_rects func"""
r_dict[r_id] = r
insort(edges_l, (r.left, r_id))
insort(edges_r, (r.right, r_id))
insort(edges_t, (r.top, r_id))
insort(edges_b, (r.bottom, r_id))
def _del_r(r, r_id, r_dict, edges_l, edges_r, edges_t, edges_b):
"""helper for optimize_dirty_rects func"""
del r_dict[r_id]
_remove_r_from_edges_l(r, r_id, edges_l)
_remove_r_from_edges_r(r, r_id, edges_r)
_remove_r_from_edges_t(r, r_id, edges_t)
_remove_r_from_edges_b(r, r_id, edges_b)
def _remove_r_from_edges_l(r, r_id, edges_l):
"""helper for optimize_dirty_rects func"""
index_l = bisect_left(edges_l, (r.left, r_id))
assert edges_l[index_l] == (r.left, r_id), 'algorithm err: left del'
del edges_l[index_l]
def _remove_r_from_edges_r(r, r_id, edges_r):
"""helper for optimize_dirty_rects func"""
index_r = bisect_left(edges_r, (r.right, r_id))
assert edges_r[index_r] == (r.right, r_id), 'algorithm err: right del'
del edges_r[index_r]
def _remove_r_from_edges_t(r, r_id, edges_t):
"""helper for optimize_dirty_rects func"""
index_t = bisect_left(edges_t, (r.top, r_id))
assert edges_t[index_t] == (r.top, r_id), 'algorithm err: top del'
del edges_t[index_t]
def _remove_r_from_edges_b(r, r_id, edges_b):
"""helper for optimize_dirty_rects func"""
index_b = bisect_left(edges_b, (r.bottom, r_id))
assert edges_b[index_b] == (r.bottom, r_id), 'algorithm err: bottom del'
del edges_b[index_b]
# ==== File: sample/ajax/node_modules/_xd-synchttp@0.4.1@xd-synchttp/src/binding.gyp | Repo: WoJoinGitHub/t4node | License: Apache-2.0 ====
{
"targets": [
{
"target_name":"binding",
"sources":["png.c","jpeg.c","gif.c","imagesize.c","binding.c"],
'dependencies':['./binding.gyp:libhttp','./binding.gyp:libpng','./binding.gyp:libjpeg'],
},
{
"target_name":"libhttp",
"type":'static_library',
"sources":["http.c"]
},
{
"target_name": "libjpeg",
"type": "static_library",
'include_dirs': [
'./third-party/jpeg/'
],
'direct_dependent_settings': {
'include_dirs': [
'./third-party/jpeg/'
]
},
"sources": ["./third-party/jpeg/jaricom.c",
"./third-party/jpeg/jcapimin.c",
"./third-party/jpeg/jcapistd.c",
"./third-party/jpeg/jcarith.c",
"./third-party/jpeg/jccoefct.c",
"./third-party/jpeg/jccolor.c",
"./third-party/jpeg/jcdctmgr.c",
"./third-party/jpeg/jchuff.c",
"./third-party/jpeg/jcinit.c",
"./third-party/jpeg/jcmainct.c",
"./third-party/jpeg/jcmarker.c",
"./third-party/jpeg/jcmaster.c",
"./third-party/jpeg/jcomapi.c",
"./third-party/jpeg/jcparam.c",
"./third-party/jpeg/jcprepct.c",
"./third-party/jpeg/jcsample.c",
"./third-party/jpeg/jctrans.c",
"./third-party/jpeg/jdapimin.c",
"./third-party/jpeg/jdapistd.c",
"./third-party/jpeg/jdarith.c",
"./third-party/jpeg/jdatadst.c",
"./third-party/jpeg/jdatasrc.c",
"./third-party/jpeg/jdcoefct.c",
"./third-party/jpeg/jdcolor.c",
"./third-party/jpeg/jddctmgr.c",
"./third-party/jpeg/jdhuff.c",
"./third-party/jpeg/jdinput.c",
"./third-party/jpeg/jdmainct.c",
"./third-party/jpeg/jdmarker.c",
"./third-party/jpeg/jdmaster.c",
"./third-party/jpeg/jdmerge.c",
"./third-party/jpeg/jdpostct.c",
"./third-party/jpeg/jdsample.c",
"./third-party/jpeg/jdtrans.c",
"./third-party/jpeg/jerror.c",
"./third-party/jpeg/jfdctflt.c",
"./third-party/jpeg/jfdctfst.c",
"./third-party/jpeg/jfdctint.c",
"./third-party/jpeg/jidctflt.c",
"./third-party/jpeg/jidctfst.c",
"./third-party/jpeg/jidctint.c",
"./third-party/jpeg/jquant1.c",
"./third-party/jpeg/jquant2.c",
"./third-party/jpeg/jutils.c",
"./third-party/jpeg/jmemmgr.c",
"./third-party/jpeg/jmemnobs.c"
]
},
{
'target_name': 'libpng',
'type': 'static_library',
'include_dirs': [
'./third-party/libpng',
],
'direct_dependent_settings': {
'include_dirs': ['./third-party/libpng'],
},
'dependencies': ['./binding.gyp:zlib'],
'libraries': ['-lm'],
'sources': [
"./third-party/libpng/pngerror.c",
"./third-party/libpng/pngget.c",
"./third-party/libpng/pngmem.c",
"./third-party/libpng/pngpread.c",
"./third-party/libpng/pngread.c",
"./third-party/libpng/pngrio.c",
"./third-party/libpng/pngrtran.c",
"./third-party/libpng/pngrutil.c",
"./third-party/libpng/pngset.c",
"./third-party/libpng/pngtrans.c",
"./third-party/libpng/pngwio.c",
"./third-party/libpng/pngwrite.c",
"./third-party/libpng/pngwtran.c",
"./third-party/libpng/pngwutil.c",
"./third-party/libpng/png.c"
]
},
{
'target_name': 'zlib',
'type': 'static_library',
'include_dirs': [
'./third-party/zlib/'
],
'direct_dependent_settings': {
'include_dirs': [
'./third-party/zlib/'
]
},
'sources': ["./third-party/zlib/adler32.c",
"./third-party/zlib/crc32.c",
"./third-party/zlib/deflate.c",
"./third-party/zlib/infback.c",
"./third-party/zlib/inffast.c",
"./third-party/zlib/inflate.c",
"./third-party/zlib/inftrees.c",
"./third-party/zlib/trees.c",
"./third-party/zlib/zutil.c",
"./third-party/zlib/compress.c",
"./third-party/zlib/uncompr.c",
"./third-party/zlib/gzclose.c",
"./third-party/zlib/gzlib.c",
"./third-party/zlib/gzread.c",
"./third-party/zlib/gzwrite.c"
]
},
],
'conditions': [
['OS=="linux"', {
}]
]
}
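# Build sketch (assumes the standard node-gyp toolchain; the commands are
# illustrative): run "node-gyp configure" and then "node-gyp build" from the
# package root. The static libhttp, libjpeg, libpng and zlib targets are
# built first and linked into the "binding" addon via its dependencies.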
# ==== File: Sentiment_Adjectives/lm_finetune/bert_ima_finetune.py | Repo: tilmanbeck/CausaLM | License: MIT ====
from transformers.modeling_bert import BertLMPredictionHead, BertPreTrainedModel, BertModel
from BERT.lm_finetune.grad_reverse_layer import GradReverseLayerFunction
from BERT.bert_text_dataset import BertTextDataset
from BERT.bert_pos_tagger import BertTokenClassificationDataset
from torch.nn import CrossEntropyLoss
import torch.nn as nn
import torch
class BertIMAPredictionHead(nn.Module):
def __init__(self, config):
super(BertIMAPredictionHead, self).__init__()
# self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, 2)
# p = float(i + epoch * len_dataloader) / n_epoch / len_dataloader
# self.alpha = 2. / (1. + np.exp(-10 * p)) - 1
self.alpha = 1.
def forward(self, hidden_states):
# hidden_states = self.transform(hidden_states)
reversed_hidden_states = GradReverseLayerFunction.apply(hidden_states, self.alpha)
output = self.decoder(reversed_hidden_states)
return output
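# For reference, a gradient reversal layer of this kind is usually
# implemented as an autograd Function that is the identity on the forward
# pass and negates (and scales by alpha) the gradient on the backward pass.
# A minimal sketch, assuming the imported GradReverseLayerFunction follows
# this common pattern:
#
#   class GradReverseLayerFunction(torch.autograd.Function):
#       @staticmethod
#       def forward(ctx, x, alpha):
#           ctx.alpha = alpha
#           return x.view_as(x)
#
#       @staticmethod
#       def backward(ctx, grad_output):
#           # one gradient per forward input: reversed grad for x, None for alpha
#           return grad_output.neg() * ctx.alpha, None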
class BertIMAPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertIMAPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.adj_predictions = BertIMAPredictionHead(config)
def forward(self, sequence_output, pooled_output):
lm_prediction_scores = self.predictions(sequence_output)
adj_prediction_scores = self.adj_predictions(sequence_output)
return lm_prediction_scores, adj_prediction_scores
class BertForIMAPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**masked_adj_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked adjective prediction (classification) loss.
Indices should be in ``[0, 1]``.
``0`` indicates masked word is not adjective,
``1`` indicates masked word is adjective.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``masked_adj_labels`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the masked adjective prediction (classification) loss.
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**adj_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
Prediction scores of the masked adjective predictions (classification) head (scores of True/False before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForIMAPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
outputs = model(input_ids)
lm_prediction_scores, adj_prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForIMAPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertIMAPreTrainingHeads(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None,
head_mask=None, masked_lm_labels=None, masked_adj_labels=None, pos_tagging_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
lm_prediction_scores, adj_prediction_scores = self.cls(sequence_output, pooled_output)
outputs = (lm_prediction_scores, adj_prediction_scores,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and masked_adj_labels is not None:
loss_f = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX)
masked_lm_loss = loss_f(lm_prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
masked_adj_loss = loss_f(adj_prediction_scores.view(-1, 2), masked_adj_labels.view(-1))
total_loss = masked_lm_loss + masked_adj_loss
loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX, reduction='none')
mlm_loss_per_sample = self.calc_loss_per_sample(loss_f_per_sample, lm_prediction_scores, masked_lm_labels, self.config.vocab_size)
ima_loss_per_sample = self.calc_loss_per_sample(loss_f_per_sample, adj_prediction_scores, masked_adj_labels, 2)
outputs = (mlm_loss_per_sample, ima_loss_per_sample,) + outputs
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@staticmethod
def calc_loss_per_sample(loss_f, scores, masked_labels, label_size, ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX):
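# The unreduced token-level loss is reshaped back to the shape of
# masked_labels; for each sample, only positions whose label is above the
# ignore index are kept before taking that sample's mean loss.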
return torch.stack([loss_f(scores.view(-1, label_size), masked_labels.view(-1))
.view_as(masked_labels)[i, :].masked_select(masked_labels[i, :] > ignore_index).mean()
for i in range(masked_labels.size(0))])
class BertTokenClassificationHead(nn.Module):
def __init__(self, config):
super(BertTokenClassificationHead, self).__init__()
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, hidden_states):
output = self.classifier(hidden_states)
return output
class BertIMAwControlPreTrainingHeads(nn.Module):
def __init__(self, config):
super(BertIMAwControlPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(config)
self.adj_predictions = BertIMAPredictionHead(config)
self.pos_tagging = BertTokenClassificationHead(config)
def forward(self, sequence_output, pooled_output):
lm_prediction_scores = self.predictions(sequence_output)
adj_prediction_scores = self.adj_predictions(sequence_output)
pos_tagging_scores = self.pos_tagging(sequence_output)
return lm_prediction_scores, adj_prediction_scores, pos_tagging_scores
class BertForIMAwControlPreTraining(BertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**masked_adj_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked adjective prediction (classification) loss.
Indices should be in ``[0, 1]``.
            ``0`` indicates the masked word is not an adjective,
            ``1`` indicates the masked word is an adjective.
        **pos_tagging_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the POS tagging (token classification) loss.
            Indices should be in ``[0, ..., config.num_labels - 1]``; padded positions are ignored via the attention mask.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` and ``masked_adj_labels`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Total loss as the sum of the masked language modeling loss and the masked adjective prediction (classification) loss, plus the POS tagging loss when ``pos_tagging_labels`` is also provided.
**lm_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **adj_prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
            Prediction scores of the masked adjective prediction (classification) head (scores of True/False before SoftMax).
        **pos_tagging_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
            Prediction scores of the POS tagging head (scores for each tag before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        model = BertForIMAwControlPreTraining.from_pretrained('bert-base-uncased')
        input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0)  # Batch size 1
        outputs = model(input_ids)
        lm_prediction_scores, adj_prediction_scores, pos_tagging_scores = outputs[:3]
"""
def __init__(self, config):
super(BertForIMAwControlPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertIMAwControlPreTrainingHeads(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None,
head_mask=None, masked_lm_labels=None, masked_adj_labels=None, pos_tagging_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
lm_prediction_scores, adj_prediction_scores, pos_tagging_scores = self.cls(sequence_output, pooled_output)
outputs = (lm_prediction_scores, adj_prediction_scores, pos_tagging_scores,) + outputs[2:] # add hidden states and attention if they are here
total_loss = 0.0
if pos_tagging_labels is not None:
loss_f = CrossEntropyLoss(ignore_index=BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX)
loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX, reduction='none')
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = pos_tagging_scores.view(-1, self.config.num_labels)
active_labels = torch.where(
active_loss, pos_tagging_labels.view(-1), torch.tensor(loss_f.ignore_index).type_as(pos_tagging_labels)
)
                pos_tagging_loss = loss_f(active_logits, active_labels)
                # active_labels already maps padded positions to the ignore index;
                # restore the (batch_size, sequence_length) shape for the per-sample loss.
                masked_pos_tagging_labels = active_labels.view_as(pos_tagging_labels)
            else:
                pos_tagging_loss = loss_f(pos_tagging_scores.view(-1, self.config.num_labels), pos_tagging_labels.view(-1))
                masked_pos_tagging_labels = pos_tagging_labels
            pos_tagging_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
                                                                                     pos_tagging_scores,
                                                                                     masked_pos_tagging_labels,
                                                                                     self.config.num_labels,
                                                                                     BertTokenClassificationDataset.POS_IGNORE_LABEL_IDX)
total_loss += pos_tagging_loss
outputs = (pos_tagging_loss_per_sample,) + outputs
if masked_lm_labels is not None and masked_adj_labels is not None:
loss_f = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX)
masked_lm_loss = loss_f(lm_prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
masked_adj_loss = loss_f(adj_prediction_scores.view(-1, 2), masked_adj_labels.view(-1))
total_loss += masked_lm_loss + masked_adj_loss
loss_f_per_sample = CrossEntropyLoss(ignore_index=BertTextDataset.MLM_IGNORE_LABEL_IDX, reduction='none')
mlm_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
lm_prediction_scores,
masked_lm_labels,
self.config.vocab_size)
ima_loss_per_sample = BertForIMAPreTraining.calc_loss_per_sample(loss_f_per_sample,
adj_prediction_scores,
masked_adj_labels,
2)
outputs = (mlm_loss_per_sample, ima_loss_per_sample,) + outputs
outputs = (total_loss,) + outputs
        return outputs  # (total_loss), (mlm_loss_per_sample, ima_loss_per_sample), (pos_tagging_loss_per_sample), lm_prediction_scores, adj_prediction_scores, pos_tagging_scores, (hidden_states), (attentions)
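# A minimal usage sketch, assuming a BertConfig carrying a `num_labels`
# attribute for the POS tag set (17 here is an arbitrary placeholder) and toy
# label tensors; this is illustrative, not code from the original project.
#
#   config = BertConfig(num_labels=17)
#   model = BertForIMAwControlPreTraining(config)
#   input_ids = torch.randint(0, config.vocab_size, (2, 8))
#   outputs = model(input_ids,
#                   masked_lm_labels=input_ids.clone(),
#                   masked_adj_labels=torch.randint(0, 2, (2, 8)),
#                   pos_tagging_labels=torch.randint(0, config.num_labels, (2, 8)))
#   total_loss = outputs[0]                  # combined MLM + IMA + POS loss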
| 60.564885
| 153
| 0.650681
| 1,794
| 15,868
| 5.459866
| 0.122074
| 0.055539
| 0.02389
| 0.019602
| 0.838795
| 0.800919
| 0.798673
| 0.780602
| 0.768249
| 0.732415
| 0
| 0.005148
| 0.265503
| 15,868
| 261
| 154
| 60.796935
| 0.835264
| 0.40087
| 0
| 0.463768
| 0
| 0
| 0.001308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.050725
| 0.007246
| 0.253623
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
855dabac66e871e20f403b6761df07a3be15c02a
| 71
|
py
|
Python
|
mstc/learning/__init__.py
|
PhosphorylatedRabbits/mass_spec_trans_coding
|
820103c6026b69486e0c89e487d4e90af84e87dc
|
[
"MIT"
] | 1
|
2021-09-06T08:35:14.000Z
|
2021-09-06T08:35:14.000Z
|
mstc/learning/__init__.py
|
PhosphorylatedRabbits/mass_spec_trans_coding
|
820103c6026b69486e0c89e487d4e90af84e87dc
|
[
"MIT"
] | null | null | null |
mstc/learning/__init__.py
|
PhosphorylatedRabbits/mass_spec_trans_coding
|
820103c6026b69486e0c89e487d4e90af84e87dc
|
[
"MIT"
] | null | null | null |
from .pipeline import generate_cross_validation_pipeline # noqa: F401
| 35.5
| 70
| 0.84507
| 9
| 71
| 6.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 0.112676
| 71
| 1
| 71
| 71
| 0.857143
| 0.140845
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4517666c3848750ddc429b8dce94a83167f9c57
| 132
|
py
|
Python
|
crawler/test_code/test_get_ip_address.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
crawler/test_code/test_get_ip_address.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
crawler/test_code/test_get_ip_address.py
|
Coslate/NBA_Win_Predictor
|
c8f4fb5a12fdd36bd43e573510bfb2307f37ec1f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/env python3.6
import package_tool_surf.tool_surf as tool_surf
print(f'ip_address = {tool_surf.GetPublicIPAddress()}')
| 22
| 55
| 0.780303
| 21
| 132
| 4.619048
| 0.714286
| 0.329897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016667
| 0.090909
| 132
| 5
| 56
| 26.4
| 0.791667
| 0.181818
| 0
| 0
| 0
| 0
| 0.420561
| 0.299065
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
a471f15b413ec164b3d71a60d3c51d10c2d63a42
| 2,548
|
py
|
Python
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 7
|
2021-02-01T18:11:28.000Z
|
2022-01-31T19:14:07.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 22
|
2021-01-02T15:25:23.000Z
|
2022-03-14T11:32:53.000Z
|
epytope/Data/pssms/smmpmbec/mat/A_30_01_10.py
|
christopher-mohr/epytope
|
8ac9fe52c0b263bdb03235a5a6dffcb72012a4fd
|
[
"BSD-3-Clause"
] | 4
|
2021-05-28T08:50:38.000Z
|
2022-03-14T11:45:32.000Z
|
A_30_01_10 = {0: {'A': -0.226, 'C': 0.176, 'E': -0.07, 'D': 0.236, 'G': -0.022, 'F': 0.205, 'I': -0.423, 'H': -0.175, 'K': -0.699, 'M': -0.184, 'L': 0.1, 'N': 0.02, 'Q': -0.071, 'P': 0.372, 'S': 0.088, 'R': -0.425, 'T': 0.333, 'W': 0.31, 'V': 0.12, 'Y': 0.334}, 1: {'A': 0.222, 'C': 0.03, 'E': 0.206, 'D': 0.049, 'G': -0.054, 'F': -0.234, 'I': -0.006, 'H': -0.242, 'K': -0.256, 'M': -0.056, 'L': 0.054, 'N': -0.092, 'Q': 0.393, 'P': 0.757, 'S': -0.461, 'R': 0.14, 'T': -0.163, 'W': -0.146, 'V': -0.166, 'Y': 0.024}, 2: {'A': -0.017, 'C': 0.028, 'E': 0.384, 'D': 0.262, 'G': 0.27, 'F': -0.122, 'I': -0.031, 'H': -0.563, 'K': -0.238, 'M': 0.252, 'L': -0.031, 'N': -0.39, 'Q': 0.003, 'P': 0.722, 'S': -0.202, 'R': -0.13, 'T': -0.085, 'W': 0.296, 'V': -0.146, 'Y': -0.262}, 3: {'A': -0.023, 'C': 0.041, 'E': 0.105, 'D': 0.101, 'G': 0.016, 'F': 0.06, 'I': 0.021, 'H': -0.115, 'K': -0.209, 'M': -0.001, 'L': 0.0, 'N': 0.063, 'Q': 0.034, 'P': 0.035, 'S': -0.027, 'R': -0.2, 'T': 0.009, 'W': 0.052, 'V': 0.019, 'Y': 0.018}, 4: {'A': -0.471, 'C': -0.102, 'E': 0.256, 'D': 0.14, 'G': 0.122, 'F': -0.277, 'I': 0.09, 'H': -0.175, 'K': -0.065, 'M': 0.018, 'L': -0.151, 'N': 0.079, 'Q': 0.351, 'P': 0.517, 'S': -0.152, 'R': 0.006, 'T': -0.099, 'W': 0.114, 'V': -0.096, 'Y': -0.105}, 5: {'A': -0.007, 'C': 0.007, 'E': 0.041, 'D': 0.06, 'G': 0.013, 'F': -0.01, 'I': -0.005, 'H': -0.024, 'K': -0.018, 'M': -0.024, 'L': -0.042, 'N': 0.008, 'Q': 0.001, 'P': 0.049, 'S': -0.008, 'R': -0.018, 'T': -0.015, 'W': 0.01, 'V': -0.013, 'Y': -0.006}, 6: {'A': 0.024, 'C': 0.138, 'E': 0.054, 'D': -0.168, 'G': 0.28, 'F': -0.353, 'I': 0.265, 'H': -0.031, 'K': -0.178, 'M': -0.21, 'L': -0.064, 'N': 0.171, 'Q': 0.353, 'P': 0.09, 'S': 0.242, 'R': -0.431, 'T': -0.093, 'W': -0.09, 'V': 0.029, 'Y': -0.028}, 7: {'A': 0.262, 'C': 0.108, 'E': 0.131, 'D': 0.123, 'G': 0.074, 'F': -0.166, 'I': -0.157, 'H': 0.102, 'K': -0.166, 'M': 0.015, 'L': 0.07, 'N': -0.1, 'Q': 0.102, 'P': -0.207, 'S': -0.039, 'R': -0.048, 'T': 0.027, 'W': -0.07, 'V': 0.091, 'Y': -0.151}, 8: {'A': -0.033, 'C': -0.012, 'E': 0.04, 'D': 0.028, 'G': -0.015, 'F': -0.025, 'I': -0.258, 'H': 0.232, 'K': 0.245, 'M': -0.104, 'L': -0.175, 'N': 0.064, 'Q': 0.056, 'P': -0.264, 'S': 0.003, 'R': 0.37, 'T': 0.001, 'W': 0.023, 'V': -0.217, 'Y': 0.04}, 9: {'A': -0.411, 'C': 0.032, 'E': 0.268, 'D': 0.275, 'G': -0.141, 'F': -0.083, 'I': -0.159, 'H': 0.013, 'K': -0.502, 'M': -0.007, 'L': -0.137, 'N': 0.19, 'Q': 0.313, 'P': 0.183, 'S': -0.063, 'R': -0.118, 'T': 0.07, 'W': 0.337, 'V': -0.151, 'Y': 0.09}, -1: {'con': 3.96982}}
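# Hedged usage sketch (not from the epytope source): SMMPMBEC matrices of this
# form are commonly applied by summing the matrix entry for each residue at
# each position and adding the 'con' offset stored under key -1, giving a
# log-transformed binding affinity. The helper and the 10-mer below are
# hypothetical.
#
#   def score_peptide(pssm, peptide):
#       assert len(peptide) == len(pssm) - 1   # one row per position, plus 'con'
#       return sum(pssm[i][aa] for i, aa in enumerate(peptide)) + pssm[-1]['con']
#
#   score_peptide(A_30_01_10, "KVAELVHFLL")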
| 2,548
| 2,548
| 0.390502
| 618
| 2,548
| 1.605178
| 0.279935
| 0.020161
| 0.010081
| 0.012097
| 0.014113
| 0
| 0
| 0
| 0
| 0
| 0
| 0.36926
| 0.16248
| 2,548
| 1
| 2,548
| 2,548
| 0.095595
| 0
| 0
| 0
| 0
| 0
| 0.079639
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a476fa3f4a2dab1ad63200eb80ffebbaa8bf3b30
| 36
|
py
|
Python
|
rentomatic/domain/__init__.py
|
skhalymon/rentomatic
|
87a59c9a16df6b86dbd3508170b000fba1661e14
|
[
"MIT"
] | null | null | null |
rentomatic/domain/__init__.py
|
skhalymon/rentomatic
|
87a59c9a16df6b86dbd3508170b000fba1661e14
|
[
"MIT"
] | null | null | null |
rentomatic/domain/__init__.py
|
skhalymon/rentomatic
|
87a59c9a16df6b86dbd3508170b000fba1661e14
|
[
"MIT"
] | null | null | null |
from .storageroom import StorageRoom
| 36
| 36
| 0.888889
| 4
| 36
| 8
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 36
| 1
| 36
| 36
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a49aa1819c8a4512630914c19964fce993e4f4c2
| 9,449
|
py
|
Python
|
basset/tests/test_converter.py
|
team-supercharge/basset-ios
|
8336eb2df0d03fe184c7fd14843c8facd471607d
|
[
"MIT"
] | null | null | null |
basset/tests/test_converter.py
|
team-supercharge/basset-ios
|
8336eb2df0d03fe184c7fd14843c8facd471607d
|
[
"MIT"
] | null | null | null |
basset/tests/test_converter.py
|
team-supercharge/basset-ios
|
8336eb2df0d03fe184c7fd14843c8facd471607d
|
[
"MIT"
] | null | null | null |
import os
import shutil
import tempfile
from unittest import TestCase
from wand.image import Image
from basset.exceptions import *
from basset.helpers.converter import Converter
class TestConverter(TestCase):
temp_dir_path = ""
converter_tests_resource_path = "converter"
converter_output_tests_resource_path = "converterOutput"
script_root_dir_path = ""
@classmethod
def setUpClass(cls):
TestConverter.script_root_dir_path = os.getcwd()
@classmethod
def tearDownClass(cls):
os.chdir(TestConverter.script_root_dir_path)
def setUp(self):
self.temp_dir_path = tempfile.mkdtemp()
os.mkdir(os.path.join(self.temp_dir_path, self.converter_output_tests_resource_path))
shutil.copytree(os.path.join(TestConverter.script_root_dir_path, "basset/tests/Resources/tests_converter"),
os.path.join(self.temp_dir_path, self.converter_tests_resource_path))
os.chdir(self.temp_dir_path)
def tearDown(self):
shutil.rmtree(self.temp_dir_path)
def test_set_up(self):
def assert_valid_eps_file(test_file_path):
self.assertTrue(os.path.isfile(test_file_path))
self.assertEqual(Image(filename=test_file_path).size, (100, 100))
self.assertEqual(Image(filename=test_file_path).format, "EPT")
for i in range(1, 3):
assert_valid_eps_file(
os.path.join(self.converter_tests_resource_path, "convert_test", "test-0" + str(i) + ".eps"))
assert_valid_eps_file(
os.path.join(self.converter_tests_resource_path, "convert_test", "subfolder", "test-04.eps"))
assert_valid_eps_file(
os.path.join(self.converter_tests_resource_path, "convert_test", "subfolder", "subsubfolder",
"test-05.eps"))
def assert_valid_png_file(self, test_file_path, size):
self.assertTrue(os.path.isfile(test_file_path))
self.assertEqual(Image(filename=test_file_path).size, size)
self.assertEqual(Image(filename=test_file_path).format, "PNG")
def test_convert(self):
converter = Converter()
converter.input_dir = os.path.join(self.converter_tests_resource_path, "convert_test")
converter.output_dir = self.converter_output_tests_resource_path
converter.convert()
for i in range(1, 3):
test_file_path_1x = os.path.join(self.converter_output_tests_resource_path, "test-0" + str(i) + ".png")
test_file_path_2x = os.path.join(self.converter_output_tests_resource_path, "test-0" + str(i) + "@2x.png")
test_file_path_3x = os.path.join(self.converter_output_tests_resource_path, "test-0" + str(i) + "@3x.png")
self.assert_valid_png_file(test_file_path=test_file_path_1x, size=(100, 100))
self.assert_valid_png_file(test_file_path=test_file_path_2x, size=(200, 200))
self.assert_valid_png_file(test_file_path=test_file_path_3x, size=(300, 300))
self.assert_valid_png_file(os.path.join(self.converter_output_tests_resource_path, "subfolder", "test-04.png"),
(100, 100))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", "test-04@2x.png"),
(200, 200))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", "test-04@3x.png"),
(300, 300))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", "subsubfolder", "test-05.png"),
(100, 100))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", "subsubfolder", "test-05@2x.png"),
(200, 200))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", "subsubfolder", "test-05@3x.png"),
(300, 300))
def test_should_raise_exception_with_assets_dir_not_present(self):
converter = Converter()
os.chdir(os.path.join(self.converter_tests_resource_path, "suggest_asset_diretory_test"))
converter.input_dir = "FaceAssetsDir"
converter.output_dir = self.converter_output_tests_resource_path
try:
converter.convert()
self.fail("This should fail")
except AssetsDirNotFoundException as e:
self.assertEqual(e.asset_dir_candidate, "Vector_assets")
def test_should_raise_exception_with_empty_parameter_if_no_vector_files_found(self):
converter = Converter()
os.chdir(os.path.join(self.converter_tests_resource_path, "suggest_asset_diretory_test", "Images.xcassets"))
converter.input_dir = "FaceAssetsDir"
converter.output_dir = self.converter_output_tests_resource_path
try:
converter.convert()
self.fail("This should fail")
except AssetsDirNotFoundException as e:
self.assertEqual(e.asset_dir_candidate, None)
def test_dont_reconvert_old_files_test(self):
converter = Converter()
os.chdir(os.path.join(self.converter_tests_resource_path, "dont_reconvert_old_files_test"))
converter.input_dir = "Assets"
converter.output_dir = self.converter_output_tests_resource_path
converter.convert()
sha1_of_generated_files = []
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-01.png")))
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-02.png")))
shutil.copy2(os.path.join(converter.input_dir, "test-01.eps"), os.path.join(converter.input_dir, "test-02.eps"))
converter.convert()
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-01.png")))
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-02.png")))
self.assertEqual(sha1_of_generated_files[0], sha1_of_generated_files[2])
self.assertNotEqual(sha1_of_generated_files[1], sha1_of_generated_files[3])
def test_respect_force_flag(self):
converter = Converter()
os.chdir(os.path.join(self.converter_tests_resource_path, "dont_reconvert_old_files_test"))
converter.input_dir = "Assets"
converter.output_dir = self.converter_output_tests_resource_path
converter.force_convert = True
converter.convert()
sha1_of_generated_files = []
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-01.png")))
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-02.png")))
converter.convert()
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-01.png")))
sha1_of_generated_files.append(converter.sha1_of_file(os.path.join(converter.output_dir, "test-02.png")))
self.assertNotEqual(sha1_of_generated_files[0], sha1_of_generated_files[2])
self.assertNotEqual(sha1_of_generated_files[1], sha1_of_generated_files[3])
def test_escape_filenames(self):
converter = Converter()
converter.input_dir = os.path.join(self.converter_tests_resource_path, "convert_test")
converter.output_dir = self.converter_output_tests_resource_path
fancy_filename = "& :()[]{}|"
shutil.rmtree(os.path.join(converter.input_dir, "subfolder", "subsubfolder"))
os.remove(os.path.join(converter.input_dir, "test-01.eps"))
os.remove(os.path.join(converter.input_dir, "test-02.eps"))
os.remove(os.path.join(converter.input_dir, "test-03.eps"))
os.rename(os.path.join(converter.input_dir, "subfolder", "test-04.eps"),
os.path.join(converter.input_dir, "subfolder", fancy_filename + ".eps"))
converter.convert()
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", fancy_filename + ".png"),
(100, 100))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", fancy_filename + "@2x.png"),
(200, 200))
self.assert_valid_png_file(
os.path.join(self.converter_output_tests_resource_path, "subfolder", fancy_filename + "@3x.png"),
(300, 300))
def test_should_raise_exception_when_assets_dir_contains_xcassets_dir(self):
os.mkdir("test.xcassets")
converter = Converter()
converter.input_dir = "."
converter.output_dir = self.converter_output_tests_resource_path
pass
def test_should_raise_exception_when_imageset_dir_is_xcassets_dir(self):
converter = Converter()
converter.input_dir = os.path.join(self.converter_tests_resource_path, "convert_xcassets_exception_test")
converter.output_dir = self.converter_output_tests_resource_path
try:
converter.convert()
self.fail("This should fail")
except AssetsDirContainsImagesetDirectoryException as e:
self.assertEqual(e.imageset_directory_path, os.path.join(converter.input_dir, "test.xcassets","test.imageset"))
self.assertEqual(e.assets_dir, converter.input_dir)
| 47.245
| 123
| 0.698804
| 1,229
| 9,449
| 5.022783
| 0.108218
| 0.042767
| 0.068038
| 0.054431
| 0.812571
| 0.771424
| 0.749879
| 0.724931
| 0.710028
| 0.669529
| 0
| 0.023411
| 0.190814
| 9,449
| 199
| 124
| 47.482412
| 0.783939
| 0
| 0
| 0.5
| 0
| 0
| 0.099809
| 0.019157
| 0
| 0
| 0
| 0
| 0.196203
| 1
| 0.094937
| false
| 0.006329
| 0.037975
| 0
| 0.164557
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a49eb9f0519734ac375994589e3db5e2f4ccca7d
| 156
|
py
|
Python
|
scikits/talkbox/features/__init__.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 65
|
2015-02-18T05:23:12.000Z
|
2022-02-21T13:09:34.000Z
|
scikits/talkbox/features/__init__.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 5
|
2016-06-26T08:46:22.000Z
|
2019-01-09T03:03:39.000Z
|
scikits/talkbox/features/__init__.py
|
neEverett/talkbox
|
ee0ec30a6a6d483eb9284f72bdaf26bd99765f80
|
[
"MIT"
] | 31
|
2015-02-23T22:42:15.000Z
|
2022-02-21T13:09:34.000Z
|
from scikits.talkbox.features.mel import mel2hz, hz2mel
__all__ = ['mel2hz', 'hz2mel']
from scikits.talkbox.features.mfcc import mfcc
__all__ += ['mfcc']
| 26
| 55
| 0.75
| 20
| 156
| 5.45
| 0.5
| 0.201835
| 0.330275
| 0.477064
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028986
| 0.115385
| 156
| 5
| 56
| 31.2
| 0.76087
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a4a4bc948dded2d6afc5dcd9d23e525577f35d17
| 20
|
py
|
Python
|
dataloaders/__init__.py
|
psui3905/CCT
|
637cbac130b39f02733339c79cdf1d531e339e9c
|
[
"MIT"
] | 308
|
2020-06-09T13:37:17.000Z
|
2022-03-24T07:43:33.000Z
|
dataloaders/__init__.py
|
lesvay/CCT
|
cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca
|
[
"MIT"
] | 55
|
2020-06-16T11:57:54.000Z
|
2022-03-09T12:04:58.000Z
|
dataloaders/__init__.py
|
lesvay/CCT
|
cf98ea7e6aefa7091e6c375a9025ba1e0f6e53ca
|
[
"MIT"
] | 51
|
2020-06-08T02:42:14.000Z
|
2022-02-25T16:38:36.000Z
|
from .voc import VOC
| 20
| 20
| 0.8
| 4
| 20
| 4
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 20
| 1
| 20
| 20
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f10f65a7b08891482ef78385aedd40a97602cbcc
| 44
|
py
|
Python
|
build/lib/requestmap/Utilities/__init__.py
|
yyjlincoln/RequestMap
|
3fff1117e4aef59afe66cce29cd29e7670ae46f7
|
[
"Apache-2.0"
] | 2
|
2021-12-12T06:25:51.000Z
|
2021-12-26T11:08:27.000Z
|
build/lib/requestmap/Utilities/__init__.py
|
yyjlincoln/RequestMap
|
3fff1117e4aef59afe66cce29cd29e7670ae46f7
|
[
"Apache-2.0"
] | null | null | null |
build/lib/requestmap/Utilities/__init__.py
|
yyjlincoln/RequestMap
|
3fff1117e4aef59afe66cce29cd29e7670ae46f7
|
[
"Apache-2.0"
] | null | null | null |
from . import JITDictionary as JITDictionary
| 44
| 44
| 0.863636
| 5
| 44
| 7.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113636
| 44
| 1
| 44
| 44
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74c8547e0e3099bdcdaff0b6bd2f51cdf97e4409
| 112
|
py
|
Python
|
app/file_management/__init__.py
|
kiza054/woodhall-scout-blog-prototype
|
bc7dc0b766263bb7a1a4d342d27c57d7989ff152
|
[
"MIT"
] | null | null | null |
app/file_management/__init__.py
|
kiza054/woodhall-scout-blog-prototype
|
bc7dc0b766263bb7a1a4d342d27c57d7989ff152
|
[
"MIT"
] | null | null | null |
app/file_management/__init__.py
|
kiza054/woodhall-scout-blog-prototype
|
bc7dc0b766263bb7a1a4d342d27c57d7989ff152
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
bp = Blueprint('file_management', __name__)
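# routes is imported only after `bp` is created so that app.file_management.routes
# (which typically imports `bp` to register view functions) loads without a
# circular import; this is the conventional Flask blueprint layout.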
from app.file_management import routes
| 22.4
| 43
| 0.821429
| 15
| 112
| 5.733333
| 0.666667
| 0.325581
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116071
| 112
| 5
| 44
| 22.4
| 0.868687
| 0
| 0
| 0
| 0
| 0
| 0.132743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
74efa79d02e726ff5ce2006cb72132cea91ea0e8
| 10,947
|
py
|
Python
|
cmframework/test/cmdsshandler_test.py
|
akraino-edge-stack/ta-config-manager
|
8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44
|
[
"Apache-2.0"
] | null | null | null |
cmframework/test/cmdsshandler_test.py
|
akraino-edge-stack/ta-config-manager
|
8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44
|
[
"Apache-2.0"
] | null | null | null |
cmframework/test/cmdsshandler_test.py
|
akraino-edge-stack/ta-config-manager
|
8a3f88d0dbf6afdb0130b9d35e563f8a54d15d44
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Nokia
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from mock import call
import json
from collections import OrderedDict
from cmframework.utils.cmdsshandler import CMDSSHandler
from cmframework.apis.cmerror import CMError
from dss.api import dss_error
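# Note: stacked @mock.patch decorators are applied bottom-up, so the patch
# closest to the function (logging) is injected first; hence every test below
# receives (self, mock_logging, mock_dss_client) in that order.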
class CMDSSHandlerTest(unittest.TestCase):
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_init(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.assert_called_once_with('test_uri')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_domains_exception(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.side_effect = dss_error.Error('no domains')
with self.assertRaises(CMError) as context:
handler.get_domains()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_domains(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
expected_result = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domains.return_value = expected_result
domains = handler.get_domains()
assert domains == expected_result
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_domain_not_existing(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
domain = handler.get_domain('not domain')
assert domain is None
mock_dss_client.return_value.get_domains.assert_called_once()
mock_dss_client.return_value.get_domain.assert_not_called()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_domain_dss_fails(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.side_effect = dss_error.Error('some error')
with self.assertRaises(CMError) as context:
domain = handler.get_domain('a domain')
mock_dss_client.return_value.get_domains.assert_called_once()
mock_dss_client.return_value.get_domain.assert_called_once_with('a domain')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_domain(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
expected_result = OrderedDict([('name1', 'value1'), ('name2', 'value2')])
mock_dss_client.return_value.get_domain.return_value = expected_result
domain = handler.get_domain('a domain')
assert domain == expected_result
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_set_dss_fails(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.set.side_effect = dss_error.Error('some error')
with self.assertRaises(CMError) as context:
handler.set('a domain', 'a name', 'a value')
mock_dss_client.return_value.set.assert_called_once_with('a domain', 'a name', 'a value')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_set(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
handler.set('a domain', 'a name', 'a value')
mock_dss_client.return_value.set.assert_called_once_with('a domain', 'a name', 'a value')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_dss_fails(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.return_value = OrderedDict([('name', 'value')])
mock_dss_client.return_value.delete.side_effect = dss_error.Error('some error')
with self.assertRaises(CMError) as context:
handler.delete('a domain', 'name')
mock_dss_client.return_value.delete.assert_called_once_with('a domain', 'name')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_non_existing_name(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.return_value = OrderedDict([('name', 'value')])
handler.delete('a domain', 'a name')
mock_dss_client.return_value.get_domain.assert_called_once_with('a domain')
mock_dss_client.return_value.delete.assert_not_called()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_non_existing_domain(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
handler.delete('not domain', 'no name')
mock_dss_client.return_value.get_domains.assert_called_once()
mock_dss_client.return_value.get_domain.assert_not_called()
mock_dss_client.return_value.delete.assert_not_called()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_dss_fails(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.side_effect = dss_error.Error('some error')
with self.assertRaises(CMError) as context:
handler.get('a domain', 'name')
mock_dss_client.return_value.get_domain.assert_called_once_with('a domain')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_non_existing_name(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.return_value = OrderedDict([('name', 'value')])
value = handler.get('a domain', 'a name')
assert value is None
mock_dss_client.return_value.get_domain.assert_called_once_with('a domain')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get_non_existing_domain(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.return_value = OrderedDict([('name', 'value')])
value = handler.get('some domain', 'a name')
assert value is None
mock_dss_client.return_value.get_domain.assert_not_called()
mock_dss_client.return_value.get_domains.assert_called_once()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_get(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.get_domain.return_value = OrderedDict([('name', 'value')])
value = handler.get('a domain', 'name')
assert value == 'value'
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_domain_dss_fails(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
mock_dss_client.return_value.delete_domain.side_effect = dss_error.Error('some error')
with self.assertRaises(CMError) as context:
handler.delete_domain('a domain')
mock_dss_client.return_value.delete_domain.assert_called_once_with('a domain')
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_domain_non_existent(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
handler.delete_domain('not domain')
mock_dss_client.return_value.get_domains.assert_called_once()
mock_dss_client.return_value.delete_domain.assert_not_called()
@mock.patch('cmframework.utils.cmdsshandler.dss_client.Client')
@mock.patch('cmframework.utils.cmdsshandler.logging')
def test_delete_domain(self, mock_logging, mock_dss_client):
handler = CMDSSHandler(uri='test_uri')
mock_dss_client.return_value.get_domains.return_value = ['a domain', 'b domain', 'c domain']
handler.delete_domain('a domain')
mock_dss_client.return_value.get_domains.assert_called_once()
mock_dss_client.return_value.delete_domain.assert_called_once_with('a domain')
if __name__ == '__main__':
unittest.main()
| 42.430233
| 100
| 0.730063
| 1,450
| 10,947
| 5.211034
| 0.084828
| 0.100053
| 0.113552
| 0.118184
| 0.867258
| 0.863684
| 0.850582
| 0.848465
| 0.834833
| 0.826628
| 0
| 0.0013
| 0.156755
| 10,947
| 257
| 101
| 42.595331
| 0.817246
| 0.049511
| 0
| 0.660494
| 0
| 0
| 0.237683
| 0.148961
| 0
| 0
| 0
| 0
| 0.209877
| 1
| 0.111111
| false
| 0
| 0.049383
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2d07488f1b9312b8fb796945441f4f88c35afe8a
| 27
|
py
|
Python
|
parser/__init__.py
|
magarnicle/vcarded
|
d8663c0fd6a4472d1bf7a56e43dc134e7a853817
|
[
"Unlicense"
] | null | null | null |
parser/__init__.py
|
magarnicle/vcarded
|
d8663c0fd6a4472d1bf7a56e43dc134e7a853817
|
[
"Unlicense"
] | null | null | null |
parser/__init__.py
|
magarnicle/vcarded
|
d8663c0fd6a4472d1bf7a56e43dc134e7a853817
|
[
"Unlicense"
] | null | null | null |
from parser.parse import *
| 13.5
| 26
| 0.777778
| 4
| 27
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7422b791b165d64a9c090a04a0ee6a03c755c42d
| 1,890
|
py
|
Python
|
mycode.py
|
Shaik-sameer-AIML/mobilerobot-openloopcontrol
|
47748cc44a35d280db5d3f7c72a1c28c4c07e4d2
|
[
"BSD-3-Clause"
] | null | null | null |
mycode.py
|
Shaik-sameer-AIML/mobilerobot-openloopcontrol
|
47748cc44a35d280db5d3f7c72a1c28c4c07e4d2
|
[
"BSD-3-Clause"
] | null | null | null |
mycode.py
|
Shaik-sameer-AIML/mobilerobot-openloopcontrol
|
47748cc44a35d280db5d3f7c72a1c28c4c07e4d2
|
[
"BSD-3-Clause"
] | null | null | null |
from robomaster import robot
import time
if __name__ == '__main__':
ep_robot = robot.Robot()
ep_robot.initialize(conn_type="ap")
ep_chassis = ep_robot.chassis
ep_led = ep_robot.led
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
ep_chassis.move(x=2, y=0, z=0, xy_speed=1).wait_for_completed()
ep_led.set_led(comp="all",r=80,g=255,b=30,effect="on")
ep_chassis.move(x=0, y=0, z=80, xy_speed=1).wait_for_completed()
ep_led.set_led(comp="all",r=0,g=100,b=255,effect="on")
ep_chassis.move(x=2, y=0, z=0, xy_speed=1).wait_for_completed()
ep_led.set_led(comp="all",r=100,g=70,b=255,effect="on")
ep_chassis.move(x=-2, y=0, z=0, xy_speed=1).wait_for_completed()
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
ep_chassis.move(x=0, y=0, z=90, xy_speed=1).wait_for_completed()
ep_led.set_led(comp="all",r=80,g=255,b=30,effect="on")
ep_chassis.move(x=0.5, y=0, z=0, xy_speed=1).wait_for_completed()
ep_chassis.drive_speed(x=0.2,y=0,z=50)
time.sleep(20)
ep_chassis.drive_speed(x=0,y=0,z=0)
ep_chassis.drive_speed(x=0,y=0.2,z=50)
time.sleep(20)
ep_chassis.drive_speed(x=0,y=0,z=0)
for i in range(10):
ep_led.set_led(comp="all",r=255,g=0,b=0,effect="on")
time.sleep(0.1)
ep_led.set_led(comp="all",r=0,g=255,b=0,effect="on")
time.sleep(0.1)
ep_led.set_led(comp="all",r=0,g=0,b=255,effect="on")
time.sleep(0.1)
print("Completed...")
ep_chassis.drive_speed(x=0,y=0.2,z=100)
time.sleep(20)
ep_chassis.drive_speed(x=0,y=0,z=0)
ep_led.set_led(comp="all",r=0,g=0,b=255,effect="on")
time.sleep(0.1)
ep_robot.close()
## OUTPUT:

| 35
| 112
| 0.639153
| 388
| 1,890
| 2.93299
| 0.170103
| 0.102812
| 0.077329
| 0.106327
| 0.760984
| 0.760984
| 0.760984
| 0.734622
| 0.734622
| 0.732865
| 0
| 0.082759
| 0.156085
| 1,890
| 54
| 112
| 35
| 0.630721
| 0.003704
| 0
| 0.487805
| 0
| 0
| 0.039894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.04878
| null | null | 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
742c6a53f115d2b8a18c4ed73dc55c5b1e2cc9d4
| 8,584
|
py
|
Python
|
tests/unit/test_test_matchers.py
|
RerrerBuub/asciidoxy
|
3402f37d59e30975e9919653465839e396f05513
|
[
"Apache-2.0"
] | 14
|
2020-04-28T08:51:43.000Z
|
2022-02-12T13:40:34.000Z
|
tests/unit/test_test_matchers.py
|
RerrerBuub/asciidoxy
|
3402f37d59e30975e9919653465839e396f05513
|
[
"Apache-2.0"
] | 47
|
2020-05-18T14:19:31.000Z
|
2022-03-04T13:46:46.000Z
|
tests/unit/test_test_matchers.py
|
RerrerBuub/asciidoxy
|
3402f37d59e30975e9919653465839e396f05513
|
[
"Apache-2.0"
] | 8
|
2020-05-17T20:52:42.000Z
|
2022-02-25T16:16:01.000Z
|
# Copyright (C) 2019-2021, TomTom (http://tomtom.com).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test test functionality for partial model comparison."""
from asciidoxy.model import Compound, Parameter
from .matchers import (AtLeast, HasNot, IsEmpty, IsFalse, IsNone, IsNotEmpty, IsTrue, PartialModel,
SizeIs, Unordered, m_compound)
def test_match_only_specified_attributes():
PartialModel(Compound, id="abc").assert_matches(Compound(id="abc"))
PartialModel(Compound, id="abc").assert_matches(Compound(id="abc", name="name"))
PartialModel(Compound, name="name").assert_matches(Compound(id="abc", name="name"))
PartialModel(Compound, id="abc", name="name").assert_matches(Compound(id="abc", name="name"))
PartialModel(Compound, id="abdc").assert_not_matches(Compound(id="abc"))
PartialModel(Compound, id="abdc").assert_not_matches(Compound(id="abc", name="name"))
PartialModel(Compound, name="dname").assert_not_matches(Compound(id="abc", name="name"))
PartialModel(Compound, id="abdc",
name="dname").assert_not_matches(Compound(id="abc", name="name"))
def test_match_nested_expectations():
PartialModel(Compound, members=[PartialModel(Compound, id="abc")
]).assert_matches(Compound(members=[Compound(id="abc")]))
PartialModel(Compound,
members=[PartialModel(Compound, members=[PartialModel(Compound, id="abc")])
]).assert_matches(
Compound(members=[Compound(members=[Compound(id="abc")])]))
PartialModel(Compound, members=[PartialModel(Compound, id="abcd")
]).assert_not_matches(Compound(members=[Compound(id="abc")]))
PartialModel(Compound,
members=[PartialModel(Compound,
id="abcd")]).assert_not_matches(Compound(members=[]))
PartialModel(Compound,
members=[PartialModel(Compound, members=[PartialModel(Compound, id="abcd")])
]).assert_not_matches(
Compound(members=[Compound(members=[Compound(id="abc")])]))
def test_match_nested_original_objects():
PartialModel(Compound, members=[Compound(id="abc")
]).assert_matches(Compound(members=[Compound(id="abc")]))
PartialModel(Compound,
members=[PartialModel(Compound, members=[Compound(id="abc")])]).assert_matches(
Compound(members=[Compound(members=[Compound(id="abc")])]))
PartialModel(Compound, members=[Compound(id="dabc")
]).assert_not_matches(Compound(members=[Compound(id="abc")]))
PartialModel(Compound, members=[PartialModel(Compound, members=[Compound(id="dabc")])
]).assert_not_matches(
Compound(members=[Compound(members=[Compound(id="abc")])]))
def test_type_must_match():
PartialModel(Compound, id="abc").assert_matches(Compound(id="abc"))
PartialModel(Compound, name="abc").assert_not_matches(Parameter(name="abc"))
def test_unordered():
m_compound(members=Unordered(m_compound(name="a"), m_compound(name="b"))).assert_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=Unordered(m_compound(name="b"), m_compound(name="a"))).assert_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=Unordered(m_compound(name="a"), m_compound(name="b"))).assert_matches(
Compound(members=[Compound(name="b"), Compound(name="a")]))
m_compound(members=Unordered(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="c"), Compound(name="b")]))
m_compound(members=Unordered(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="a")]))
def test_atleast():
m_compound(members=AtLeast(m_compound(name="a"), m_compound(name="b"))).assert_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=AtLeast(m_compound(name="a"))).assert_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=AtLeast(m_compound(name="b"))).assert_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=AtLeast(m_compound(name="a"), m_compound(name="c"))).assert_not_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
m_compound(members=AtLeast(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="a"), Compound(name="c")]))
m_compound(members=AtLeast(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="a")]))
def test_hasnot():
m_compound(members=HasNot(m_compound(name="a"))).assert_matches(
Compound(members=[Compound(name="c"), Compound(name="d")]))
m_compound(members=HasNot(m_compound(name="a"), m_compound(name="b"))).assert_matches(
Compound(members=[Compound(name="c"), Compound(name="d")]))
m_compound(members=HasNot(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="a"), Compound(name="d")]))
m_compound(members=HasNot(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="c"), Compound(name="b")]))
m_compound(members=HasNot(m_compound(name="a"), m_compound(name="b"))).assert_not_matches(
Compound(members=[Compound(name="a"), Compound(name="b")]))
def test_isempty():
m_compound(name=IsEmpty()).assert_matches(Compound(name=None))
m_compound(name=IsEmpty()).assert_matches(Compound(name=""))
m_compound(name=IsEmpty()).assert_not_matches(Compound(name="bla"))
def test_isnotempty():
m_compound(name=IsNotEmpty()).assert_matches(Compound(name="bla"))
m_compound(name=IsNotEmpty()).assert_matches(Compound(name=" "))
m_compound(name=IsNotEmpty()).assert_matches(Compound(name=" "))
m_compound(name=IsNotEmpty()).assert_matches(Compound(name="\t"))
m_compound(name=IsNotEmpty()).assert_not_matches(Compound(name=""))
m_compound(name=IsNotEmpty()).assert_not_matches(Compound(name=None))
def test_isfalse():
m_compound(static=IsFalse()).assert_matches(Compound(static=False))
m_compound(static=IsFalse()).assert_not_matches(Compound(static=True))
m_compound(static=IsFalse()).assert_not_matches(Compound(static="bla"))
m_compound(static=IsFalse()).assert_not_matches(Compound(static=None))
def test_istrue():
m_compound(static=IsTrue()).assert_matches(Compound(static=True))
m_compound(static=IsTrue()).assert_not_matches(Compound(static=False))
m_compound(static=IsTrue()).assert_not_matches(Compound(static="bla"))
m_compound(static=IsTrue()).assert_not_matches(Compound(static=None))
def test_isnone():
m_compound(name=IsNone()).assert_matches(Compound(name=None))
m_compound(name=IsNone()).assert_not_matches(Compound(name=""))
m_compound(name=IsNone()).assert_not_matches(Compound(name="bla"))
m_compound(name=IsNone()).assert_not_matches(Compound(name=0))
def test_sizeis():
m_compound(members=SizeIs(1)).assert_matches(Compound(members=[Compound()]))
m_compound(members=SizeIs(2)).assert_matches(Compound(members=[Compound(), Compound()]))
m_compound(members=SizeIs(0)).assert_matches(Compound(members=[]))
m_compound(members=SizeIs(1)).assert_not_matches(Compound(members=[Compound(), Compound()]))
m_compound(members=SizeIs(2)).assert_not_matches(Compound(members=[Compound()]))
m_compound(members=SizeIs(0)).assert_not_matches(Compound(members=[Compound()]))
# Constructor replaces None with empty list
compound_members_none = Compound()
compound_members_none.members = None
m_compound(members=SizeIs(0)).assert_not_matches(compound_members_none)
| 50.792899
| 99
| 0.687092
| 1,054
| 8,584
| 5.406072
| 0.114801
| 0.185328
| 0.095823
| 0.138996
| 0.822218
| 0.805019
| 0.786943
| 0.769393
| 0.717269
| 0.583187
| 0
| 0.002728
| 0.145969
| 8,584
| 168
| 100
| 51.095238
| 0.774519
| 0.077936
| 0
| 0.350877
| 0
| 0
| 0.029762
| 0
| 0
| 0
| 0
| 0
| 0.552632
| 1
| 0.114035
| false
| 0
| 0.017544
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
744b26a34d0b11d61b4ad844fc454d894622bc00
| 334
|
py
|
Python
|
src/spaceone/inventory/connector/__init__.py
|
spaceone-dev/plugin-oracle-cloud-service-inven-collector
|
555dac75b69ebc9ea0f778d69e81407a88be1853
|
[
"Apache-2.0"
] | 2
|
2021-02-22T07:08:17.000Z
|
2021-04-19T10:16:28.000Z
|
src/spaceone/inventory/connector/__init__.py
|
spaceone-dev/plugin-oracle-cloud-service-inven-collector
|
555dac75b69ebc9ea0f778d69e81407a88be1853
|
[
"Apache-2.0"
] | null | null | null |
src/spaceone/inventory/connector/__init__.py
|
spaceone-dev/plugin-oracle-cloud-service-inven-collector
|
555dac75b69ebc9ea0f778d69e81407a88be1853
|
[
"Apache-2.0"
] | 4
|
2021-02-08T02:21:57.000Z
|
2021-06-25T02:28:35.000Z
|
from spaceone.inventory.libs.connector import OCIConnector
from spaceone.inventory.connector.autonomous_database import AutonomousDatabaseConnector
from spaceone.inventory.connector.baremetal_vm_database import BareMetalVMDatabaseConnector
from spaceone.inventory.connector.exadata_cloud_database import ExadataCloudDatabaseConnector
| 66.8
| 93
| 0.916168
| 33
| 334
| 9.121212
| 0.484848
| 0.159468
| 0.27907
| 0.299003
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047904
| 334
| 4
| 94
| 83.5
| 0.946541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
745f7e3fef475e081b2c57b32039dbc3780619e7
| 109
|
py
|
Python
|
Python/myprojects/models/__init__.py
|
iatanasov77/my-web-projects-guis
|
833899dc99ed952ee813c49f0d5852da498d93eb
|
[
"MIT"
] | null | null | null |
Python/myprojects/models/__init__.py
|
iatanasov77/my-web-projects-guis
|
833899dc99ed952ee813c49f0d5852da498d93eb
|
[
"MIT"
] | null | null | null |
Python/myprojects/models/__init__.py
|
iatanasov77/my-web-projects-guis
|
833899dc99ed952ee813c49f0d5852da498d93eb
|
[
"MIT"
] | null | null | null |
from .postmodel import *
from .commentmodel import *
from .questionmodel import *
from .choicemodel import *
| 21.8
| 28
| 0.779817
| 12
| 109
| 7.083333
| 0.5
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146789
| 109
| 4
| 29
| 27.25
| 0.913978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
74a7273d74378282c6c8da6a8a51a09ffb640c7c
| 24
|
py
|
Python
|
midas_web_solution/midas_web_solution/settings/__init__.py
|
jupiny/MIDASWebSolution
|
c6250bb7aeab815b3c759ae4f7b419da50c26b1c
|
[
"MIT"
] | null | null | null |
midas_web_solution/midas_web_solution/settings/__init__.py
|
jupiny/MIDASWebSolution
|
c6250bb7aeab815b3c759ae4f7b419da50c26b1c
|
[
"MIT"
] | null | null | null |
midas_web_solution/midas_web_solution/settings/__init__.py
|
jupiny/MIDASWebSolution
|
c6250bb7aeab815b3c759ae4f7b419da50c26b1c
|
[
"MIT"
] | null | null | null |
from .partials import *
| 12
| 23
| 0.75
| 3
| 24
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 24
| 1
| 24
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
77cabf0231475641fc08f1640f659ef30f41c2f0
| 114
|
py
|
Python
|
Demos/HelloWorld/Assets/Test.py
|
WhoBrokeTheBuild/Ryme
|
945c4ab135dbe411f43787cbf222589b13420d4b
|
[
"MIT"
] | null | null | null |
Demos/HelloWorld/Assets/Test.py
|
WhoBrokeTheBuild/Ryme
|
945c4ab135dbe411f43787cbf222589b13420d4b
|
[
"MIT"
] | null | null | null |
Demos/HelloWorld/Assets/Test.py
|
WhoBrokeTheBuild/Ryme
|
945c4ab135dbe411f43787cbf222589b13420d4b
|
[
"MIT"
] | null | null | null |
import ryme
ryme.Log("Hello, World!")
ryme.Log(repr(ryme.GetVersion()))
ryme.Log(repr(ryme.GetCurrentPath()))
| 12.666667
| 37
| 0.710526
| 16
| 114
| 5.0625
| 0.5
| 0.259259
| 0.271605
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087719
| 114
| 8
| 38
| 14.25
| 0.778846
| 0
| 0
| 0
| 0
| 0
| 0.115044
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77ccd1dd7eaa3977f52ebf40d1349e25ff3bf7f7
| 52
|
py
|
Python
|
submod_b/subpackages/submod_b/__init__.py
|
SpenserHaddad/pyinstaller-namespace-issue-repro
|
0578b4063279ef95ce2c0735b76fe2c75945ad4f
|
[
"MIT"
] | null | null | null |
submod_b/subpackages/submod_b/__init__.py
|
SpenserHaddad/pyinstaller-namespace-issue-repro
|
0578b4063279ef95ce2c0735b76fe2c75945ad4f
|
[
"MIT"
] | null | null | null |
submod_b/subpackages/submod_b/__init__.py
|
SpenserHaddad/pyinstaller-namespace-issue-repro
|
0578b4063279ef95ce2c0735b76fe2c75945ad4f
|
[
"MIT"
] | null | null | null |
def sayhi():
return "Hello from submodule B!!!"
| 17.333333
| 38
| 0.634615
| 7
| 52
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 52
| 2
| 39
| 26
| 0.804878
| 0
| 0
| 0
| 0
| 0
| 0.480769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
77d436d7c37ad9bfe4ac8247a98cd877859555d8
| 34
|
py
|
Python
|
bumpv/client/config/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | null | null | null |
bumpv/client/config/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | null | null | null |
bumpv/client/config/__init__.py
|
kylie-a/bumpversion
|
13a150daa02f29e7dd74b5240c54c7929ec176b8
|
[
"MIT"
] | 1
|
2019-11-24T15:36:19.000Z
|
2019-11-24T15:36:19.000Z
|
from .config import Configuration
| 17
| 33
| 0.852941
| 4
| 34
| 7.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 34
| 1
| 34
| 34
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
77ed766a4e2ebc7683b208018ab9e6092c469125
| 3,171
|
py
|
Python
|
src/pyvalidations/rules/datetime.py
|
MajAhd/py_validations
|
372b2b40c5b4e1565c04b1ec53b195596261c50d
|
[
"MIT"
] | 6
|
2021-12-15T23:33:07.000Z
|
2022-02-13T17:12:53.000Z
|
src/pyvalidations/rules/datetime.py
|
MajAhd/py_validation
|
372b2b40c5b4e1565c04b1ec53b195596261c50d
|
[
"MIT"
] | null | null | null |
src/pyvalidations/rules/datetime.py
|
MajAhd/py_validation
|
372b2b40c5b4e1565c04b1ec53b195596261c50d
|
[
"MIT"
] | null | null | null |
from datetime import datetime
import re
class DateTime:
"""
Validate value is date/time
:param value : string
"""
def __init__(self, value):
self.value = value
def is_date(self):
"""
        check date : YYYY-MM-DD
:return: bool
"""
try:
return re.match(r'([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01]))', self.value)
except Exception:
return False
def is_time(self):
"""
check Time : HH:MM AM , HH:MM PM , HH:MM
:return: bool
"""
try:
return re.match(r'^([0-1]?[0-9]|2[0-3]):[0-5][0-9]([AaPp][Mm])?$', self.value)
except Exception:
return False
def is_date_time(self):
"""
        check datetime : YYYY-MM-DD HH:MM
:return: bool
"""
try:
return re.match(
r'([12]\d{3}-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])) ([0-1]?[0-9]|2[0-3]):[0-5][0-9]$',
self.value)
except Exception:
return False
def is_timezone(self):
"""
        check timezone : +01:30 , -02:00
:return: bool
"""
try:
return re.match(r'[+-][0-9]{2}:[0-9]{2}\b', self.value)
except Exception:
return False
def date_equals(self, target):
"""
        check date : YYYY-MM-DD == YYYY-MM-DD
        :param target: YYYY-MM-DD
        :return: bool
        """
        try:
            dt1 = datetime.strptime(self.value, "%Y-%m-%d")
            dt2 = datetime.strptime(target, "%Y-%m-%d")
return dt1 == dt2
except Exception:
return False
def is_after(self, target):
"""
check datetime : YYYY-MM-DD > YYYY-MM-DD
:param target: YYYY-MM-DD
:return: bool
"""
try:
dt1 = datetime.strptime(self.value, "%Y-%m-%d")
dt2 = datetime.strptime(target, "%Y-%m-%d")
return dt1 > dt2
except Exception:
return False
def is_after_or_equal(self, target):
"""
check datetime : YYYY-MM-DD >= YYYY-MM-DD
:param target: YYYY-MM-DD
:return: bool
"""
try:
dt1 = datetime.strptime(self.value, "%Y-%m-%d")
dt2 = datetime.strptime(target, "%Y-%m-%d")
return dt1 >= dt2
except Exception:
return False
def is_before(self, target):
"""
check datetime : YYYY-MM-DD < YYYY-MM-DD
:param target: YYYY-MM-DD
:return: bool
"""
try:
dt1 = datetime.strptime(self.value, "%Y-%m-%d")
dt2 = datetime.strptime(target, "%Y-%m-%d")
return dt1 < dt2
except Exception:
return False
def is_before_or_equal(self, target):
"""
check datetime : YYYY-MM-DD <= YYYY-MM-DD
:param target: YYYY-MM-DD
:return: bool
"""
try:
dt1 = datetime.strptime(self.value, "%Y-%m-%d")
dt2 = datetime.strptime(target, "%Y-%m-%d")
return dt1 <= dt2
except Exception:
return False
| 26.425
| 102
| 0.465784
| 402
| 3,171
| 3.629353
| 0.136816
| 0.046607
| 0.069911
| 0.160384
| 0.840302
| 0.827279
| 0.823167
| 0.797121
| 0.695682
| 0.671693
| 0
| 0.045086
| 0.377483
| 3,171
| 119
| 103
| 26.647059
| 0.694022
| 0.193945
| 0
| 0.596774
| 0
| 0.048387
| 0.126525
| 0.089923
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.032258
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
77f28698c19737832656f6ebadfaa2056f303f89
| 132
|
py
|
Python
|
src/Foundation/Module/mypkg/mod3.py
|
mryyomutga/PracticePython
|
e191d73064248d0983344b137fbe6b69e5eb1d12
|
[
"MIT"
] | null | null | null |
src/Foundation/Module/mypkg/mod3.py
|
mryyomutga/PracticePython
|
e191d73064248d0983344b137fbe6b69e5eb1d12
|
[
"MIT"
] | null | null | null |
src/Foundation/Module/mypkg/mod3.py
|
mryyomutga/PracticePython
|
e191d73064248d0983344b137fbe6b69e5eb1d12
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Module/mypkg/mod3.py
class MyClass():
def func3(self):
print("./mypkg/mod3.py")
print("func3")
| 18.857143
| 27
| 0.583333
| 18
| 132
| 4.277778
| 0.722222
| 0.233766
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046296
| 0.181818
| 132
| 6
| 28
| 22
| 0.666667
| 0.318182
| 0
| 0
| 0
| 0
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
77f6fe5689f503338b3dd8893892e3c6aeea2a19
| 138
|
py
|
Python
|
online_pharmacy/cart/views.py
|
geekyJock8/online_pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 5
|
2020-09-09T13:59:17.000Z
|
2021-09-30T07:20:55.000Z
|
online_pharmacy/cart/views.py
|
geekyJock8/online_pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 10
|
2017-09-03T06:13:31.000Z
|
2017-10-10T15:22:30.000Z
|
online_pharmacy/cart/views.py
|
geekyJock8/Online-Pharmacy
|
892852857786ec17259b71f2a178896cd6d12e60
|
[
"Apache-2.0"
] | 9
|
2017-09-03T04:59:18.000Z
|
2019-10-17T11:33:18.000Z
|
from django.http import HttpResponse
def showCart(request):
return HttpResponse('<h1>This is the cart of the required user. </h1>')
| 23
| 75
| 0.73913
| 20
| 138
| 5.1
| 0.85
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017241
| 0.15942
| 138
| 5
| 76
| 27.6
| 0.862069
| 0
| 0
| 0
| 0
| 0
| 0.350365
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
7ae7ad6c6e8005228328a3deaa9863517a65cdf0
| 85
|
py
|
Python
|
ranzen/torch/__init__.py
|
predictive-analytics-lab/mantra
|
6c63d1d1e01745f31dbdc7c34f6c7932bcdccef8
|
[
"Apache-2.0"
] | null | null | null |
ranzen/torch/__init__.py
|
predictive-analytics-lab/mantra
|
6c63d1d1e01745f31dbdc7c34f6c7932bcdccef8
|
[
"Apache-2.0"
] | 4
|
2021-11-03T18:48:36.000Z
|
2022-03-16T14:01:45.000Z
|
ranzen/torch/__init__.py
|
wearepal/ranzen
|
e249220026ccb5c05218c7202866690b5447d37e
|
[
"Apache-2.0"
] | null | null | null |
from .data import *
from .loss import *
from .sampling import *
from .utils import *
| 17
| 23
| 0.717647
| 12
| 85
| 5.083333
| 0.5
| 0.491803
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.188235
| 85
| 4
| 24
| 21.25
| 0.884058
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
7aed17745ee389a2a253ca209da95b23e8120ec6
| 94
|
py
|
Python
|
python/multiply/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | 2
|
2020-02-22T08:47:51.000Z
|
2021-05-21T22:21:55.000Z
|
python/multiply/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | null | null | null |
python/multiply/solution.py
|
hiljusti/codewars-solutions
|
1a423e8cb0fbcac94738f6e51dc333f057b0a731
|
[
"WTFPL"
] | 1
|
2021-11-09T17:22:10.000Z
|
2021-11-09T17:22:10.000Z
|
# https://www.codewars.com/kata/50654ddff44f800200000004
def multiply(a, b):
return a * b
| 15.666667
| 56
| 0.723404
| 13
| 94
| 5.230769
| 0.846154
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234568
| 0.138298
| 94
| 5
| 57
| 18.8
| 0.604938
| 0.574468
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2495089e3bc04ee397577a6be58e07f9ed45a8e7
| 7,324
|
py
|
Python
|
backpack/extensions/secondorder/hbp/linear.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/linear.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
backpack/extensions/secondorder/hbp/linear.py
|
rioyokotalab/backpack
|
000a1dbe7b2d6e5b309151df800edf866b9b514c
|
[
"MIT"
] | null | null | null |
from backpack.core.derivatives.linear import LinearDerivatives
from backpack.extensions.secondorder.hbp.hbp_options import (
BackpropStrategy,
ExpectationApproximation,
)
from backpack.extensions.secondorder.hbp.hbpbase import HBPBaseModule
from backpack.utils.ein import einsum
class HBPLinear(HBPBaseModule):
def __init__(self):
super().__init__(derivatives=LinearDerivatives(), params=["weight", "bias"])
def weight(self, ext, module, g_inp, g_out, backproped):
bp_strategy = ext.get_backprop_strategy()
if BackpropStrategy.is_batch_average(bp_strategy):
return self._weight_for_batch_average(ext, module, backproped)
elif BackpropStrategy.is_sqrt(bp_strategy):
return self._weight_for_sqrt(ext, module, backproped)
def _weight_for_batch_average(self, ext, module, backproped):
kron_factors = self._bias_for_batch_average(backproped)
kron_factors += self._factors_from_input(ext, module)
return kron_factors
def _weight_for_sqrt(self, ext, module, backproped):
kron_factors = self._factor_from_sqrt(backproped)
kron_factors += self._factors_from_input(ext, module)
return kron_factors
def _factors_from_input(self, ext, module):
ea_strategy = ext.get_ea_strategy()
if ExpectationApproximation.should_average_param_jac(ea_strategy):
mean_input = self.__mean_input(module).unsqueeze(-1)
return [mean_input, mean_input.transpose()]
else:
return [self.__mean_input_outer(module)]
def _factor_from_sqrt(self, backproped):
return [einsum("vni,vnj->ij", (backproped, backproped))]
def bias(self, ext, module, g_inp, g_out, backproped):
bp_strategy = ext.get_backprop_strategy()
if BackpropStrategy.is_batch_average(bp_strategy):
return self._bias_for_batch_average(backproped)
elif BackpropStrategy.is_sqrt(bp_strategy):
return self._factor_from_sqrt(backproped)
def _bias_for_batch_average(self, backproped):
return [backproped]
def __mean_input(self, module):
_, flat_input = self.derivatives.batch_flat(module.input0)
return flat_input.mean(0)
def __mean_input_outer(self, module):
N, flat_input = self.derivatives.batch_flat(module.input0)
return einsum("ni,nj->ij", (flat_input, flat_input)) / N
class HBPLinearEfficient(HBPBaseModule):
def __init__(self):
super().__init__(
derivatives=LinearDerivatives(),
params=["weight", "bias"]
)
self._attr = 'kron_factors_from_sqrt'
def _set_bias_flag(self, module, value):
attr = '_bias_is_called_before_weight'
setattr(module, attr, value)
def _get_bias_flag(self, module):
attr = '_bias_is_called_before_weight'
return getattr(module, attr, False)
def _set_weight_flag(self, module, value):
attr = '_weight_is_called_before_weight'
setattr(module, attr, value)
def _get_weight_flag(self, module):
attr = '_weight_is_called_before_weight'
return getattr(module, attr, False)
def weight(self, ext, module, g_inp, g_out, backproped):
bp_strategy = ext.get_backprop_strategy()
attr = self._attr
kron_factors = None
if not self._get_bias_flag(module):
self._set_weight_flag(module, True)
if BackpropStrategy.is_batch_average(bp_strategy):
kron_factors = self._weight_for_batch_average(ext, module, backproped)
elif BackpropStrategy.is_sqrt(bp_strategy):
kron_factors = self._weight_for_sqrt(ext, module, backproped)
setattr(module, attr, kron_factors)
else:
kron_factors = getattr(module, attr)
self._set_bias_flag(module, False)
delattr(module, attr)
kron_factors += self._factors_from_input(ext, module)
return kron_factors
def _weight_for_batch_average(self, ext, module, backproped):
kron_factors = self._bias_for_batch_average(backproped)
return kron_factors
def _weight_for_sqrt(self, ext, module, backproped):
kron_factors = self._factor_from_sqrt(backproped, module)
return kron_factors
def _factors_from_input(self, ext, module):
ea_strategy = ext.get_ea_strategy()
if ExpectationApproximation.should_average_param_jac(ea_strategy):
mean_input = self.__mean_input(module).unsqueeze(-1)
return [mean_input, mean_input.transpose()]
else:
yield self.__mean_input_outer(module)
def _factor_from_sqrt(self, backproped, module):
return [einsum('bic,bjc->ij', (backproped, backproped))]
def bias(self, ext, module, g_inp, g_out, backproped):
bp_strategy = ext.get_backprop_strategy()
attr = self._attr
kron_factors = None
if not self._get_weight_flag(module):
self._set_bias_flag(module, True)
if BackpropStrategy.is_batch_average(bp_strategy):
kron_factors = self._bias_for_batch_average(backproped)
elif BackpropStrategy.is_sqrt(bp_strategy):
kron_factors = self._factor_from_sqrt(backproped, module)
setattr(module, attr, kron_factors)
else:
kron_factors = getattr(module, attr)
self._set_weight_flag(module, False)
delattr(module, attr)
return kron_factors
def _bias_for_batch_average(self, backproped):
return [backproped]
def __mean_input(self, module):
_, flat_input = self.derivatives.batch_flat(module.input0)
return flat_input.mean(0)
def __mean_input_outer(self, module):
N, flat_input = self.derivatives.batch_flat(module.input0)
return einsum('bi,bj->ij', (flat_input, flat_input)) / N
class HBPFRLinear(HBPLinearEfficient):
def _weight_for_batch_average(self, ext, module, backproped):
raise NotImplementedError("Undefined")
def _bias_for_batch_average(self, backproped):
raise NotImplementedError("Undefined")
def _factors_from_input(self, ext, module):
ea_strategy = ext.get_ea_strategy()
if ExpectationApproximation.should_average_param_jac(ea_strategy):
raise NotImplementedError("Undefined")
else:
yield self.__mean_input_outer(module)
def __mean_input_outer(self, module):
attr = 'last_flat_input'
last_fl_inp = getattr(module, attr, None)
N, flat_input = self.derivatives.batch_flat(module.input0)
if last_fl_inp is None:
setattr(module, attr, flat_input)
return einsum('bi,bj->ij', (flat_input, flat_input)) / N
else:
delattr(module, attr)
return einsum('bi,bj->ij', (flat_input, last_fl_inp)) / N
def _factor_from_sqrt(self, backproped, module):
attr = 'last_backproped'
last_bp = getattr(module, attr, None)
if last_bp is None:
setattr(module, attr, backproped)
return [einsum('bic,bjc->ij', (backproped, backproped))]
else:
delattr(module, attr)
return [einsum('bic,bjc->ij', (backproped, last_bp))]
| 35.553398
| 86
| 0.673266
| 874
| 7,324
| 5.262014
| 0.106407
| 0.057404
| 0.03392
| 0.028919
| 0.882148
| 0.815395
| 0.772777
| 0.71646
| 0.701022
| 0.676451
| 0
| 0.001607
| 0.235254
| 7,324
| 205
| 87
| 35.726829
| 0.819497
| 0
| 0
| 0.690789
| 0
| 0
| 0.04083
| 0.019391
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190789
| false
| 0
| 0.026316
| 0.026316
| 0.414474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
24d8bdd26e24a5d0554a74a1dd0dbb30e847cb4d
| 188
|
py
|
Python
|
Week1_PyThonBasic/InputOutputExerccise/Exercise1.py
|
minhvip2001/pythonproject
|
0ad9e70203fae2cd038872a8d1a71c0bc9416cf3
|
[
"MIT"
] | null | null | null |
Week1_PyThonBasic/InputOutputExerccise/Exercise1.py
|
minhvip2001/pythonproject
|
0ad9e70203fae2cd038872a8d1a71c0bc9416cf3
|
[
"MIT"
] | null | null | null |
Week1_PyThonBasic/InputOutputExerccise/Exercise1.py
|
minhvip2001/pythonproject
|
0ad9e70203fae2cd038872a8d1a71c0bc9416cf3
|
[
"MIT"
] | null | null | null |
number1 = int(input("Enter number 1: "))
number2 = int(input("Enter number 2: "))
def multiplication(number1, number2):
return number1 * number2
print(multiplication(number1, number2))
| 37.6
| 41
| 0.734043
| 23
| 188
| 6
| 0.521739
| 0.304348
| 0.188406
| 0.275362
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060976
| 0.12766
| 188
| 5
| 41
| 37.6
| 0.780488
| 0
| 0
| 0
| 0
| 0
| 0.169312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.2
| 0.4
| 0.2
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 6
|
24ea3463355371cb0f00a55e6bd59c982719a000
| 96
|
py
|
Python
|
venv/lib/python3.8/site-packages/toml/decoder.py
|
Retraces/UkraineBot
|
3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71
|
[
"MIT"
] | 2
|
2022-03-13T01:58:52.000Z
|
2022-03-31T06:07:54.000Z
|
venv/lib/python3.8/site-packages/toml/decoder.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | 19
|
2021-11-20T04:09:18.000Z
|
2022-03-23T15:05:55.000Z
|
venv/lib/python3.8/site-packages/toml/decoder.py
|
DesmoSearch/Desmobot
|
b70b45df3485351f471080deb5c785c4bc5c4beb
|
[
"MIT"
] | null | null | null |
/home/runner/.cache/pip/pool/85/21/93/2dffb65810d9fdd768087585cba37ae3b5f2312875a37acdda8e1d1058
| 96
| 96
| 0.895833
| 9
| 96
| 9.555556
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.427083
| 0
| 96
| 1
| 96
| 96
| 0.46875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7019f81dcc2a8932414b45e0800613e41099e1b2
| 51
|
py
|
Python
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/BruteForceAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 18
|
2021-08-20T15:07:55.000Z
|
2022-03-11T12:05:15.000Z
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/BruteForceAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 7
|
2021-11-09T06:46:58.000Z
|
2022-03-31T12:35:06.000Z
|
CybORG/CybORG/Shared/Actions/ShellActionsFolder/OpenConnectionFolder/CredentialAccessFolder/BruteForceAccessFolder/__init__.py
|
rafvasq/cage-challenge-1
|
95affdfa38afc1124f1a1a09c92fbc0ed5b96318
|
[
"MIT"
] | 13
|
2021-08-17T00:26:31.000Z
|
2022-03-29T20:06:45.000Z
|
from .SSHHydraBruteForce import SSHHydraBruteForce
| 25.5
| 50
| 0.901961
| 4
| 51
| 11.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 51
| 1
| 51
| 51
| 0.978723
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
702c3e09d098185abfb8f671a99d031e6b5a4436
| 7,053
|
py
|
Python
|
backend/rma/migrations/0001_initial.py
|
StichtingIAPC/swipe
|
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
backend/rma/migrations/0001_initial.py
|
StichtingIAPC/swipe
|
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
backend/rma/migrations/0001_initial.py
|
StichtingIAPC/swipe
|
d1ea35a40813d2d5e9cf9edde33148c0a825efc4
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import money.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('crm', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='CustomerRMATask',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('handled', models.BooleanField(default=False)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='crm.Customer')),
],
),
migrations.CreateModel(
name='CustomerTaskDescription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now=True)),
('date_modified', models.DateTimeField(auto_now_add=True)),
('text', models.TextField()),
('customer_rma_task', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='rma.CustomerRMATask')),
('user_created', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_customertaskdescription_created_by', to=settings.AUTH_USER_MODEL)),
('user_modified', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_customertaskdescription_modified_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='InternalRMA',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now=True)),
('date_modified', models.DateTimeField(auto_now_add=True)),
('state', models.CharField(max_length=3)),
('description', models.TextField()),
('customer', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='crm.Customer')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='InternalRMAState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now=True)),
('state', models.CharField(max_length=3)),
('internal_rma', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='rma.InternalRMA')),
('user_created', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_internalrmastate_created_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='RMACause',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now=True)),
('date_modified', models.DateTimeField(auto_now_add=True)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TestRMAState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now=True)),
('state', models.CharField(max_length=3)),
('user_created', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_testrmastate_created_by', to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='DirectRefundRMA',
fields=[
('rmacause_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='rma.RMACause')),
],
options={
'abstract': False,
},
bases=('rma.rmacause',),
),
migrations.CreateModel(
name='StockRMA',
fields=[
('rmacause_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='rma.RMACause')),
('value_currency', money.models.CurrencyField(max_length=3)),
('value', money.models.MoneyField(decimal_places=5, max_digits=28, no_currency_field=True)),
],
options={
'abstract': False,
},
bases=('rma.rmacause',),
),
migrations.CreateModel(
name='TestRMA',
fields=[
('rmacause_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='rma.RMACause')),
('state', models.CharField(max_length=3)),
],
options={
'abstract': False,
},
bases=('rma.rmacause',),
),
migrations.AddField(
model_name='rmacause',
name='user_created',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_rmacause_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='rmacause',
name='user_modified',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_rmacause_modified_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='internalrma',
name='rma_cause',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='rma.RMACause'),
),
migrations.AddField(
model_name='internalrma',
name='user_created',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_internalrma_created_by', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='internalrma',
name='user_modified',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='rma_internalrma_modified_by', to=settings.AUTH_USER_MODEL),
),
]
| 46.098039
| 191
| 0.595066
| 692
| 7,053
| 5.849711
| 0.156069
| 0.035573
| 0.058794
| 0.092391
| 0.79916
| 0.79916
| 0.76754
| 0.710227
| 0.710227
| 0.680089
| 0
| 0.005678
| 0.275911
| 7,053
| 152
| 192
| 46.401316
| 0.786959
| 0.009641
| 0
| 0.659722
| 1
| 0
| 0.142223
| 0.036952
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034722
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
7088d947d53275271d7105866e2038ba15ebc949
| 98
|
py
|
Python
|
python_nested_circular_imports/solution_case/dir_nest/shared_code/shared_callee.py
|
pmdscully/python_snippets
|
cd07ace1c0b5838fdbaeff08e72647946a8a7c5d
|
[
"Apache-2.0"
] | null | null | null |
python_nested_circular_imports/solution_case/dir_nest/shared_code/shared_callee.py
|
pmdscully/python_snippets
|
cd07ace1c0b5838fdbaeff08e72647946a8a7c5d
|
[
"Apache-2.0"
] | null | null | null |
python_nested_circular_imports/solution_case/dir_nest/shared_code/shared_callee.py
|
pmdscully/python_snippets
|
cd07ace1c0b5838fdbaeff08e72647946a8a7c5d
|
[
"Apache-2.0"
] | null | null | null |
import sys
def my_callee():
print(__file__ + " -> " + sys._getframe().f_code.co_name + "()")
| 19.6
| 68
| 0.602041
| 13
| 98
| 3.923077
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183673
| 98
| 5
| 68
| 19.6
| 0.6375
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
70aa30f9b23f7c055b558c12d48d78d41c35538e
| 67
|
py
|
Python
|
pytest_examples/test_all_fail.py
|
ds2643/runnerpy
|
44b802c34c8a4c29083365186a7c80bb703998bc
|
[
"MIT"
] | 2
|
2017-12-09T13:31:08.000Z
|
2017-12-30T06:28:24.000Z
|
pytest_examples/test_all_fail.py
|
ds2643/runnerpy
|
44b802c34c8a4c29083365186a7c80bb703998bc
|
[
"MIT"
] | null | null | null |
pytest_examples/test_all_fail.py
|
ds2643/runnerpy
|
44b802c34c8a4c29083365186a7c80bb703998bc
|
[
"MIT"
] | null | null | null |
def test_foo():
assert False
def test_bar():
assert False
| 11.166667
| 16
| 0.656716
| 10
| 67
| 4.2
| 0.6
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.253731
| 67
| 5
| 17
| 13.4
| 0.84
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3b1bbb670ea60d2d6c04d0c6528225de43777ab5
| 144
|
py
|
Python
|
dist/Basilisk/simulation/exponentialAtmosphere/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
dist/Basilisk/simulation/exponentialAtmosphere/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | 1
|
2019-03-13T20:52:22.000Z
|
2019-03-13T20:52:22.000Z
|
dist/Basilisk/simulation/exponentialAtmosphere/__init__.py
|
ian-cooke/basilisk_mag
|
a8b1e37c31c1287549d6fd4d71fcaa35b6fc3f14
|
[
"0BSD"
] | null | null | null |
# This __init__.py file for the exponentialAtmosphere package is automatically generated by the build system
from exponentialAtmosphere import *
| 72
| 108
| 0.854167
| 18
| 144
| 6.611111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 144
| 2
| 109
| 72
| 0.944444
| 0.736111
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b301ff447ec0b4f6ac272f4fabe15299f8ed82d
| 47
|
py
|
Python
|
stve/workspace/__init__.py
|
TE-ToshiakiTanaka/stve
|
30b1a0c9b8b20f7059999b0b25b16d6b43aa935c
|
[
"MIT"
] | null | null | null |
stve/workspace/__init__.py
|
TE-ToshiakiTanaka/stve
|
30b1a0c9b8b20f7059999b0b25b16d6b43aa935c
|
[
"MIT"
] | null | null | null |
stve/workspace/__init__.py
|
TE-ToshiakiTanaka/stve
|
30b1a0c9b8b20f7059999b0b25b16d6b43aa935c
|
[
"MIT"
] | null | null | null |
from stve.workspace.workspace import Workspace
| 23.5
| 46
| 0.87234
| 6
| 47
| 6.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085106
| 47
| 1
| 47
| 47
| 0.953488
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b44bc796b6f270f39233bdc03ee5f6e6ccecb4c
| 301
|
py
|
Python
|
initdb.py
|
tradaviahe1982/labman-master
|
0648410e1b449e8c21574a5bbbc0bcd9c38f1634
|
[
"MIT"
] | 10
|
2016-08-18T07:12:37.000Z
|
2019-10-13T06:35:27.000Z
|
initdb.py
|
PSOdAz/labman
|
591f698a12e474df08ccba2503174655cc6a2265
|
[
"MIT"
] | null | null | null |
initdb.py
|
PSOdAz/labman
|
591f698a12e474df08ccba2503174655cc6a2265
|
[
"MIT"
] | 4
|
2018-09-06T15:49:59.000Z
|
2020-09-29T13:16:21.000Z
|
#!/usr/bin/env python3
from app import db
if __name__ == '__main__':
print('Initializing the database...')
admin_username, admin_password = db.init_db()
print('Initialization done')
print('Initial admin username: {}, password: {}'.format(
admin_username, admin_password))
| 25.083333
| 60
| 0.677741
| 35
| 301
| 5.457143
| 0.657143
| 0.204188
| 0.188482
| 0.272251
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004082
| 0.186047
| 301
| 11
| 61
| 27.363636
| 0.77551
| 0.069767
| 0
| 0
| 0
| 0
| 0.340502
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.428571
| 0.142857
| 0
| 0.142857
| 0.428571
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 6
|
3b52f5c94fb5128dc7d9a50696d9770af42e4434
| 22,707
|
py
|
Python
|
tests/ED_spinful_fermions_test.py
|
anton-buyskikh/QuSpin
|
4e46b495e399414d9361d659e186492a1ac5b511
|
[
"BSD-3-Clause"
] | 195
|
2016-10-24T18:05:31.000Z
|
2022-03-29T10:11:56.000Z
|
tests/ED_spinful_fermions_test.py
|
cileeky/QuSpin
|
769d3817870f6ff55c4283af46f94e11c36f4121
|
[
"BSD-3-Clause"
] | 303
|
2016-10-25T20:08:11.000Z
|
2022-03-31T16:52:09.000Z
|
tests/ED_spinful_fermions_test.py
|
cileeky/QuSpin
|
769d3817870f6ff55c4283af46f94e11c36f4121
|
[
"BSD-3-Clause"
] | 54
|
2017-01-03T18:47:52.000Z
|
2022-03-16T06:54:33.000Z
|
from __future__ import print_function, division
import sys,os
quspin_path = os.path.join(os.getcwd(),"../")
sys.path.insert(0,quspin_path)
from quspin.operators import hamiltonian
from quspin.basis import spinful_fermion_basis_1d # Hilbert spaces
import numpy as np # general math functions
from itertools import product
import scipy.sparse as sp
from numpy.linalg import norm
from numpy.random import random,seed
#seed(0)
no_checks = dict()
#no_checks = dict(check_pcon=False,check_symm=False,check_herm=False)
dtypes=[np.float32,np.float64,np.complex64,np.complex128]
def eps(dtype):
return 9E-3
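# Note (added comment, not in the upstream file): each check_* helper below follows the
# same pattern -- build the Hamiltonian on the full basis, diagonalize it, rebuild it
# sector by sector for a given symmetry block, and require that the concatenated sector
# spectra match the full spectrum to within the dtype-dependent tolerance eps(dtype).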
def check_m(Lmax):
for dtype in dtypes:
for L in range(2,Lmax+1):
h1=[[2.0*random()-1.0,i] for i in range(L)]
h2=[[2.0*random()-1.0,i] for i in range(L)]
J1=[[2.0*random()-1.0,i,(i+1)%L] for i in range(L)]
J0=random()
J2p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J2m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
J0=random()
J1p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J1m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
static=[["z|n",J1],["+-|",J2p],["-+|",J2m],["|+-",J1p],["|-+",J1m],["z|",h1],["|n",h2]]
basis=spinful_fermion_basis_1d(L=L)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E=H.eigvalsh()
Em=[]
for Nf,Ndown in product(range(L+1),range(L+1)):
basis=spinful_fermion_basis_1d(L=L,Nf=(Nf,Ndown))
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Etemp=H.eigvalsh()
Em.append(Etemp)
Em=np.concatenate(Em)
Em.sort()
if norm(Em-E) > eps(dtype):
raise Exception( "test failed m symmetry at L={0:3d} with dtype {1} {2}".format(L,dtype,norm(Em-E) ) )
#check_m(5)
def check_z(L,dtype,Nf=None):
J1=[[2.0*random()-1.0,i,i] for i in range(L)]
J0=random()
J2p=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
J2m=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
J1p=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
J1m=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
static=[["z|z",J1],["+-|",J2p],["-+|",J2m],["|+-",J1p],["|-+",J1m]]
basis=spinful_fermion_basis_1d(L=L,Nf=Nf)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E=H.eigvalsh()
basis1=spinful_fermion_basis_1d(L=L,Nf=Nf,sblock=1)
H1=hamiltonian(static,[],dtype=dtype,basis=basis1,**no_checks)
basis2=spinful_fermion_basis_1d(L=L,Nf=Nf,sblock=-1)
H2=hamiltonian(static,[],dtype=dtype,basis=basis2,**no_checks)
E1=H1.eigvalsh()
E2=H2.eigvalsh()
Ez=np.concatenate((E1,E2))
Ez.sort()
if norm(Ez-E) > eps(dtype):
raise Exception( "test failed z symmetry at L={0:3d} with dtype {1} and Nf={2} {3}".format(L,np.dtype(dtype),Nf, norm(Ez-E)))
#check_z(4,np.float64,Nf=(2,2))
#check_z(4,np.complex128)
def check_p(L,dtype,Nf=None):
L_2=int(L/2)
hr=[2.0*random()-1.0 for i in range(L_2)]
hi=[hr[i] for i in range(L_2)]
hi.reverse()
hi.extend(hr)
h=[[hi[i],i] for i in range(L)]
J=[[1.0,i,i] for i in range(L)]
J0=random()
J2p=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
J2m=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
J0=random()
J1p=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
J1m=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",J1p],["-+|",J1m],["|+-",J2p],["|-+",J2m],["z|",h]]
#static=[["z|z",J],["+-|",J2p],["-+|",J2m],["|+-",J1p],["|-+",J1m]]
else:
static=[["z|z",J],["|+",h],["|-",h],["-|",h],["+|",h]]
basis=spinful_fermion_basis_1d(L=L,Nf=Nf)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E=H.eigvalsh()
basis1=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=1)
H1=hamiltonian(static,[],dtype=dtype,basis=basis1,**no_checks)
basis2=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=-1)
H2=hamiltonian(static,[],dtype=dtype,basis=basis2,**no_checks)
E1=H1.eigvalsh()
E2=H2.eigvalsh()
Ep=np.concatenate((E1,E2))
Ep.sort()
if norm(Ep-E) > eps(dtype):
raise Exception( "test failed p symmetry at L={0:3d} with dtype {1} and Nf={2} {3}".format(L,np.dtype(dtype),Nf,norm(Ep-E)) )
#check_p(4,np.float64,Nf=(1,3))
#check_p(4,np.float64)
def check_pz(L,dtype,Nf=None):
L_2=int(L/2)
hr=[2.0*random()-1.0 for i in range(L_2)]
hi=[hr[i] for i in range(L_2)]
hi.reverse()
hi.extend(hr)
h=[[hi[i],i] for i in range(L)]
J=[[1.0,i,i] for i in range(L)]
J0=random()
Jp=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
Jm=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
static=[["z|z",J],["+-|",Jp],["-+|",Jm],["|+-",Jp],["|-+",Jm],["z|",h],["|z",h]]
basis=spinful_fermion_basis_1d(L=L,Nf=Nf)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E=H.eigvalsh()
basis1=spinful_fermion_basis_1d(L=L,Nf=Nf,psblock=1)
H1=hamiltonian(static,[],dtype=dtype,basis=basis1,**no_checks)
basis2=spinful_fermion_basis_1d(L=L,Nf=Nf,psblock=-1)
H2=hamiltonian(static,[],dtype=dtype,basis=basis2,**no_checks)
E1=H1.eigvalsh()
E2=H2.eigvalsh()
Epz=np.concatenate((E1,E2))
Epz.sort()
if norm(Epz-E) > eps(dtype):
raise Exception( "test failed pz symmetry at L={0:3d} with dtype {1} and Nf={2:2d} {3}".format(L,np.dtype(dtype),Nf,norm(Epz-E)) )
#check_pz(4,np.float64,Nf=(2,2))
def check_p_z(L,dtype,Nf=None):
L_2=int(L/2)
hr=[2.0*random()-1.0 for i in range(L_2)]
hi=[hr[i] for i in range(L_2)]
hi.reverse()
hi.extend(hr)
h=[[hi[i],i] for i in range(L)]
J=[[1.0,i,i] for i in range(L)]
J0=random()
Jp=[[2.0*J0-1.0,i,i+1] for i in range(L-1)]
Jm=[[-(2.0*J0-1.0),i,i+1] for i in range(L-1)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",Jp],["-+|",Jm],["|+-",Jp],["|-+",Jm],["z|",h],["|z",h]]
else:
static=[["z|z",J],["+|",h],["-|",h],["|+",h],["|-",h]]
basis=spinful_fermion_basis_1d(L=L,Nf=Nf)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E=H.eigvalsh()
basis1=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=1,sblock=1)
H1=hamiltonian(static,[],dtype=dtype,basis=basis1,**no_checks)
basis2=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=-1,sblock=1)
H2=hamiltonian(static,[],dtype=dtype,basis=basis2,**no_checks)
basis3=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=1,sblock=-1)
H3=hamiltonian(static,[],dtype=dtype,basis=basis3,**no_checks)
basis4=spinful_fermion_basis_1d(L=L,Nf=Nf,pblock=-1,sblock=-1)
H4=hamiltonian(static,[],dtype=dtype,basis=basis4,**no_checks)
E1=H1.eigvalsh()
E2=H2.eigvalsh()
E3=H3.eigvalsh()
E4=H4.eigvalsh()
Epz=np.concatenate((E1,E2,E3,E4))
Epz.sort()
if norm(Epz-E) > eps(dtype):
raise Exception( "test failed pz symmetry at L={0:3d} with dtype {1} and Nf={2:2d} {3}".format(L,np.dtype(dtype),Nf,norm(Epz-E)) )
#check_p_z(4,np.float64,Nf=(2,2))
#check_p_z(4,np.complex128)
def check_obc(Lmax):
for dtype in dtypes:
for L in range(2,Lmax+1,2):
check_z(L,dtype,Nf=(L//2,L//2))
check_z(L,dtype)
for dtype in dtypes:
for L in range(2,Lmax+1,2):
for Nup in range(L+1):
check_t_p(L,dtype,Nf=(Nup,L-Nup))
check_p(L,dtype)
for dtype in dtypes:
for L in range(2,Lmax+1,2):
check_pz(L,dtype,Nf=(L//2,L//2))
check_pz(L,dtype)
for dtype in dtypes:
for L in range(2,Lmax+1,2):
check_p_z(L,dtype,Nf=(L//2,L//2))
check_p_z(L,dtype)
################################################
def check_t(L,dtype,Nf=None):
hx=random()
h=[[hx,i] for i in range(L)]
J=random()
J=[[J,i,(i+1)%L] for i in range(L)]
J0=random()
J2p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J2m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
J0=random()
J1p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J1m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",J1p],["-+|",J1m],["|+-",J2p],["|-+",J2m],["z|",h]]
else:
static=[["z|z",J],["+|",h],["-|",h],["|+",h],["|-",h]]
basis=spinful_fermion_basis_1d(L=L,Nf=Nf)
H=hamiltonian(static,[],dtype=dtype,basis=basis,**no_checks)
Ns=H.Ns
E,_=H.eigh()
#E=H.eigvalsh() # gives ValueError: On entry to CHBRDB parameter number 12 had an illegal value
Et=np.array([])
for kblock in range(0,L):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock)
Hk=hamiltonian(static,[],dtype=dtype,basis=basisk,**no_checks)
Et=np.append(Et,Hk.eigvalsh())
Et.sort()
if norm(Et-E) > eps(dtype):
raise Exception( "test failed t symmetry at L={0:3d} with dtype {1} and Nf={2} {3}".format(L,np.dtype(dtype),Nf,norm(Et-E)) )
#check_t(4,np.complex128,Nf=(1,3))
#check_t(4,np.complex128)
def check_t_z(L,dtype,Nf=None):
h0=random()
h=[[h0,i] for i in range(L)]
J0=random()
J=[[2.0*J0-1.0,i,i] for i in range(L)]
J0=random()
Jp=[[ 2.0*J0-1.0 ,i,(i+1)%L] for i in range(L)]
Jm=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",Jp],["-+|",Jm],["|+-",Jp],["|-+",Jm]]
else:
static=[["z|z",J],["+|",h],["-|",h],["|+",h],["|-",h]]
L_2=int(L/2)
for kblock in range(-L_2+1,L_2+1):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock)
Hk=hamiltonian(static,[],dtype=dtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,sblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,sblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
Ekz=np.append(Ek1,Ek2)
Ekz.sort()
if norm(Ek-Ekz) > eps(dtype):
raise Exception( "test failed t z symmetry at L={0:3d} with dtype {1} and Nf={2} {3}".format(L,np.dtype(dtype),Nf,norm(Ek-Ekz)) )
#check_t_z(4,np.complex128,Nf=(2,2))
#check_t_z(4,np.complex128)
def check_t_p(L,dtype,Nf=None):
hx=random()
h=[[hx,i] for i in range(L)]
J=random()
J=[[J,i,i] for i in range(L)]
J0=random()
J2p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J2m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
J0=random()
J1p=[[2.0*J0-1.0,i,(i+1)%L] for i in range(L)]
J1m=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",J1p],["-+|",J1m],["|+-",J2p],["|-+",J2m],["z|",h]]
else:
static=[["z|z",J],["+|",h],["-|",h],["|+",h],["|-",h]]
L_2=int(L/2)
if dtype is np.float32:
kdtype = np.complex64
elif dtype is np.float64:
kdtype = np.complex128
else:
kdtype = dtype
for kblock in range(-L_2+1,0):
basisk=spinful_fermion_basis_1d(L=L,kblock=kblock)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,kblock=kblock,pblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,kblock=kblock,pblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t p+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t p- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek2)) )
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0,pblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0,pblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
Ekp=np.append(Ek1,Ek2)
Ekp.sort()
if norm(Ek-Ekp) > eps(dtype):
raise Exception( "test failed t p symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,0,np.dtype(dtype),Nf,norm(Ek-Ekp)) )
if L%2 == 0:
for kblock in range(1,L_2):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t p+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t p- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2,pblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2,pblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
Ekp=np.append(Ek1,Ek2)
Ekp.sort()
if norm(Ek-Ekp) > eps(dtype):
raise Exception( "test failed t p symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,int(L/2),np.dtype(dtype),Nf,norm(Ek-Ekp)) )
else:
for kblock in range(1,L_2+1):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t p+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t p- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek2)) )
#check_t_p(4,np.complex128,Nf=(3,4))
#check_t_p(4,np.complex128)
def check_t_pz(L,dtype,Nf=None):
h0=random()
h=[[h0,i] for i in range(L)]
J=[[1.0,i,i] for i in range(L)]
J0=random()
Jp=[[ 2.0*J0-1.0 ,i,(i+1)%L] for i in range(L)]
Jm=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
static=[["z|z",J],["+-|",Jp],["-+|",Jm],["|+-",Jp],["|-+",Jm],["z|",h],["|z",h]]
if dtype is np.float32:
kdtype = np.complex64
elif dtype is np.float64:
kdtype = np.complex128
else:
kdtype = dtype
a=2
L_2=int(L/(a*2))
for kblock in range(-L_2+1,0):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t pz+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t pz- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek2)) )
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0,a=a)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0,a=a,psblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=0,a=a,psblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
Ekp=np.append(Ek1,Ek2)
Ekp.sort()
if norm(Ek-Ekp) > eps(dtype):
raise Exception( "test failed t pz symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,0,np.dtype(dtype),Nf,norm(Ek-Ekp)) )
if((L/a)%2 == 0):
for kblock in range(1,L_2):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t pz+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nup={3} {4}".format(L,kblock,np.dtype(dtype),Nup,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t pz- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nup={3} {4}".format(L,kblock,np.dtype(dtype),Nup,norm(Ek-Ek2)) )
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2,a=a)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2,a=a,psblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=L_2,a=a,psblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
Ekp=np.append(Ek1,Ek2)
Ekp.sort()
if norm(Ek-Ekp) > eps(dtype):
raise Exception( "test failed t pz symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nup={3} {4}".format(L,int(L/2),np.dtype(dtype),Nup,norm(Ek-Ekp)) )
else:
for kblock in range(1,L_2+1):
basisk=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a)
Hk=hamiltonian(static,[],dtype=kdtype,basis=basisk,**no_checks)
Ns=Hk.Ns
Ek=Hk.eigvalsh()
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=+1)
Hk1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,a=a,psblock=-1)
Hk2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ek1=Hk1.eigvalsh()
Ek2=Hk2.eigvalsh()
if norm(Ek-Ek1) > eps(dtype):
raise Exception( "test failed t pz+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek1)) )
if norm(Ek-Ek2) > eps(dtype):
raise Exception( "test failed t pz- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ek-Ek2)) )
#check_t_pz(8,np.complex128,Nf=(4,4))
#check_t_pz(6,np.float32)
def check_t_p_z(L,dtype,Nf=None):
h0=random()
h=[[h0,i] for i in range(L)]
J=[[1.0,i,i] for i in range(L)]
J0=random()
Jp=[[ 2.0*J0-1.0 ,i,(i+1)%L] for i in range(L)]
Jm=[[-(2.0*J0-1.0),i,(i+1)%L] for i in range(L)]
if type(Nf) is tuple:
if type(Nf[0]) is int and type(Nf[1]) is int:
static=[["z|z",J],["+-|",Jp],["-+|",Jm],["|+-",Jp],["|-+",Jm],["z|",h],["|z",h]]
else:
static=[["z|z",J],["+|",h],["-|",h],["|+",h],["|-",h]]
L_2=int(L/2)
for kblock in range(-L_2+1,L_2+1):
# print(kblock)
basisk1=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=+1)
Hkp1=hamiltonian(static,[],dtype=dtype,basis=basisk1,**no_checks)
basisk2=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=-1)
Hkp2=hamiltonian(static,[],dtype=dtype,basis=basisk2,**no_checks)
Ns=Hkp1.Ns
Ekp1=Hkp1.eigvalsh()
Ekp2=Hkp2.eigvalsh()
basisk11=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=+1,sblock=+1)
Hkpz11=hamiltonian(static,[],dtype=dtype,basis=basisk11,**no_checks)
basisk12=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=+1,sblock=-1)
Hkpz12=hamiltonian(static,[],dtype=dtype,basis=basisk12,**no_checks)
Ekpz11=Hkpz11.eigvalsh()
Ekpz12=Hkpz12.eigvalsh()
Ekpz1=np.concatenate((Ekpz11,Ekpz12))
Ekpz1.sort()
basisk21=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=-1,sblock=+1)
Hkpz21=hamiltonian(static,[],dtype=dtype,basis=basisk21,**no_checks)
basisk22=spinful_fermion_basis_1d(L=L,Nf=Nf,kblock=kblock,pblock=-1,sblock=-1)
Hkpz22=hamiltonian(static,[],dtype=dtype,basis=basisk22,**no_checks)
Ekpz21=Hkpz21.eigvalsh()
Ekpz22=Hkpz22.eigvalsh()
Ekpz2=np.concatenate((Ekpz21,Ekpz22))
Ekpz2.sort()
# print(basisk1)
# print(basisk11)
# print(basisk12)
#exit()
if norm(Ekp1-Ekpz1) > eps(dtype):
raise Exception( "test failed t z p+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ekp1-Ekpz1)) )
if norm(Ekp2-Ekpz2) > eps(dtype):
raise Exception( "test failed t z p- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ekp2-Ekpz2)) )
if(kblock not in [0,L_2]):
if norm(Ekp2-Ekpz1) > eps(dtype):
raise Exception( "test failed t z p+ symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ekp2-Ekpz1)) )
if norm(Ekp1-Ekpz2) > eps(dtype):
raise Exception( "test failed t z p- symmetry at L={0:3d} kblock={1:3d} with dtype {2} and Nf={3} {4}".format(L,kblock,np.dtype(dtype),Nf,norm(Ekp1-Ekpz2)) )
#check_t_p_z(8,np.complex128,Nf=(4,4))
#check_t_p_z(6,np.complex128)
def check_pbc(Lmax):
for dtype in (np.complex64,np.complex128):
for L in range(2,Lmax+1,1):
check_t(L,dtype)
for Nup in range(L+1):
check_t(L,dtype,Nf=(Nup,L-Nup))
for dtype in (np.complex64,np.complex128):
for L in range(2,Lmax+1,2):
check_t_z(L,dtype,Nf=(L//2,L//2))
check_t_z(L,dtype)
for dtype in dtypes:
for L in range(2,Lmax+1,1):
check_t_p(L,dtype)
for Nup in range(L+1):
check_t_p(L,dtype,Nf=(Nup,L-Nup))
for dtype in dtypes:
for L in range(2,Lmax+1,2):
check_t_pz(L,dtype,Nf=(L//2,L//2))
check_t_pz(L,dtype)
for dtype in dtypes:
for L in range(2,Lmax+1,2):
check_t_p_z(L,dtype,Nf=(L//2,L//2))
check_t_p_z(L,dtype)
check_m(4)
check_obc(4)
check_pbc(4)
#print('GET RID OF NO_CHECKS')
#print('RELEASE SEED')
| 30.438338
| 161
| 0.650504
| 4,356
| 22,707
| 3.308999
| 0.044766
| 0.03788
| 0.034966
| 0.084501
| 0.891009
| 0.869433
| 0.852435
| 0.839809
| 0.822395
| 0.811988
| 0
| 0.056539
| 0.119038
| 22,707
| 745
| 162
| 30.479195
| 0.664017
| 0.039459
| 0
| 0.674747
| 0
| 0.052525
| 0.107138
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026263
| false
| 0
| 0.018182
| 0.00202
| 0.046465
| 0.00202
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
3b62f3bfcba0c2da2ccb0950bca6eb4ec23b5372
| 35
|
py
|
Python
|
kzphoneme/__init__.py
|
Ardaq/kz_g2p
|
e725c6857a450c32b12689f4c7c7714fe3dedf87
|
[
"MIT"
] | 3
|
2020-11-23T06:37:57.000Z
|
2021-09-22T06:21:02.000Z
|
kzphoneme/__init__.py
|
Ardaq/kz_g2p
|
e725c6857a450c32b12689f4c7c7714fe3dedf87
|
[
"MIT"
] | null | null | null |
kzphoneme/__init__.py
|
Ardaq/kz_g2p
|
e725c6857a450c32b12689f4c7c7714fe3dedf87
|
[
"MIT"
] | 1
|
2021-09-22T06:22:18.000Z
|
2021-09-22T06:22:18.000Z
|
from kzphoneme.main import Phoneme
| 17.5
| 34
| 0.857143
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
3b63af1d15c77a2a243aa91e37a28a2868c401af
| 234
|
py
|
Python
|
infra/abstractS3.py
|
shu3-lab/ConvertingParquet
|
9ce021dc8fde6fed13e2c6c510627928215806c8
|
[
"Unlicense"
] | null | null | null |
infra/abstractS3.py
|
shu3-lab/ConvertingParquet
|
9ce021dc8fde6fed13e2c6c510627928215806c8
|
[
"Unlicense"
] | null | null | null |
infra/abstractS3.py
|
shu3-lab/ConvertingParquet
|
9ce021dc8fde6fed13e2c6c510627928215806c8
|
[
"Unlicense"
] | null | null | null |
from abc import ABCMeta,abstractmethod
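# Note (added comment, not in the upstream file): minimal abstract base class; concrete
# subclasses are expected to implement the actual S3 download/upload for the given
# bucket and file name (e.g. via boto3, which is assumed here and not shown).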
class abstract_S3(metaclass=ABCMeta):
@abstractmethod
def from_S3(self,bucketname,filename):
pass
@abstractmethod
def to_S3(self,bucketname,filename):
pass
| 19.5
| 42
| 0.700855
| 26
| 234
| 6.192308
| 0.576923
| 0.26087
| 0.198758
| 0.298137
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016575
| 0.226496
| 234
| 11
| 43
| 21.272727
| 0.872928
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.125
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
3b94d4a0b983bdcc1402fc341e501c5e1c673adf
| 59
|
py
|
Python
|
src/gapp_login/wechat/__init__.py
|
decentfox/gapp-login
|
a2edf06b48a2d78c3928860a9a1f5ce26919ef8a
|
[
"BSD-3-Clause"
] | 1
|
2020-06-29T04:19:08.000Z
|
2020-06-29T04:19:08.000Z
|
src/gapp_login/wechat/__init__.py
|
decentfox/gapp-login
|
a2edf06b48a2d78c3928860a9a1f5ce26919ef8a
|
[
"BSD-3-Clause"
] | null | null | null |
src/gapp_login/wechat/__init__.py
|
decentfox/gapp-login
|
a2edf06b48a2d78c3928860a9a1f5ce26919ef8a
|
[
"BSD-3-Clause"
] | 2
|
2020-06-16T03:19:34.000Z
|
2020-07-15T08:50:09.000Z
|
from .clients import MiniProgramClient, OAuth2WeChatClient
| 29.5
| 58
| 0.881356
| 5
| 59
| 10.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 0.084746
| 59
| 1
| 59
| 59
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e633049d057c411ef3258e8cdab40f4322d30cb
| 25
|
py
|
Python
|
notochord/test/ObjectStore/__init__.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/test/ObjectStore/__init__.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
notochord/test/ObjectStore/__init__.py
|
jroose/notochord
|
da9a6ff5d0fabbf0694d0bee1b81a240b66fa006
|
[
"MIT"
] | null | null | null |
from .FileStore import *
| 12.5
| 24
| 0.76
| 3
| 25
| 6.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e6f28c6a2ac4730a10308bf89cb9ccb9f65ecef
| 126
|
py
|
Python
|
panelctmc/__init__.py
|
kmedian/panelctmc
|
6ac9aa458a8e52f73e1e17dd4f2016703b4edbd7
|
[
"MIT"
] | 1
|
2021-03-29T01:17:44.000Z
|
2021-03-29T01:17:44.000Z
|
panelctmc/__init__.py
|
kmedian/panelctmc
|
6ac9aa458a8e52f73e1e17dd4f2016703b4edbd7
|
[
"MIT"
] | 9
|
2018-10-05T12:39:48.000Z
|
2019-12-01T11:06:17.000Z
|
panelctmc/__init__.py
|
kmedian/panelctmc
|
6ac9aa458a8e52f73e1e17dd4f2016703b4edbd7
|
[
"MIT"
] | null | null | null |
from .panel_to_datalist import panel_to_datalist
from .panelctmc_class import PanelCtmc
from .panelctmc_func import panelctmc
| 31.5
| 48
| 0.880952
| 18
| 126
| 5.833333
| 0.444444
| 0.133333
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 126
| 3
| 49
| 42
| 0.921053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8e7323ed26f306d60397b94f783550c1b8fc360c
| 142
|
py
|
Python
|
general/util/__init__.py
|
duennbart/masterthesis_VAE
|
1a161bc5c234acc0a021d84cde8cd69e784174e1
|
[
"BSD-3-Clause"
] | 14
|
2020-06-28T15:38:48.000Z
|
2021-12-05T01:49:50.000Z
|
general/util/__init__.py
|
duennbart/masterthesis_VAE
|
1a161bc5c234acc0a021d84cde8cd69e784174e1
|
[
"BSD-3-Clause"
] | null | null | null |
general/util/__init__.py
|
duennbart/masterthesis_VAE
|
1a161bc5c234acc0a021d84cde8cd69e784174e1
|
[
"BSD-3-Clause"
] | 3
|
2020-06-28T15:38:49.000Z
|
2022-02-13T22:04:34.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from general.util.mssim import *
| 28.4
| 38
| 0.866197
| 19
| 142
| 5.736842
| 0.526316
| 0.275229
| 0.440367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 142
| 5
| 39
| 28.4
| 0.865079
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8e7f8052fb7b0b71bbeab81611aad3e213ccc117
| 33
|
py
|
Python
|
pyzfs/zfs/__init__.py
|
malramsay64/pyzfs
|
515b0f7a54b684d8af088dbd934414768715875e
|
[
"MIT"
] | 5
|
2020-04-06T12:27:57.000Z
|
2021-11-04T16:47:31.000Z
|
pyzfs/zfs/__init__.py
|
malramsay64/pyzfs
|
515b0f7a54b684d8af088dbd934414768715875e
|
[
"MIT"
] | 13
|
2020-03-13T00:06:51.000Z
|
2022-03-19T11:17:26.000Z
|
pyzfs/zfs/__init__.py
|
malramsay64/pyzfs
|
515b0f7a54b684d8af088dbd934414768715875e
|
[
"MIT"
] | 7
|
2020-03-13T00:38:12.000Z
|
2021-11-23T22:48:43.000Z
|
from .main import ZFSCalculation
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
8eb2d7e4ace2fb8fe72c6c1022d752135743b797
| 89
|
py
|
Python
|
osutk/blackhole/__init__.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | 1
|
2016-04-08T11:59:50.000Z
|
2016-04-08T11:59:50.000Z
|
osutk/blackhole/__init__.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | null | null | null |
osutk/blackhole/__init__.py
|
zardoru/osutk
|
70158073fee99c7d1a22ced13c83f937ed06a2dc
|
[
"Unlicense"
] | null | null | null |
from .compose import *
from .cycle import *
from .divisor import *
from .emitter import *
| 22.25
| 22
| 0.741573
| 12
| 89
| 5.5
| 0.5
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168539
| 89
| 4
| 23
| 22.25
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d92ec22396e632d976e444992d379781f777cf9a
| 960
|
py
|
Python
|
huaweicloud-sdk-codehub/huaweicloudsdkcodehub/v2/model/__init__.py
|
JeffreyDin/huaweicloud-sdk-python-v3
|
5a52828777594a3cc4acb3fd481c0e324a9fa3ab
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-codehub/huaweicloudsdkcodehub/v2/model/__init__.py
|
JeffreyDin/huaweicloud-sdk-python-v3
|
5a52828777594a3cc4acb3fd481c0e324a9fa3ab
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-codehub/huaweicloudsdkcodehub/v2/model/__init__.py
|
JeffreyDin/huaweicloud-sdk-python-v3
|
5a52828777594a3cc4acb3fd481c0e324a9fa3ab
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: JeffreyDin
@license:
@contact: dingjianfeng15@gmail.com
@software:
@file: __init__.py.py
@ide: PyCharm
@time: 2021/5/18 18:44
@desc:
"""
from __future__ import absolute_import
# import models into model package
from huaweicloudsdkcodehub.v2.model.create_repository_request import CreateRepositoryRequest
from huaweicloudsdkcodehub.v2.model.create_repository_request_body import CreateRepositoryRequestBody
from huaweicloudsdkcodehub.v2.model.create_repository_response import CreateRepositoryResponse
from huaweicloudsdkcodehub.v2.model.get_all_repository_by_projectid_request import GetAllRepositoryByProjectId2Request
from huaweicloudsdkcodehub.v2.model.get_all_repository_by_projectid_response import GetAllRepositoryByProjectId2Response
from huaweicloudsdkcodehub.v2.model.codehub_result import CodeHubResult
from huaweicloudsdkcodehub.v2.model.codehub_repositories import CodeHubRepositories
| 41.73913
| 120
| 0.865625
| 106
| 960
| 7.575472
| 0.518868
| 0.217933
| 0.235367
| 0.278954
| 0.440847
| 0.343711
| 0.283935
| 0.146949
| 0.146949
| 0
| 0
| 0.026786
| 0.066667
| 960
| 22
| 121
| 43.636364
| 0.86942
| 0.229167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d954c0a131825bf4df87028e46d3d7355f2f8e8f
| 123
|
py
|
Python
|
multiagent/algorithm/ucb.py
|
kiranprasad/multiagent-particle-envs
|
e28e3ff6606e80f11ee16bb2c42f21c442ad29a8
|
[
"MIT"
] | null | null | null |
multiagent/algorithm/ucb.py
|
kiranprasad/multiagent-particle-envs
|
e28e3ff6606e80f11ee16bb2c42f21c442ad29a8
|
[
"MIT"
] | null | null | null |
multiagent/algorithm/ucb.py
|
kiranprasad/multiagent-particle-envs
|
e28e3ff6606e80f11ee16bb2c42f21c442ad29a8
|
[
"MIT"
] | null | null | null |
from math import sqrt, log
def ucb(node):
return node.value / node.visits + sqrt(log(node.parent.visits)/node.visits)
| 24.6
| 79
| 0.723577
| 20
| 123
| 4.45
| 0.6
| 0.157303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146341
| 123
| 4
| 80
| 30.75
| 0.847619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
d95ae7208bcba4bb91daf66a784871e0c7f5c2ee
| 6,870
|
py
|
Python
|
Helper/point_cloud.py
|
Baumwollboebele/python_algorithms
|
62330a047a2b8965ec81f51e45f4db74024693d6
|
[
"MIT"
] | null | null | null |
Helper/point_cloud.py
|
Baumwollboebele/python_algorithms
|
62330a047a2b8965ec81f51e45f4db74024693d6
|
[
"MIT"
] | null | null | null |
Helper/point_cloud.py
|
Baumwollboebele/python_algorithms
|
62330a047a2b8965ec81f51e45f4db74024693d6
|
[
"MIT"
] | null | null | null |
from random import randint
from math import cos, sin, radians
class _Point2D:
def __init__(self, x=None, y=None):
"""
Initializes a 2D point object with x and y coordinates.
"""
if x is None:
x = randint(0, 50)
if y is None:
y = randint(0, 50)
self.x = x
self.y = y
def __str__(self):
return f"X: {self.x} Y: {self.y}"
def __repr__(self):
return f"X: {self.x} Y: {self.y}"
class _Point3D:
def __init__(self, x=None, y=None, z=None):
"""
Initializes a 3D point object with x, y and z coordinates.
Args:
x (integer, optional): X coordinate. Defaults to randint(0, 50).
y (integer, optional): Y coordinate. Defaults to randint(0, 50).
z (integer, optional): Z coordinate. Defaults to randint(0, 50).
"""
if x is None:
x = randint(0, 50)
if y is None:
y = randint(0, 50)
if z is None:
z = randint(0, 50)
self.x = x
self.y = y
self.z = z
def __str__(self):
return f"X: {self.x} Y: {self.y} Z: {self.z}"
def __repr__(self):
return f"X: {self.x} Y: {self.y} Z: {self.z}"
class _PointCloud:
def __init__(self, size):
self.size = size
self.point_cloud = []
def get_x_values(self):
"""
Returns x values of all points.
Returns:
list: x-axis values
"""
values = []
for point in self.point_cloud:
values.append(point.x)
return values
def get_y_values(self):
"""
Returns y values of all points.
Returns:
list: y-axis values
"""
values = []
for point in self.point_cloud:
values.append(point.y)
return values
class PointCloud2D(_PointCloud):
def __init__(self, size):
"""
Initializes a random Point cloud within a 2D coordinate system.
Args:
size (integer): number of points
"""
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point2D())
def rotate(self, rotation):
"""
Rotation of the point cloud around the z axis
with the angle [rotation].
Args:
rotation (integer | float): angle of rotation
"""
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
y = point.y
point.x = round((x*cos(rotation)) - (y*sin(rotation)), 2)
point.y = round((y*cos(rotation)) + (x*sin(rotation)), 2)
return
def translate(self, x, y):
"""
Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float ): translation by y
"""
for point in self.point_cloud:
point.x += x
point.y += y
return
def random_rotation(self):
"""
Applies a random rotation to the point cloud.
"""
self.rotate(randint(0, 360))
return
def random_translation(self):
"""
Applies a random translation to the point cloud.
"""
self.translate(randint(0, 5), randint(0, 5))
return
def randomize(self):
"""
Applies random translation and rotation to the point cloud.
"""
self.random_translation()
self.random_rotation()
return
def get_centroid(self):
x = sum(self.get_x_values()) / len(self.get_x_values())
y = sum(self.get_y_values()) / len(self.get_y_values())
return _Point2D(x,y)
class PointCloud3D(_PointCloud):
def __init__(self, size):
"""
Initializes a random Point cloud within a 3D coordinate system.
Args:
size (integer): number of points
"""
super().__init__(size)
for _ in range(size):
self.point_cloud.append(_Point3D())
def get_z_values(self):
"""
Returns z values of all points.
Returns:
list: z-axis values
"""
values = []
for point in self.point_cloud:
values.append(point.z)
return values
def rotate_x_axis(self, rotation):
"""
Rotation of the Point Cloud around the X-Axis.
Args:
rotation (integer): angle of rotation
"""
rotation = radians(rotation)
for point in self.point_cloud:
y = point.y
z = point.z
point.y = round((y*cos(rotation)) - (z*sin(rotation)), 2)
point.z = round((y*sin(rotation)) + (z*cos(rotation)), 2)
return
def rotate_y_axis(self, rotation):
"""
Rotation of the Point Cloud around the Y-Axis
Args:
rotation (integer): angle of rotation
"""
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
z = point.z
point.x = round((x*cos(rotation)) + (z*sin(rotation)), 2)
point.z = round((-x*sin(rotation)) + (z*cos(rotation)), 2)
return
def rotate_z_axis(self, rotation):
rotation = radians(rotation)
for point in self.point_cloud:
x = point.x
y = point.y
point.x = round((x*cos(rotation)) - (y*sin(rotation)), 2)
point.y = round((x*sin(rotation)) + (y*cos(rotation)), 2)
return
def translate(self, x, y, z):
"""
Translate the coordinates of the point cloud by x and y.
Args:
x (integer | float): translation by x
y (integer | float): translation by y
z (integer | float): translation by z
"""
for point in self.point_cloud:
point.x += x
point.y += y
point.z += z
return
def random_rotation(self):
"""
Applies a random rotation to the point cloud.
"""
self.rotate_x_axis(randint(0, 360))
self.rotate_y_axis(randint(0, 360))
self.rotate_z_axis(randint(0, 360))
return
def random_translation(self):
"""
Applies a random translation to the point cloud.
"""
self.translate(randint(0, 5), randint(0, 5), randint(0, 5))
return
def randomize(self):
"""
Applies random translation and rotation to the point cloud.
"""
self.random_translation()
self.random_rotation()
return
def get_centroid(self):
x = sum(self.get_x_values()) / len(self.get_x_values())
y = sum(self.get_y_values()) / len(self.get_y_values())
z = sum(self.get_z_values()) / len(self.get_z_values())
return _Point3D(x, y, z)
| 25.350554
| 82
| 0.536099
| 866
| 6,870
| 4.120092
| 0.094688
| 0.070067
| 0.047085
| 0.035314
| 0.803531
| 0.779709
| 0.717489
| 0.705717
| 0.68722
| 0.665359
| 0
| 0.015699
| 0.350946
| 6,870
| 270
| 83
| 25.444444
| 0.784481
| 0.247162
| 0
| 0.619048
| 0
| 0.015873
| 0.0255
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.206349
| false
| 0
| 0.015873
| 0.031746
| 0.428571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d96095cfb6bde906def8dc842252efd88827a5be
| 152
|
py
|
Python
|
netmiko/keymile/__init__.py
|
josephwhite13/netmiko
|
c08c5ebb3484383f034e22b9576f88be07525f72
|
[
"MIT"
] | 2,833
|
2015-01-04T20:04:10.000Z
|
2022-03-31T13:03:17.000Z
|
netmiko/keymile/__init__.py
|
josephwhite13/netmiko
|
c08c5ebb3484383f034e22b9576f88be07525f72
|
[
"MIT"
] | 2,137
|
2015-01-28T17:33:41.000Z
|
2022-03-31T18:41:21.000Z
|
netmiko/keymile/__init__.py
|
georgesnow/netmiko
|
185f51ca5c24ea2977d6ca31db1ae263aa72cc12
|
[
"MIT"
] | 1,367
|
2015-01-04T20:04:10.000Z
|
2022-03-31T19:13:28.000Z
|
from netmiko.keymile.keymile_ssh import KeymileSSH
from netmiko.keymile.keymile_nos_ssh import KeymileNOSSSH
__all__ = ["KeymileSSH", "KeymileNOSSSH"]
| 30.4
| 57
| 0.835526
| 18
| 152
| 6.666667
| 0.5
| 0.183333
| 0.3
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085526
| 152
| 4
| 58
| 38
| 0.863309
| 0
| 0
| 0
| 0
| 0
| 0.151316
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d973a5aaa162bd85b869896c24c6158d13466dc0
| 158
|
py
|
Python
|
Packages/Dead/demo/Lib/__init__.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 62
|
2018-03-30T15:46:56.000Z
|
2021-12-08T23:30:24.000Z
|
Packages/Dead/demo/Lib/__init__.py
|
xylar/cdat
|
8a5080cb18febfde365efc96147e25f51494a2bf
|
[
"BSD-3-Clause"
] | 114
|
2018-03-21T01:12:43.000Z
|
2021-07-05T12:29:54.000Z
|
Packages/Dead/demo/Lib/__init__.py
|
CDAT/uvcdat
|
5133560c0c049b5c93ee321ba0af494253b44f91
|
[
"BSD-3-Clause"
] | 14
|
2018-06-06T02:42:47.000Z
|
2021-11-26T03:27:00.000Z
|
from About import *
from Args import *
from DemoSet import *
from DemoSplash import *
from geoparse import *
from MainDisplay import *
from MainMenu import *
| 19.75
| 25
| 0.778481
| 21
| 158
| 5.857143
| 0.428571
| 0.487805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177215
| 158
| 7
| 26
| 22.571429
| 0.946154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79772d79e6e511a8dee9fd463d229b09ccb26bdd
| 46
|
py
|
Python
|
merit/__init__.py
|
tayrobin/merit
|
6274b5ca9f5f0962ca54c44d2adbd2a034188642
|
[
"MIT"
] | null | null | null |
merit/__init__.py
|
tayrobin/merit
|
6274b5ca9f5f0962ca54c44d2adbd2a034188642
|
[
"MIT"
] | null | null | null |
merit/__init__.py
|
tayrobin/merit
|
6274b5ca9f5f0962ca54c44d2adbd2a034188642
|
[
"MIT"
] | null | null | null |
from .merit import Merit
from .org import Org
| 15.333333
| 24
| 0.782609
| 8
| 46
| 4.5
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 46
| 2
| 25
| 23
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
79d2ca3c3501467105a969c028a5b55bec45812b
| 39
|
py
|
Python
|
python/ql/test/library-tests/PointsTo/regressions/wrong/module-imports/conflict-stdlib/code-invalid-package-name/unique_name_use.py
|
timoles/codeql
|
2d24387e9e300bf03be35694816b1e76ae88a50c
|
[
"MIT"
] | 4,036
|
2020-04-29T00:09:57.000Z
|
2022-03-31T14:16:38.000Z
|
python/ql/test/library-tests/PointsTo/regressions/wrong/module-imports/conflict-stdlib/code-invalid-package-name/unique_name_use.py
|
baby636/codeql
|
097b6e5e3364ecc7103586d6feb308861e15538e
|
[
"MIT"
] | 2,970
|
2020-04-28T17:24:18.000Z
|
2022-03-31T22:40:46.000Z
|
python/ql/test/library-tests/PointsTo/regressions/wrong/module-imports/conflict-stdlib/code-invalid-package-name/unique_name_use.py
|
ScriptBox99/github-codeql
|
2ecf0d3264db8fb4904b2056964da469372a235c
|
[
"MIT"
] | 794
|
2020-04-29T00:28:25.000Z
|
2022-03-30T08:21:46.000Z
|
from unique_name import foo
print(foo)
| 13
| 27
| 0.820513
| 7
| 39
| 4.428571
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 39
| 2
| 28
| 19.5
| 0.911765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 6
|
8db632eff1be39954e33b6fb7a47f71e55b16be2
| 101
|
py
|
Python
|
bard/__init__.py
|
Xevib/bard
|
922763027e013ff287aeb7ee67f24e9d980c1267
|
[
"BSD-3-Clause"
] | 1
|
2022-02-03T17:31:56.000Z
|
2022-02-03T17:31:56.000Z
|
bard/__init__.py
|
Xevib/bard
|
922763027e013ff287aeb7ee67f24e9d980c1267
|
[
"BSD-3-Clause"
] | 26
|
2018-02-11T19:56:18.000Z
|
2021-03-31T19:08:40.000Z
|
bard/__init__.py
|
Xevib/bard
|
922763027e013ff287aeb7ee67f24e9d980c1267
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import
from bard.bard import Bard
from bard.bard import ChangeHandler
| 33.666667
| 38
| 0.871287
| 15
| 101
| 5.533333
| 0.4
| 0.192771
| 0.289157
| 0.433735
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108911
| 101
| 3
| 39
| 33.666667
| 0.922222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
8dc9ba81d214257b682c4125705dedfae7ecb6c8
| 19,063
|
py
|
Python
|
tests/test_swf/responses/test_activity_tasks.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 5,460
|
2015-01-01T01:11:17.000Z
|
2022-03-31T23:45:38.000Z
|
tests/test_swf/responses/test_activity_tasks.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 4,475
|
2015-01-05T19:37:30.000Z
|
2022-03-31T13:55:12.000Z
|
tests/test_swf/responses/test_activity_tasks.py
|
gtourkas/moto
|
307104417b579d23d02f670ff55217a2d4a16bee
|
[
"Apache-2.0"
] | 1,831
|
2015-01-14T00:00:44.000Z
|
2022-03-31T20:30:04.000Z
|
from boto.swf.exceptions import SWFResponseError
from botocore.exceptions import ClientError
from freezegun import freeze_time
import sure # noqa # pylint: disable=unused-import
from unittest import SkipTest
import pytest
from moto import mock_swf, mock_swf_deprecated
from moto import settings
from moto.swf import swf_backend
from ..utils import setup_workflow, SCHEDULE_ACTIVITY_TASK_DECISION
from ..utils import setup_workflow_boto3
# PollForActivityTask endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_when_one():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = conn.poll_for_activity_task(
"test-domain", "activity-task-list", identity="surprise"
)
resp["activityId"].should.equal("my-activity-001")
resp["taskToken"].should_not.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
{"identity": "surprise", "scheduledEventId": 5}
)
@mock_swf
def test_poll_for_activity_task_when_one_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = client.poll_for_activity_task(
domain="test-domain",
taskList={"name": "activity-task-list"},
identity="surprise",
)
resp["activityId"].should.equal("my-activity-001")
resp["taskToken"].should_not.be.none
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-1]["eventType"].should.equal("ActivityTaskStarted")
resp["events"][-1]["activityTaskStartedEventAttributes"].should.equal(
{"identity": "surprise", "scheduledEventId": 5}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_when_none():
conn = setup_workflow()
resp = conn.poll_for_activity_task("test-domain", "activity-task-list")
resp.should.equal({"startedEventId": 0})
# Has boto3 equivalent
@mock_swf_deprecated
def test_poll_for_activity_task_on_non_existent_queue():
conn = setup_workflow()
resp = conn.poll_for_activity_task("test-domain", "non-existent-queue")
resp.should.equal({"startedEventId": 0})
@pytest.mark.parametrize("task_name", ["activity-task-list", "non-existent-queue"])
@mock_swf
def test_poll_for_activity_task_when_none_boto3(task_name):
client = setup_workflow_boto3()
resp = client.poll_for_decision_task(
domain="test-domain", taskList={"name": task_name}
)
resp.shouldnt.have.key("taskToken")
resp.should.have.key("startedEventId").equal(0)
resp.should.have.key("previousStartedEventId").equal(0)
# CountPendingActivityTasks endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_count_pending_activity_tasks():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = conn.count_pending_activity_tasks("test-domain", "activity-task-list")
resp.should.equal({"count": 1, "truncated": False})
# Has boto3 equivalent
@mock_swf_deprecated
def test_count_pending_decision_tasks_on_non_existent_task_list():
conn = setup_workflow()
resp = conn.count_pending_activity_tasks("test-domain", "non-existent")
resp.should.equal({"count": 0, "truncated": False})
@pytest.mark.parametrize(
"task_name,cnt", [("activity-task-list", 1), ("non-existent", 0)]
)
@mock_swf
def test_count_pending_activity_tasks_boto3(task_name, cnt):
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
resp = client.count_pending_activity_tasks(
domain="test-domain", taskList={"name": task_name}
)
resp.should.have.key("count").equal(cnt)
resp.should.have.key("truncated").equal(False)
# RespondActivityTaskCompleted endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.respond_activity_task_completed(
activity_token, result="result of the task"
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
{"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
)
@mock_swf
def test_respond_activity_task_completed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_completed(
taskToken=activity_token, result="result of the task"
)
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskCompleted")
resp["events"][-2]["activityTaskCompletedEventAttributes"].should.equal(
{"result": "result of the task", "scheduledEventId": 5, "startedEventId": 6}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_on_closed_workflow_execution():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
# bad: we're closing workflow execution manually, but endpoints are not
# coded for now..
wfe = swf_backend.domains[0].workflow_executions[-1]
wfe.execution_status = "CLOSED"
# /bad
conn.respond_activity_task_completed.when.called_with(activity_token).should.throw(
SWFResponseError, "WorkflowExecution="
)
@mock_swf
def test_respond_activity_task_completed_on_closed_workflow_execution_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.terminate_workflow_execution(domain="test-domain", workflowId="uid-abcd1234")
with pytest.raises(ClientError) as ex:
client.respond_activity_task_completed(taskToken=activity_token)
ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault")
ex.value.response["Error"]["Message"].should.equal(
"Unknown execution: WorkflowExecution=[workflowId=uid-abcd1234, runId={}]".format(
client.run_id
)
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_with_task_already_completed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
conn.respond_activity_task_completed(activity_token)
conn.respond_activity_task_completed.when.called_with(activity_token).should.throw(
SWFResponseError, "Unknown activity, scheduledEventId = 5"
)
@mock_swf
def test_respond_activity_task_completed_with_task_already_completed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_completed(taskToken=activity_token)
with pytest.raises(ClientError) as ex:
client.respond_activity_task_completed(taskToken=activity_token)
ex.value.response["Error"]["Code"].should.equal("UnknownResourceFault")
ex.value.response["Error"]["Message"].should.equal(
"Unknown activity, scheduledEventId = 5"
)
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# RespondActivityTaskFailed endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_failed():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.respond_activity_task_failed(
activity_token, reason="short reason", details="long details"
)
resp.should.be.none
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
{
"reason": "short reason",
"details": "long details",
"scheduledEventId": 5,
"startedEventId": 6,
}
)
@mock_swf
def test_respond_activity_task_failed_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.respond_activity_task_failed(
taskToken=activity_token, reason="short reason", details="long details"
)
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskFailed")
resp["events"][-2]["activityTaskFailedEventAttributes"].should.equal(
{
"reason": "short reason",
"details": "long details",
"scheduledEventId": 5,
"startedEventId": 6,
}
)
# Has boto3 equivalent
@mock_swf_deprecated
def test_respond_activity_task_completed_with_wrong_token():
# NB: we just test ONE failure case for RespondActivityTaskFailed
# because the safeguards are shared with RespondActivityTaskCompleted, so
# no need to retest everything end-to-end.
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
conn.poll_for_activity_task("test-domain", "activity-task-list")
conn.respond_activity_task_failed.when.called_with(
"not-a-correct-token"
).should.throw(SWFResponseError, "Invalid token")
@mock_swf
def test_respond_activity_task_completed_with_wrong_token_boto3():
# NB: we just test ONE failure case for RespondActivityTaskFailed
# because the safeguards are shared with RespondActivityTaskCompleted, so
# no need to retest everything end-to-end.
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
with pytest.raises(ClientError) as ex:
client.respond_activity_task_failed(taskToken="not-a-correct-token")
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal("Invalid token")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# RecordActivityTaskHeartbeat endpoint
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = conn.poll_for_activity_task("test-domain", "activity-task-list")[
"taskToken"
]
resp = conn.record_activity_task_heartbeat(activity_token)
resp.should.equal({"cancelRequested": False})
@mock_swf
def test_record_activity_task_heartbeat_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
resp = client.record_activity_task_heartbeat(taskToken=activity_token)
resp.should.have.key("cancelRequested").equal(False)
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat_with_wrong_token():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
conn.poll_for_activity_task("test-domain", "activity-task-list")["taskToken"]
conn.record_activity_task_heartbeat.when.called_with(
"bad-token", details="some progress details"
).should.throw(SWFResponseError)
@mock_swf
def test_record_activity_task_heartbeat_with_wrong_token_boto3():
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
with pytest.raises(ClientError) as ex:
client.record_activity_task_heartbeat(taskToken="bad-token")
ex.value.response["Error"]["Code"].should.equal("ValidationException")
ex.value.response["Error"]["Message"].should.equal("Invalid token")
ex.value.response["ResponseMetadata"]["HTTPStatusCode"].should.equal(400)
# Has boto3 equivalent
@mock_swf_deprecated
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout():
conn = setup_workflow()
decision_token = conn.poll_for_decision_task("test-domain", "queue")["taskToken"]
conn.respond_decision_task_completed(
decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
with freeze_time("2015-01-01 12:00:00"):
activity_token = conn.poll_for_activity_task(
"test-domain", "activity-task-list"
)["taskToken"]
conn.record_activity_task_heartbeat(
activity_token, details="some progress details"
)
with freeze_time("2015-01-01 12:05:30"):
# => Activity Task Heartbeat timeout reached!!
resp = conn.get_workflow_execution_history(
"test-domain", conn.run_id, "uid-abcd1234"
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
attrs["details"].should.equal("some progress details")
@mock_swf
def test_record_activity_task_heartbeat_sets_details_in_case_of_timeout_boto3():
if settings.TEST_SERVER_MODE:
raise SkipTest("Unable to manipulate time in ServerMode")
client = setup_workflow_boto3()
decision_token = client.poll_for_decision_task(
domain="test-domain", taskList={"name": "queue"}
)["taskToken"]
client.respond_decision_task_completed(
taskToken=decision_token, decisions=[SCHEDULE_ACTIVITY_TASK_DECISION]
)
with freeze_time("2015-01-01 12:00:00"):
activity_token = client.poll_for_activity_task(
domain="test-domain", taskList={"name": "activity-task-list"}
)["taskToken"]
client.record_activity_task_heartbeat(
taskToken=activity_token, details="some progress details"
)
with freeze_time("2015-01-01 12:05:30"):
# => Activity Task Heartbeat timeout reached!!
resp = client.get_workflow_execution_history(
domain="test-domain",
execution={"runId": client.run_id, "workflowId": "uid-abcd1234"},
)
resp["events"][-2]["eventType"].should.equal("ActivityTaskTimedOut")
attrs = resp["events"][-2]["activityTaskTimedOutEventAttributes"]
attrs["details"].should.equal("some progress details")
| 37.305284
| 90
| 0.720506
| 2,191
| 19,063
| 5.960749
| 0.089
| 0.095559
| 0.031853
| 0.036371
| 0.886217
| 0.867381
| 0.856662
| 0.84977
| 0.814778
| 0.788821
| 0
| 0.011719
| 0.158475
| 19,063
| 510
| 91
| 37.378431
| 0.802394
| 0.053349
| 0
| 0.617866
| 0
| 0
| 0.207061
| 0.021261
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059553
| false
| 0
| 0.027295
| 0
| 0.086849
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5c33980f49792d2a64714ed7a93da999da37c2ad
| 3,866
|
py
|
Python
|
jstc/engines/test_handlebars.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/engines/test_handlebars.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
jstc/engines/test_handlebars.py
|
canaryhealth/jstc
|
d4be1f213e041b80708e8a7e40edfe2ae308b637
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <phil@canary.md>
# date: 2016/09/15
# copy: (C) Copyright 2016-EOT Canary Health, Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import unittest
from .. import api
#------------------------------------------------------------------------------
class TestHandlebarsEngine(unittest.TestCase):
#----------------------------------------------------------------------------
def test_partial(self):
try:
from .handlebars import HandlebarsEngine
# todo: make the compiler version details regex'ed...
self.assertEqual(
HandlebarsEngine().precompile('hello, world!', {'name': 'foo', 'partial': True}),
('application/javascript-fragment',
'''\
{"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true}'''))
except api.PrecompilerUnavailable:
raise unittest.SkipTest(
'handlebars executable not available (use "npm install handlebars")')
#----------------------------------------------------------------------------
def test_assemble(self):
try:
from .handlebars import HandlebarsEngine
comp = HandlebarsEngine()
parts = []
for name, text in [
('hello', 'hello, world!'),
('hello/name', 'hello, {{name}}!'),
]:
attrs = dict(name=name, partial=True)
parts.append(comp.precompile(text, attrs) + (attrs,))
# todo: make the compiler version details regex'ed...
self.assertEqual(
comp.assemble(parts),
('application/javascript',
'''\
(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["hello"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
return "hello, world!";
},"useData":true});ts["hello/name"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "hello, "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)))
+ "!";
},"useData":true});})();'''))
except api.PrecompilerUnavailable:
raise unittest.SkipTest(
'handlebars executable not available (use "npm install handlebars")')
#----------------------------------------------------------------------------
def test_standalone(self):
try:
from .handlebars import HandlebarsEngine
# todo: make the compiler version details regex'ed...
self.assertEqual(
HandlebarsEngine().precompile('this is {{name}}', {'name': 'foo'}),
('application/javascript',
'''\
(function(){var t=Handlebars.template,ts=Handlebars.templates=Handlebars.templates||{};ts["foo"]=t({"compiler":[7,">= 4.0.0"],"main":function(container,depth0,helpers,partials,data) {
var helper;
return "this is "
+ container.escapeExpression(((helper = (helper = helpers.name || (depth0 != null ? depth0.name : depth0)) != null ? helper : helpers.helperMissing),(typeof helper === "function" ? helper.call(depth0 != null ? depth0 : {},{"name":"name","hash":{},"data":data}) : helper)));
},"useData":true});})();'''))
except api.PrecompilerUnavailable:
raise unittest.SkipTest(
'handlebars executable not available (use "npm install handlebars")')
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| 44.953488
| 277
| 0.530005
| 350
| 3,866
| 5.845714
| 0.302857
| 0.029326
| 0.01955
| 0.021505
| 0.762463
| 0.762463
| 0.741447
| 0.741447
| 0.741447
| 0.741447
| 0
| 0.01315
| 0.154165
| 3,866
| 85
| 278
| 45.482353
| 0.612538
| 0.247801
| 0
| 0.560976
| 0
| 0
| 0.240341
| 0.049116
| 0
| 0
| 0
| 0.035294
| 0.073171
| 1
| 0.073171
| false
| 0
| 0.121951
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
308ca2d2472880b0da634185070429e05d278732
| 95
|
py
|
Python
|
instagram_api/exceptions/sentry_block.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | 13
|
2019-08-07T21:24:34.000Z
|
2020-12-12T12:23:50.000Z
|
instagram_api/exceptions/sentry_block.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
instagram_api/exceptions/sentry_block.py
|
Yuego/instagram_api
|
b53f72db36c505a2eb24ebac1ba8267a0cc295bb
|
[
"MIT"
] | null | null | null |
from .request import RequestException
class SentryBlockException(RequestException):
pass
| 15.833333
| 45
| 0.821053
| 8
| 95
| 9.75
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136842
| 95
| 5
| 46
| 19
| 0.95122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
30b4933f47a82ede24d0a0e7152602edab6bb49b
| 268
|
py
|
Python
|
hacker/challenges/crypto/make_money_fast.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 1
|
2020-04-21T11:39:25.000Z
|
2020-04-21T11:39:25.000Z
|
hacker/challenges/crypto/make_money_fast.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | 7
|
2020-02-12T01:08:01.000Z
|
2022-02-10T11:56:56.000Z
|
hacker/challenges/crypto/make_money_fast.py
|
Tenebrar/codebase
|
59c9a35289fb29afedad0e3edd0519b67372ef9f
|
[
"Unlicense"
] | null | null | null |
# Decode with spammimic.com (it's encoded in a lot more than just punctuation, but there's no exact explanation)
# Every changed bit changes a select part of the generated spam, but there's no real value to learning to decode it
print("The answer is 'bezahler4ever'")
| 67
| 115
| 0.776119
| 47
| 268
| 4.425532
| 0.787234
| 0.076923
| 0.086538
| 0.105769
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004464
| 0.164179
| 268
| 3
| 116
| 89.333333
| 0.924107
| 0.835821
| 0
| 0
| 1
| 0
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
30d9589eb3330a7a3eb462950686fdeaa041130c
| 190
|
py
|
Python
|
exercises/chapter02/test_02_10.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
exercises/chapter02/test_02_10.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
exercises/chapter02/test_02_10.py
|
deep-diver/fastai-course
|
1a0a39311fba0e1b3f4720a612a17dc7c708d9bb
|
[
"MIT"
] | null | null | null |
def test():
assert "learner.summary()" in __solution__, "summary() 메서드를 사용하셨나요?"
assert "learner.unfreeze()" in __solution__, "unfreeze() 메서드를 사용하셨나요?"
__msg__.good("잘 하셨습니다!")
| 31.666667
| 74
| 0.673684
| 22
| 190
| 5.272727
| 0.636364
| 0.224138
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.163158
| 190
| 5
| 75
| 38
| 0.72956
| 0
| 0
| 0
| 0
| 0
| 0.463158
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
30dfda242f67a38a0622bfdb5305cd9bd49868fa
| 44
|
py
|
Python
|
ticc/utils.py
|
gioxc88/ticc
|
7704d7c6ca8c53d9eba994ba3154ffc0b0e1faef
|
[
"MIT"
] | null | null | null |
ticc/utils.py
|
gioxc88/ticc
|
7704d7c6ca8c53d9eba994ba3154ffc0b0e1faef
|
[
"MIT"
] | 1
|
2021-03-16T07:30:20.000Z
|
2021-03-18T06:11:31.000Z
|
ticc/utils.py
|
gioxc88/ticc
|
7704d7c6ca8c53d9eba994ba3154ffc0b0e1faef
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy import stats
| 11
| 23
| 0.795455
| 8
| 44
| 4.375
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 44
| 3
| 24
| 14.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ebd5a255f1914ffe249931d7c2bbad03d1fd8026
| 148
|
py
|
Python
|
utils/__init__.py
|
Riroaki/LemonML-
|
3bb344c135e4dd7dab63a4fd2184ac0aaacc367d
|
[
"MIT"
] | 34
|
2019-06-23T03:45:41.000Z
|
2021-11-30T12:28:41.000Z
|
utils/__init__.py
|
Riroaki/LemonML-
|
3bb344c135e4dd7dab63a4fd2184ac0aaacc367d
|
[
"MIT"
] | null | null | null |
utils/__init__.py
|
Riroaki/LemonML-
|
3bb344c135e4dd7dab63a4fd2184ac0aaacc367d
|
[
"MIT"
] | 5
|
2019-06-14T09:41:24.000Z
|
2019-10-23T11:21:22.000Z
|
import utils._cross_validate as cross_validate
import utils._scaling as scaling
from utils._batch import batch
import utils._make_data as make_data
| 29.6
| 46
| 0.864865
| 24
| 148
| 5
| 0.416667
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108108
| 148
| 4
| 47
| 37
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
692071d62b9b2b1be70e7be550a9922cbc54df90
| 1,810
|
py
|
Python
|
imagersite/imager_images/forms.py
|
brandonholderman/django-imager
|
7538c843ca575415b3ac303e832147803e68419f
|
[
"MIT"
] | null | null | null |
imagersite/imager_images/forms.py
|
brandonholderman/django-imager
|
7538c843ca575415b3ac303e832147803e68419f
|
[
"MIT"
] | 5
|
2018-04-27T06:53:27.000Z
|
2018-05-11T03:18:12.000Z
|
imagersite/imager_images/forms.py
|
brandonholderman/django-imager
|
7538c843ca575415b3ac303e832147803e68419f
|
[
"MIT"
] | null | null | null |
from django.forms import ModelForm
from .models import Photo, Album
class PhotoForm(ModelForm):
"""Form for adding photo."""
class Meta:
model = Photo
fields = ['image', 'album', 'title', 'description', 'published']
# import pdb; pdb.set_trace()
def __init__(self, *args, **kwargs):
username = kwargs.pop('username')
super().__init__(*args, **kwargs)
self.fields['album'].queryset = Album.objects.filter(
user__username=username)
class AlbumForm(ModelForm):
"""Form for adding albums."""
class Meta:
model = Album
fields = ['cover_image', 'name', 'description', 'published']
# import pdb; pdb.set_trace()
def __init__(self, *args, **kwargs):
username = kwargs.pop('username')
super().__init__(*args, **kwargs)
self.fields['cover_image'].queryset = Photo.objects.filter(
album__user__username=username)
class PhotoEditForm(ModelForm):
"""Form for editing photo info."""
class Meta:
model = Photo
fields = ['album', 'title', 'description', 'published']
def __init__(self, *args, **kwargs):
username = kwargs.pop('username')
super().__init__(*args, **kwargs)
self.fields['album'].queryset = Album.objects.filter(
user__username=username)
class AlbumEditForm(ModelForm):
"""Form for editing album info."""
class Meta:
model = Album
fields = ['cover_image', 'name', 'description', 'published']
def __init__(self, *args, **kwargs):
username = kwargs.pop('username')
super().__init__(*args, **kwargs)
self.fields['cover_image'].queryset = Photo.objects.filter(
album__user__username=username)
| 31.754386
| 72
| 0.6
| 185
| 1,810
| 5.6
| 0.232432
| 0.07722
| 0.061776
| 0.057915
| 0.754826
| 0.706564
| 0.706564
| 0.706564
| 0.706564
| 0.706564
| 0
| 0
| 0.256354
| 1,810
| 56
| 73
| 32.321429
| 0.769688
| 0.08895
| 0
| 0.789474
| 0
| 0
| 0.122311
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
693da5445ec466f1b982891ea075e9d39dc32238
| 39
|
py
|
Python
|
tests/conftest.py
|
marctom/GPflow
|
649b039b685e2c158bdaac3ecd8c2ad3ee18bc8f
|
[
"Apache-2.0"
] | 1
|
2020-01-27T19:05:28.000Z
|
2020-01-27T19:05:28.000Z
|
tests/conftest.py
|
marctom/GPflow
|
649b039b685e2c158bdaac3ecd8c2ad3ee18bc8f
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
marctom/GPflow
|
649b039b685e2c158bdaac3ecd8c2ad3ee18bc8f
|
[
"Apache-2.0"
] | null | null | null |
from gpflow.test_util import session_tf
| 39
| 39
| 0.897436
| 7
| 39
| 4.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6970c4d5773551301ba50fe97d48ad4404687383
| 124
|
py
|
Python
|
torch_geometric/utils/num_nodes.py
|
DL-85/pytorch_geometric
|
eb12a94a667e881c4a6bff26b0453428bcb72393
|
[
"MIT"
] | 62
|
2018-12-12T16:06:34.000Z
|
2022-02-18T11:32:04.000Z
|
torch_geometric/utils/num_nodes.py
|
chentingpc/pytorch_geometric
|
44c4c5069dbc4c8a96761a3b5a7e7b45c8352a53
|
[
"MIT"
] | 2
|
2019-12-26T12:51:47.000Z
|
2020-09-02T10:39:55.000Z
|
torch_geometric/utils/num_nodes.py
|
chentingpc/pytorch_geometric
|
44c4c5069dbc4c8a96761a3b5a7e7b45c8352a53
|
[
"MIT"
] | 19
|
2018-12-12T17:51:43.000Z
|
2021-09-19T08:01:29.000Z
|
def maybe_num_nodes(edge_index, num_nodes=None):
return edge_index.max().item() + 1 if num_nodes is None else num_nodes
| 41.333333
| 74
| 0.766129
| 23
| 124
| 3.826087
| 0.608696
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009346
| 0.137097
| 124
| 2
| 75
| 62
| 0.813084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|