| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
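Each row below follows this schema. For orientation, a minimal sketch of loading and inspecting such rows, assuming the split is stored as Parquet (the file name is hypothetical):

```python
# Minimal sketch (not from the dataset card): load one shard of rows with
# the schema above. "shard.parquet" is a hypothetical file name.
import pandas as pd

df = pd.read_parquet("shard.parquet")

row = df.iloc[0]
print(row["hexsha"], row["lang"], row["size"])  # file identity and size
print(row["content"][:120])                     # start of the source text

# Columns ending in _quality_signal hold the raw measurements; judging by
# the rows below, the matching unsuffixed qsc_* columns appear to hold
# 0/1 flags derived from them.
signals = [c for c in df.columns if c.endswith("_quality_signal")]
print(row[signals])
```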
Row 1:
hexsha: cbdacd5d1088bf007bfe18e106fb35e07815b192 | size: 20,347 | ext: py | lang: Python
max_stars: path pyswitch/snmp/base/acl/acl.py | repo mfeed/PySwitchLib | head 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | licenses ["Apache-2.0"] | count 6 | events 2017-10-02T21:02:02.000Z to 2018-07-04T13:56:55.000Z
max_issues: path pyswitch/snmp/base/acl/acl.py | repo mfeed/PySwitchLib | head 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | licenses ["Apache-2.0"] | count 23 | events 2017-10-03T18:49:11.000Z to 2019-07-20T00:25:44.000Z
max_forks: path pyswitch/snmp/base/acl/acl.py | repo mfeed/PySwitchLib | head 54e872bcbe77f2ae840d845dadb7c5b9c12482ed | licenses ["Apache-2.0"] | count 4 | events 2018-02-27T05:43:37.000Z to 2019-06-30T13:30:25.000Z
content:
"""
Copyright 2017 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
class Acl(object):
"""
The Acl class holds all the functions associated with the
Access Control list.
Attributes:
None
"""
__metaclass__ = abc.ABCMeta
def __init__(self, callback):
"""
ACL init function.
Args:
callback: Callback function that will be called for each action.
Returns:
ACL Object
Raises:
ValueError
"""
self._callback = callback
@abc.abstractmethod
def create_acl(self, **parameters):
"""
Create an Access Control List.
Args:
parameters contains:
address_type (str): ACL address type, ip or ipv6 or mac.
acl_type (str): ACL type, extended or standard.
acl_name (str): Unique name for ACL.
Returns:
Return value of `string` message.
Examples:
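            Example (illustrative; conn and auth are assumed to be
            defined, mirroring the bulk-method examples below):
            >>> from pyswitch.device import Device
            >>> with Device(conn=conn, auth=auth,
                    connection_type='NETCONF') as dev:
            >>>     print dev.acl.create_acl(acl_name='Acl_1',
                    acl_type='standard',
                    address_type='ip')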
"""
return
@abc.abstractmethod
def delete_acl(self, **parameters):
"""
Delete Access Control List.
Args:
parameters contains:
acl_name (str): Name of the access list.
Returns:
Return value of `string` message.
Examples:
"""
return
@abc.abstractmethod
def parse_params_for_add_l2_acl_rule(self, **parameters):
"""
Parse parameters of a rule to be added to an L2 Access Control List.
Args:
parameters contains:
acl_name (str): Name of the access list.
seq_id: integer
action: string enum: - deny - permit
source: string: Source filter, can be 'any' or 'host', or the
actual MAC in HHHH.HHHH.HHHH format.
srchost: string: The source MAC in HHHH.HHHH.HHHH format.
The value is required only when the source is 'host'.
src_mac_addr_mask: string : Mask for the source MAC in
HHHH.HHHH.HHHH format.
dst: string: Destination filter, can be 'any' or 'host',
or the actual MAC of the destination in
HHHH.HHHH.HHHH format.
dsthost: string : Destination MAC in HHHH.HHHH.HHHH format.
The value is required only when the dst is 'host'
dst_mac_addr_mask: string: Mask for the destination MAC
in HHHH.HHHH.HHHH format.
vlan: VLAN IDs - 'any' or 1-4096
ethertype: EtherType, can be 'arp', 'fcoe', 'ipv4-15', 'ipv6' or
custom value between 1536 and 65535.
arp_guard: string : Enables arp-guard for the rule
drop_precedence_force: Matches the specified value against the
drop_precedence value of the packet to filter.
Allowed values are 0 through 2.
log: Enables the logging
mirror: Enables mirror for the rule
priority
priority_force
priority_mapping
Returns:
Return value of `string` message.
Examples:
"""
return
@abc.abstractmethod
def add_l2_acl_rule(self, **parameters):
"""
Add a rule to an L2 Access Control List.
Args:
parameters contains:
acl_name (str): Name of the access list.
seq_id: integer
action: string enum: - deny - permit - hard-drop
source: string: Source filter, can be 'any' or 'host',
or the actual MAC in HHHH.HHHH.HHHH format.
src_mac_addr_mask: string : Mask for the source MAC in
HHHH.HHHH.HHHH format.
dst: string: Destination filter, can be 'any' or 'host',
or the actual MAC of the destination in
HHHH.HHHH.HHHH format.
dst_mac_addr_mask: Mask for the destination MAC in
HHHH.HHHH.HHHH format.
vlan: VLAN IDs - 'any' or 1-4096.
ethertype: EtherType can be 'arp', 'fcoe', 'ipv4-15', 'ipv6' or
custom value between 1536 and 65535.
arp_guard: string : Enables arp-guard for the rule
drop_precedence_force: string : Matches the specified value
against the drop_precedence value of the packet to filter.
Allowed values are 0 through 2.
log: Enables the logging
mirror: Enables mirror for the rule
Returns:
Return value of `string` message.
Examples:
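            Example (illustrative; conn and auth are assumed to be
            defined, mirroring the bulk-method examples below):
            >>> from pyswitch.device import Device
            >>> with Device(conn=conn, auth=auth,
                    connection_type='NETCONF') as dev:
            >>>     print dev.acl.add_l2_acl_rule(acl_name='Acl_1',
                    seq_id=20, action='permit',
                    source='host', srchost='2222.2222.2222')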
"""
return
@abc.abstractmethod
def delete_l2_acl_rule(self, **parameters):
"""
Delete Rule from Access Control List.
Args:
parameters contains:
acl_name: Name of the access list.
seq_id: Sequence number of the rule. For add operation,
if not specified, the rule is added at the end of the list.
Returns:
Return value of `string` message.
Examples:
"""
return
@abc.abstractmethod
def is_valid_seq_id(self, seq_id, acl_name):
"""
is_valid_seq_id checks for the existence of seq_id.
Args:
acl_name (str): Name of the access list.
seq_id (int): Sequence number of the rule.
Returns:
Return True
Raises:
Exception, ValueError for invalid seq_id.
Examples:
"""
return
@abc.abstractmethod
def apply_acl(self, **parameters):
"""
Apply Access Control List on interface.
Args:
parameters contains:
address_type (str): ACL address type, ip or ipv6 or mac.
acl_name: Name of the access list.
intf_type: - ethernet, ve
intf_name: array of slot/port or ve interfaces
acl_direction: Direction of ACL binding on the specified
interface
Returns:
Return True
Raises:
Exception, ValueError for invalid seq_id.
"""
return
@abc.abstractmethod
def remove_acl(self, **parameters):
"""
Remove Access Control List from interface.
Args:
parameters contains:
address_type (str): ACL address type, ip or ipv6 or mac.
acl_name: Name of the access list.
intf_type: - ethernet, ve
intf_name: array of slot/port or ve interfaces
acl_direction: Direction of ACL binding on the specified
interface
Returns:
Return True
Raises:
Exception, ValueError for invalid seq_id.
"""
return
@abc.abstractmethod
def add_ipv4_rule_acl(self, **parameters):
"""
Add rules to Access Control List of ipv4.
Args:
parameters contains:
acl_name: (string) Name of the access list
seq_id: (integer) Sequence number of the rule,
if not specified, the rule is added at the end of the list.
Valid range is 0 to 4294967290
action: (string) Action performed by ACL rule
- permit
- deny
protocol_type: (string) Type of IP packets to be filtered
based on protocol. Valid values are <0-255> or key words
tcp, udp, icmp or ip
source: (string) Source address filters
{ any | S_IPaddress/mask(0.0.0.255) |
host,S_IPaddress } [ source-operator [ S_port-numbers ] ]
destination: (string) Destination address filters
{ any | S_IPaddress/mask(0.0.0.255) |
host,S_IPaddress } [ source-operator [ S_port-numbers ] ]
dscp: (string) Matches the specified value against the DSCP
value of the packet to filter.
Allowed values are 0 through 63.
drop_precedence_force: (string) Matches the drop_precedence
value of the packet. Allowed values are 0 through 2.
urg: (string) Enables urg for the rule
ack: (string) Enables ack for the rule
push: (string) Enables push for the rule
fin: (string) Enables fin for the rule
rst: (string) Enables rst for the rule
sync: (string) Enables sync for the rule
vlan_id: (integer) VLAN interface to which the ACL is bound
count: (string) Enables statistics for the rule
log: (string) Enables logging for the rule
(Available for permit or deny only)
mirror: (string) Enables mirror for the rule
copy_sflow: (string) Enables copy-sflow for the rule
dscp-marking: (string) dscp-marking number is used to mark the
DSCP value in the incoming packet with the value you
specify to filter. Allowed values are 0 through 63.
fragment: (string) Use fragment keyword to allow the ACL to
filter fragmented packets. Use the non-fragment keyword to
filter non-fragmented packets.
Allowed values are- fragment, non-fragment
precedence: (integer) Match packets with given precedence value
Allowed value in range 0 to 7.
option: (string) Match IP option packets.
supported values are:
any, eol, extended-security, ignore, loose-source-route
no-op, record-route, router-alert, security, streamid,
strict-source-route, timestamp
Allowed value in decimal <0-255>.
suppress-rpf-drop: (boolean) Permit packets that fail RPF check
priority: (integer) set priority
priority-force: (integer) force packet outgoing priority.
priority-mapping: (integer) map incoming packet priority.
tos: (integer) Match packets with given TOS value.
Allowed value in decimal <0-15>.
Returns:
Return True
Raises:
Exception, ValueError for invalid seq_id.
"""
return
def delete_ipv4_acl_rule(self, **parameters):
"""
Delete Rule from Access Control List.
Args:
parameters contains:
acl_name: Name of the access list.
seq_id: Sequence number of the rule. For add operation,
if not specified, the rule is added at the end of the list.
Returns:
Return value of `string` message.
Raises:
Raises ValueError, Exception
Examples:
"""
return
@abc.abstractmethod
def add_ipv6_rule_acl(self, **parameters):
"""
Add rules to Access Control List of ipv6.
Args:
parameters contains:
acl_name(string): Name of the access list
seq_id(integer): Sequence number of the rule,
if not specified, the rule is added
at the end of the list. Valid range is 0 to 4294967290
action(string): Action performed by ACL rule
- permit (default)
- deny
- hard-drop
protocol_type(string): Type of IP packets to be filtered based
on protocol. Valid values are 0 through 255 or key words
tcp, udp, icmp or ip
source(string): Source address filters
{ any | S_IPaddress mask | host S_IPaddress }
[ source-operator [ S_port-numbers ] ]
destination(string):Destination address filters
{ any | S_IPaddress mask | host S_IPaddress }
[ source-operator [ S_port-numbers ] ]
dscp(string): Matches the specified value against the DSCP
value of the packet to filter.
Can be either a numerical value or DSCP name
drop_precedence_force(string): Matches the drop_precedence
value of the packet. Allowed values are 0 through 2.
urg(string): Enables urg for the rule
ack(string): Enables ack for the rule
push(string): Enables push for the rule
fin(string): Enables fin for the rule
rst(string): Enables rst for the rule
sync(string): Enables sync for the rule
vlan_id:(integer): VLAN interface to which the ACL is bound
count(string): Enables statistics for the rule
log(string): Enables logging for the rule
mirror(string): Enables mirror for the rule
copy_sflow(string): Enables copy-sflow for the rule
Returns:
Return True
Raises:
Exception, ValueError for invalid seq_id.
"""
return
def delete_ipv6_acl_rule(self, **parameters):
"""
Delete Rule from Access Control List.
Args:
parameters contains:
acl_name: Name of the access list.
seq_id: Sequence number of the rule. For add operation,
if not specified, the rule is added at the end of the list.
Returns:
Return value of `string` message.
Raises:
Raises ValueError, Exception
Examples:
"""
return
@abc.abstractmethod
def add_ipv4_rule_acl_bulk(self, **kwargs):
"""
Add ACL rule to an existing IPv4 ACL.
Args:
acl_name (str): Name of the access list.
acl_rules (array): List of ACL sequence rules.
Returns:
True, False or None for Success, failure and no-change respectively
for each seq_ids.
Examples:
>>> from pyswitch.device import Device
>>> with Device(conn=conn, auth=auth,
connection_type='NETCONF') as dev:
>>> print dev.acl.create_acl(acl_name='Acl_1',
acl_type='standard',
address_type='ip')
>>> print dev.acl.add_ip_acl_rule(acl_name='Acl_1',
acl_rules = [{"seq_id": 10, "action": "permit",
"source": "host 192.168.0.3")
"""
return
@abc.abstractmethod
def delete_ipv4_acl_rule_bulk(self, **kwargs):
"""
Delete ACL rules from IPv4 ACL.
Args:
acl_name (str): Name of the access list.
acl_rules (string): Range of ACL sequence rules.
Returns:
True, False or None for Success, failure and no-change respectively
for each seq_ids.
Examples:
>>> from pyswitch.device import Device
>>> with Device(conn=conn, auth=auth,
connection_type='NETCONF') as dev:
>>> print dev.acl.create_acl(acl_name='Acl_1',
acl_type='standard',
address_type='ip')
>>> print dev.acl.add_ip_acl_rule(acl_name='Acl_1',
acl_rules = [{"seq_id": 10, "action": "permit",
"source": "host 192.168.0.3")
"""
return
def _process_cli_output(self, method, config, output):
"""
Parses CLI response from switch.
Args:
output string contains the response from switch.
Returns:
None
Raises:
ValueError, Exception
"""
ret = None
for line in output.split('\n'):
if 'Invalid input ' in line or 'error' in line.lower() or \
'Incomplete command' in line or \
'cannot be used as an ACL name' in line or \
'name can\'t be more than 255 characters' in line:
ret = method + ' [ ' + config + ' ]: failed ' + line
break
if ret:
raise ValueError(ret)
ret = method + ' : Successful'
return ret
def _is_parameter_supported(self, supported_params, parameters):
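        # Keep only the parameters the caller actually set (truthy values),
        # then flag any names that fall outside the supported set.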
received_params = [k for k, v in parameters.iteritems() if v]
unsupported_params = list(set(received_params) - set(supported_params))
if len(unsupported_params) > 0:
raise ValueError("unsupported parameters provided: {}"
.format(unsupported_params))
@abc.abstractmethod
def add_l2_acl_rule_bulk(self, **kwargs):
"""
Add ACL rule to an existing L2 ACL.
Args:
acl_name (str): Name of the access list.
acl_rules (array): List of ACL sequence rules.
Returns:
True, False or None for Success, failure and no-change respectively
for each seq_ids.
Examples:
>>> from pyswitch.device import Device
>>> with Device(conn=conn, auth=auth,
connection_type='NETCONF') as dev:
>>> print dev.acl.create_acl(acl_name='Acl_1',
acl_type='standard',
address_type='mac')
>>> print dev.acl.add_mac_acl_rule(acl_name='Acl_1', seq_id=20,
action='permit',
source='host',
srchost='2222.2222.2222')
"""
return
@abc.abstractmethod
def delete_l2_acl_rule_bulk(self, **kwargs):
"""
Delete ACL rules from MAC ACL.
Args:
acl_name (str): Name of the access list.
seq_id(string): Range of ACL sequences seq_id="10,30-40"
Returns:
True, False or None for Success, failure and no-change respectively
for each seq_ids.
Examples:
>>> from pyswitch.device import Device
>>> with Device(conn=conn, auth=auth,
connection_type='NETCONF') as dev:
>>> print dev.acl.create_acl(acl_name='Acl_1',
acl_type='standard',
address_type='ip')
>>> print dev.acl.delete_l2_acl_rule_bulk(acl_name='Acl_1',
seq_id="10,30-40")
"""
return
@abc.abstractmethod
def get_acl_rules(self, **kwargs):
"""
Returns the number of configured rules
Args:
acl_name (str): Name of the access list.
Returns:
Number of rules configured.
Examples:
>>> from pyswitch.device import Device
>>> with Device(conn=conn, auth=auth,
connection_type='NETCONF') as dev:
>>> print dev.acl.get_acl_rules(acl_name='Acl_1',
seq_id='all')
"""
return
avg_line_length: 40.211462 | max_line_length: 79 | alphanum_fraction: 0.533297
qsc_*_quality_signal values (schema order, qsc_code_num_words through qsc_codepython_frac_lines_print): 2,277 | 20,347 | 4.66491 | 0.147123 | 0.016475 | 0.022595 | 0.022595 | 0.745152 | 0.725946 | 0.716438 | 0.709094 | 0.699774 | 0.681887 | 0 | 0.015494 | 0.400501 | 20,347 | 505 | 80 | 40.291089 | 0.855304 | 0.718927 | 0 | 0.438356 | 0 | 0 | 0.051424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.273973 | false | 0 | 0.013699 | 0 | 0.561644 | 0
qsc_* flag values (same order): 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0
effective: 0 | hits: 6
Row 2:
hexsha: 1dbf83713d27a53a82b2c5eb943039eef8c71165 | size: 117 | ext: py | lang: Python
max_stars: path pal/gadget/cxx/__init__.py | repo mars-research/pal | head 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | licenses ["MIT"] | count 26 | events 2020-01-06T23:53:17.000Z to 2022-02-01T08:58:21.000Z
max_issues: path pal/gadget/cxx/__init__.py | repo mars-research/pal | head 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | licenses ["MIT"] | count 30 | events 2019-11-13T00:55:22.000Z to 2022-01-06T08:09:35.000Z
max_forks: path pal/gadget/cxx/__init__.py | repo mars-research/pal | head 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | licenses ["MIT"] | count 14 | events 2019-11-15T16:56:22.000Z to 2021-12-22T10:14:17.000Z
content:
from .function_definition import function_definition
from .namespace import namespace
from .extern_c import extern_c
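This __init__ simply re-exports three helpers at the package root. A minimal sketch of a consumer (the call site is an assumption, not from the repo):

```python
# Hypothetical consumer: the re-exports above let callers import from
# pal.gadget.cxx directly instead of reaching into the submodules.
from pal.gadget.cxx import extern_c, function_definition, namespace
```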
avg_line_length: 29.25 | max_line_length: 52 | alphanum_fraction: 0.871795
qsc_*_quality_signal values (schema order): 16 | 117 | 6.125 | 0.4375 | 0.367347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102564 | 117 | 3 | 53 | 39 | 0.933333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* flag values (same order): 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 6
Row 3:
hexsha: 1dc6205189c32122e541f56722a29149bf8bcfbf | size: 236 | ext: py | lang: Python
max_stars: path spylib/__init__.py | repo SatelCreative/satel-python-shopify | head c77171be6871551a0a454800c89bca88df800874 | licenses ["MIT"] | count 8 | events 2021-07-21T23:12:47.000Z to 2022-02-09T17:42:39.000Z
max_issues: path spylib/__init__.py | repo SatelCreative/satel-python-shopify | head c77171be6871551a0a454800c89bca88df800874 | licenses ["MIT"] | count 56 | events 2021-06-21T22:45:29.000Z to 2022-02-28T16:20:05.000Z
max_forks: path spylib/__init__.py | repo SatelCreative/satel-python-shopify | head c77171be6871551a0a454800c89bca88df800874 | licenses ["MIT"] | count 1 | events 2021-06-20T08:21:02.000Z to 2021-06-20T08:21:02.000Z
content:
"""A library to facilitate interfacing with Shopify's API"""
__version__ = '0.4'
from .token import OfflineTokenABC, OnlineTokenABC, PrivateTokenABC, Token
__all__ = ['OfflineTokenABC', 'OnlineTokenABC', 'PrivateTokenABC', 'Token']
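Here `__version__` pins the package version and `__all__` restricts what a star-import exposes. A small sketch of the effect (the consumer code is hypothetical):

```python
# Hypothetical consumer: because of __all__ above, a star-import pulls in
# exactly the four token classes and nothing else from the module.
from spylib import *  # OfflineTokenABC, OnlineTokenABC, PrivateTokenABC, Token

import spylib
print(spylib.__version__)  # '0.4'
```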
avg_line_length: 26.222222 | max_line_length: 75 | alphanum_fraction: 0.758475
qsc_*_quality_signal values (schema order): 24 | 236 | 7.125 | 0.791667 | 0.339181 | 0.51462 | 0.573099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009615 | 0.118644 | 236 | 8 | 76 | 29.5 | 0.8125 | 0.228814 | 0 | 0 | 0 | 0 | 0.295455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0
qsc_* flag values (same order): 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0
effective: 0 | hits: 6
Row 4:
hexsha: 1dd4d8008a1fb653dafa825e3cf79764fc0298f5 | size: 7,135 | ext: py | lang: Python
max_stars: path paypalpayoutssdk/payouts/payouts_post_request.py | repo arao6/Payouts-Python-SDK | head de16b710a118e8d382549e7a254546e2d28d477a | licenses ["BSD-Source-Code"] | count null | events null to null
max_issues: path paypalpayoutssdk/payouts/payouts_post_request.py | repo arao6/Payouts-Python-SDK | head de16b710a118e8d382549e7a254546e2d28d477a | licenses ["BSD-Source-Code"] | count null | events null to null
max_forks: path paypalpayoutssdk/payouts/payouts_post_request.py | repo arao6/Payouts-Python-SDK | head de16b710a118e8d382549e7a254546e2d28d477a | licenses ["BSD-Source-Code"] | count null | events null to null
content:
# This class was generated on Mon, 23 Dec 2019 12:39:22 IST by version 0.1.0-dev+904328-dirty of Braintree SDK Generator
# payouts_post_request.py
# @version 0.1.0-dev+904328-dirty
# @type request
# @data H4sIAAAAAAAC/+xc3XPbNhJ/v79ih72HeIYSnc+2nrkH104n6qWOL3Y703E9MkSuRNQgwABgHE0n//sNvihSpGTFUZT2Tm8WPvcLi90flv4zOiMFRkdRSeai0mpYCqWjODpFlUpaaip4dBSdSCQaFRCYEJ3m4AYPYcRB5wg/Xbw5A4nvKlQaJiKbx1ASZYbfKOQZyrGdNs6RZChvgPAMCIcbqrFQN0CkJPMhXOa4YnyGU8pRQS7uQAvICc8Y2p0DIXZuc7l6zmIU2P7h79Xh4dN0IhP7B/4mKkgJh4LchoHKbCI4gpBQCIkgMaUlRa7r2Uykt+8qodEt4lqVloLPXMuZ0KiOXHvS7ADXVrHmTEabv0r345zMzwmDTKACLjSUUqSoFGRVyWhKdE3uEEZTmIsKVIkpnc6B+E1SkXkCW2Klmadr0Q86JxruiIJKYQbU6ZURpeHpIWRkrmLbcnw+Aol/YKqdZIPS76jOjUpRSiM0VIrM/KIqF3du8ILwTybPWAzlKasya4Wvji9fvjm+AEb5rVGWWV1IOqOcsKBtS5LpUKT49B29opOyRzFe2hJTpO8NbfDq8vK8s8Vz1+BVy7lfsaXqJo9KE10pMC2x3cCYpSJTZHOQqOW8K/Htspe0zbDXKM1BO/enxBhDbZ2qKkshNUwqyjLTQozvgEc/nB1YlpT1FgSmla7kYg2JDIlqMCyszyGMzY3Bv6cZwg9nboVglsvMuoMyOCdSc5SDY60lnVRmmcGox5aCBJ13WafoH4UEyqdCFo4ZMjGGtQUS3NYxKAxLEcglTv/1e5RkIlUJKWkicYoSeYrm18BTrZJvcq3L8HPgVlK/R25la4htBpXfnnjNw6UAhkQGdoSsJ5Ag6hhSwTVJtVGLhNKxBQXhZIbSTOmh3NCljpLk7u5uWJJ5SdgwFUVSqeQOJ6QsVVKUZeLXGpRSzCQpAuHe2XkBwrnvbZO+gdEmbceaLHvqKI7+U6GcnxNJCtQoVXR0dR1Hr6ysWq1/Rpfz0lyNSkvKZ1Ec/UokJROG/spcq/Qojv6N8w1Htm/bKI6OzR3mtj+Mo7dIsjeczaOjKWEKTcO7ikrM6oZzKUqUmqKKjnjF2Md4U+rfekPqp7jV26bSeAKF8j1KUFpIVHCLcwVTIcOVMdwGI9dx9KOQxbK+zonOl9s8sW63mn8Xu3iHA35MVxypHTZ2l8dY1sMWIlk1oisWNzJcRH7k50pjYY+elZHGosuHDXJadIeWNp3H3AdKYgqUZ/Q9zarF5elCpTbFL9ZSrGW1kuBjplFyI5IzoenUxADGnf6MOhdZlwUSho95Y/i4CMMXrN03cimKJaW5exSQLKPuloHmPCjsLaMFSCRpDtbZC3/r2rveuMoZkRnlM9A5VUZcBfJt6jYXHLsiKX3zgvXQ0rU+2wO8KiZori+gWpmbVXCaEgaUO5E59q9eDh+/eOYHG65KRji4G+/6UdOjU10NKdeJxDS5HLx9eTKwUxPkB9vjfpWvSkXFtZyPzd3UPpPtjp6j6AZAShgz/JmB8Ojk5CDejWjAEVFMKMcMGPKZzs2RM0Z1cmLjWvNnvatXW1EpbcOqCcLMehNpomkOj59DRmfUBP2XPfNSwRVVWpktyKI3Q6Wp++UlcHZ6cmB3V9XEyMxM9ms8ujj7NJV2j/6GVxB+0MiVObFu65Zqezq76q0HeeKHu7k7g2D76O72dcleUttfwBZXnpP/aRO9/njd0PZJJU28Pe+5kQojnvbVE5p6fI5fx+EcdpwNiwhMKSc8pYSBloQrkppZMagqzYE4eIURnlrkwV8tkFW4Awe7YL3hXBeNXSavdC4RB2lOJEmN7kcXbwbPnjz+dsG+UeP1I5fQGHueSaveJKMSU2OiSidh8MBmeMmOHM97wqr2RRJauozanhjucprmUNBZbiz+aB2Kc8zt8TWZklG7Z5GiAkZvEW5+Ov/txmEjRKI9QnpemlPP5jCVzigIG65Nyo8hw5QWhNUz+ve6PDtt7KWqiY30LMRjQxtRKcIznavh+nTKpMIBfjDCDydRTGtCSkZSVN7SW0bgEt2rk2abgoExGTAms+TClBgKOUuoEtagBm0bGea6YFs6/Rt4eqHbhuIb+nIhnqEcOAyOosVA0EqjGWOqIVw4pERBVZrw8tnh4SEcX5yMRlAfJmVdx2PTwwUfLPfu6JYLQW+L/0ZjVwZ1mOxvlRAew4mQElUpeKZCSH1To6pjPS/xxh20gPGEpAl+Jh98j5h6iT1+8i0oymcMB5O5xq8imAbly+Jpd/UKyQ0xxx6H8KvjWx1tjAwvo08vfz4eve5ifPb6rTjyVM5LjRlgQShbbAgEHIcbifahiGEHKnv15uzlBsQ205jezTcD4XsxeI9inhMGxv1NxIcukNkL8/XSvqzRFdDTZ4js+Lfz49fj0ekKsS2E5pkiqY3n1klvPW5WP4+Mpr2C6Hmm6YqlBuz7VmgflB7E32NkGLcwCagUPmg98XBGYqg4Q/ue1SAFcqJs0C7u+MO5s283D5awKKj+bPFarKPJmI3svfIezKH12Tv3x3eEMdQrPHLduc4nu0E7Itwr2Uh9TNvQVqdrg4hjdBpOPFxKkt62Xz85EB48g3H6aq40FtsBaRu8Xliq4Af7UOxg9ZWMN627j/ul/pUi8C9FWeDVzfCh6GfAsOtATTH5A9MeIHkBLo4bb0ft3HHVkC6DDaiyMTQGOgXC53HjSco/rOzIcie8C8Yt2npASf+wo41dBoAhNnnuqgfD2KUtJkxRis44ZiFyDJxCnZ7Zl0PClIAJQm0MNqNuRZXbfTmEH0OBQN8jIVWtZ76rz9s75NHrHgbdo9sgvLCRxoo0O9iRYdgYc+wrANqA3lJPD5pnRrSrB0KUhiZzuMuRLxd0QCqKkqFGDzeF2VQtXsetH2CsWcexS2GoKriKZWEsenpcm+sDRrnjQNcSWiGZgBi13F6fjFpL3yOoRrZgjliZE14VKGnayR2ePH/+ddOyzXP1UOdjy0o0FgOG79E+BqGyQEkqeEo0cqIX1TA+e7JGRj7QoipWgaluHapcBv83S09HpzaL8d43Q65NcBHKfUKYtJzgG1eIH4gxsxg2zE93GmOFUpjVYUZPlHW8pRjrsxPWfWHYvjBsN4Vh1r8ZbdZ3hrtL7M0iOJuD4Km/KR5oTdQbk6GU8rpapBel6Knf2UK2cv3RjFOl4ArvKRdxg7pupeVHNqoCcSttvQxkVYq1Mre6N6lqJ1FBZRJ1JbnzAYEZc8TI0vgaub2w5ztDTSjzzwIczHUiv1xpzMp0xfLsDlKPMOqO1aGCG7Ld56mXxin2RI2muU1m3dQTNFvP2shAvrw0M5xUs+W7tNHYJdJfWf5Vm8HodAi/GB9gH62ElMhc9lRWshQKtx0qWUnDqbPFPoZCR5Of0La6cKqRnTftXAe1fFIF1YNUMaXI2noILX1VB0WNg9pR7mynxHrjVUR/qbiMKrX0Bhta+uA5ovwL53bJ3OQJsHG0xiauaNPc7eyS/8vbkfGUzkSsB13ABdbyA7ZBlT/OgdPM5CW
G5t28i/el7esS9lY8l6Gr0lAP1NGDqTZ2vVSCU/TTm1cF4QOJJDPTY6g4fVe1D8bW6N7EtHwtaW9u0u1b5VYHM+QorRWNTlvZ/3bl7wOOrwrw7gLZ3UNbe2jLQlsuea3RngXI89Whri8H8QRJh1KOvwFKs723sD1Os8dp9jjNXwenud9vaFrg2CEtbaex1NH1GJmxY1vcTAv0t4w7oMYhhFvJ2+IEZ4TbMu2rkc1hUS8tsFyMrYVgakhRT20xY64Llshp+vTp0++/UWgrNgfPhy8Otp7G+CgNt4ct+NR2jyt8QnK7lZxxWgt/KWl01hmyxX1uuI3c0OeC7RTRpYYPxx02zanuwad8Lmj3H/eBVSsGbIRcrTvke8Tqr4ZYLch8bWKn5s4dgo1zadtJaFltGOHD8oBLXTVDNbWuDMU9Cajkm5xoFEQN7IyDL29RucRpi0vf0Pc1oIuVQBM5Qw2/vH1tv4e3//zDBWeO65QwFofnfdvjvikNURtVcGU89CUWpZkxcL5ZY3ZvBPDi+beHB1aa7tW8lDhYBB5xXXhpa+T/eRPDzaOb2MYZNwc3zUzT/dMTw+tNyHRvcV6H1oZXwW1RsjkzVhlAahE4HkOKq6qJMorn2jbv6ED1fNG74tNdw6qN0b0a6s9Q9Arl7SwPZ0vJN+v/WsomOnU0YrLvew3l+ffffVeHis8OwidI9pt7ZYvb+ALvc4mUUXTFSTGhs0pUis2XLl6FBeGapio4VWeGF4hwZd3JW0+hWvokh3BiaXOleCbHUImZOwgsLf8cfvj0z3TWvdde7x3f3vHtHd/e8f2/Ob44OhFcI/f/1iQipcPgqODJH8o6v1dal/5/aRxF528uLiP3/1Gioyh5/zjxoJBKPNoYxdHFLS3rbV9+KDHVmF1Y+OpEZBgdPTl8/PEf/wUAAP//
# DO NOT EDIT
import paypalhttp
import sys
try:
from urllib import quote # Python 2.X
except ImportError:
from urllib.parse import quote # Python 3+
class PayoutsPostRequest:
"""
Creates a batch payout. In the JSON request body, pass a `sender_batch_header` and an `items` array. The `sender_batch_header` defines how to handle the payout. The `items` array defines the payout items.<br/>You can make payouts to one or more recipients.<blockquote><strong>Notes:</strong> <ul><li><p>PayPal does not process duplicate payouts. If you specify a <code>sender_batch_id</code> that was used in the last 30 days, the API rejects the request with an error message that shows the duplicate <code>sender_batch_id</code> and includes a HATEOAS link to the original payout with the same <code>sender_batch_id</code>.</p><p>If you receive an HTTP <code>5<i>nn</i></code> status code, you can safely retry the request with the same <code>sender_batch_id</code>.</p></li><li><p>The Payouts API does not support build notation (BN) codes. In a future Payouts release, you can optionally provide BN codes in the <code>PayPal-Partner-Attribution-Id</code> request header.</p><p>For information about the <code>PayPal-Partner-Attribution-Id</code> header, see <a href="/docs/api/reference/api-requests/#http-request-headers">HTTP request headers</a>. To learn about or request a BN code, contact your partner manager or see <a href="https://www.paypal.com/us/webapps/mpp/partner-program">PayPal Partner Program</a>.</p></li></ul></blockquote>
"""
def __init__(self):
self.verb = "POST"
self.path = "/v1/payments/payouts?"
self.headers = {}
self.headers["Content-Type"] = "application/json"
self.body = None
def pay_pal_partner_attribution_id(self, pay_pal_partner_attribution_id):
self.headers["PayPal-Partner-Attribution-Id"] = str(pay_pal_partner_attribution_id)
def pay_pal_request_id(self, pay_pal_request_id):
self.headers["PayPal-Request-Id"] = str(pay_pal_request_id)
def request_body(self, create_payout_request):
self.body = create_payout_request
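        # NOTE: the stdout writes below look like leftover debug tracing;
        # they dump the request headers, verb and body on every call.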
sys.stdout.write('----POST PAYOUT RESPONSE----')
sys.stdout.write(repr(self.headers))
sys.stdout.write(repr(self.verb))
sys.stdout.write(repr(self.body)) # same as print
sys.stdout.write('----POST PAYOUT RESPONSE----')
sys.stdout.flush()
return self
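A minimal sketch of how this request object is typically driven through paypalhttp (the client setup and the batch payload are assumptions, not part of this file):

```python
# Hypothetical usage of PayoutsPostRequest; http_client is assumed to be a
# configured paypalhttp.HttpClient (e.g. built from a sandbox environment).
request = PayoutsPostRequest()
request.pay_pal_request_id("batch-001-request")  # optional idempotency header
request.request_body({
    "sender_batch_header": {"sender_batch_id": "batch-001",
                            "email_subject": "You have a payout!"},
    "items": [{"recipient_type": "EMAIL",
               "amount": {"value": "1.00", "currency": "USD"},
               "receiver": "payee@example.com"}],
})
# response = http_client.execute(request)
```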
avg_line_length: 165.930233 | max_line_length: 4,484 | alphanum_fraction: 0.877365
qsc_*_quality_signal values (schema order): 535 | 7,135 | 11.618692 | 0.528972 | 0.010618 | 0.019305 | 0.01094 | 0.079311 | 0.053089 | 0.043436 | 0.02381 | 0.010618 | 0 | 0 | 0.108294 | 0.060406 | 7,135 | 42 | 4,485 | 169.880952 | 0.818914 | 0.849895 | 0 | 0.076923 | 1 | 0 | 0.148042 | 0.047755 | 0 | 1 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.192308 | 0 | 0.423077 | 0
qsc_* flag values (same order): 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 6
Row 5:
hexsha: 1de64d87bbb01796702fe48ace190cec97b6e3c2 | size: 35,760 | ext: py | lang: Python
max_stars: path raiden/tests/integration/network/proxies/test_token_network.py | repo jomuel/raiden | head 8e3a1e57ebc112171cc6236e09657f32a89f8f3b | licenses ["MIT"] | count null | events null to null
max_issues: path raiden/tests/integration/network/proxies/test_token_network.py | repo jomuel/raiden | head 8e3a1e57ebc112171cc6236e09657f32a89f8f3b | licenses ["MIT"] | count 1 | events 2019-12-06T16:34:40.000Z to 2019-12-06T16:34:40.000Z
max_forks: path raiden/tests/integration/network/proxies/test_token_network.py | repo jomuel/raiden | head 8e3a1e57ebc112171cc6236e09657f32a89f8f3b | licenses ["MIT"] | count null | events null to null
content:
import random
import pytest
from eth_utils import decode_hex, encode_hex, to_canonical_address, to_checksum_address
from raiden.constants import (
EMPTY_BALANCE_HASH,
EMPTY_HASH,
EMPTY_SIGNATURE,
GENESIS_BLOCK_NUMBER,
LOCKSROOT_OF_NO_LOCKS,
STATE_PRUNING_AFTER_BLOCKS,
)
from raiden.exceptions import (
BrokenPreconditionError,
InvalidChannelID,
InvalidSettleTimeout,
RaidenRecoverableError,
RaidenUnrecoverableError,
SamePeerAddress,
)
from raiden.network.proxies.proxy_manager import ProxyManager, ProxyManagerMetadata
from raiden.network.rpc.client import JSONRPCClient
from raiden.tests.integration.network.proxies import BalanceProof
from raiden.tests.utils.factories import make_address
from raiden.utils.signer import LocalSigner
from raiden.utils.typing import T_ChannelID
from raiden_contracts.constants import (
TEST_SETTLE_TIMEOUT_MAX,
TEST_SETTLE_TIMEOUT_MIN,
MessageTypeId,
)
SIGNATURE_SIZE_IN_BITS = 520
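# 520 bits = 65 bytes, the size of an Ethereum ECDSA signature
# (32-byte r + 32-byte s + 1-byte v).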
def test_token_network_deposit_race(
token_network_proxy, private_keys, token_proxy, web3, contract_manager
):
assert token_network_proxy.settlement_timeout_min() == TEST_SETTLE_TIMEOUT_MIN
assert token_network_proxy.settlement_timeout_max() == TEST_SETTLE_TIMEOUT_MAX
token_network_address = to_canonical_address(token_network_proxy.proxy.contract.address)
c1_client = JSONRPCClient(web3, private_keys[1])
c2_client = JSONRPCClient(web3, private_keys[2])
proxy_manager = ProxyManager(
rpc_client=c1_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c1_token_network_proxy = proxy_manager.token_network(token_network_address)
token_proxy.transfer(c1_client.address, 10)
channel_identifier = c1_token_network_proxy.new_netting_channel(
partner=c2_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
given_block_identifier="latest",
)
assert channel_identifier is not None
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=2,
partner=c2_client.address,
)
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=1,
partner=c2_client.address,
)
def test_token_network_proxy(
token_network_proxy, private_keys, token_proxy, chain_id, web3, contract_manager
):
assert token_network_proxy.settlement_timeout_min() == TEST_SETTLE_TIMEOUT_MIN
assert token_network_proxy.settlement_timeout_max() == TEST_SETTLE_TIMEOUT_MAX
token_network_address = to_canonical_address(token_network_proxy.proxy.contract.address)
c1_signer = LocalSigner(private_keys[1])
c1_client = JSONRPCClient(web3, private_keys[1])
c1_proxy_manager = ProxyManager(
rpc_client=c1_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c2_client = JSONRPCClient(web3, private_keys[2])
c2_proxy_manager = ProxyManager(
rpc_client=c2_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c2_signer = LocalSigner(private_keys[2])
c1_token_network_proxy = c1_proxy_manager.token_network(token_network_address)
c2_token_network_proxy = c2_proxy_manager.token_network(token_network_address)
initial_token_balance = 100
token_proxy.transfer(c1_client.address, initial_token_balance)
token_proxy.transfer(c2_client.address, initial_token_balance)
initial_balance_c1 = token_proxy.balance_of(c1_client.address)
assert initial_balance_c1 == initial_token_balance
initial_balance_c2 = token_proxy.balance_of(c2_client.address)
assert initial_balance_c2 == initial_token_balance
# instantiating a new channel - test basic assumptions
assert (
c1_token_network_proxy.get_channel_identifier_or_none(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
)
is None
)
msg = "Hex encoded addresses are not supported, an assertion must be raised"
with pytest.raises(AssertionError):
c1_token_network_proxy.get_channel_identifier(
participant1=to_checksum_address(c1_client.address),
participant2=to_checksum_address(c2_client.address),
block_identifier="latest",
)
pytest.fail(msg)
msg = "Zero is not a valid channel_identifier identifier, an exception must be raised."
with pytest.raises(InvalidChannelID):
assert c1_token_network_proxy.channel_is_opened(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
channel_identifier=0,
)
pytest.fail(msg)
msg = "Zero is not a valid channel_identifier identifier. an exception must be raised."
with pytest.raises(InvalidChannelID):
assert c1_token_network_proxy.channel_is_closed(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
channel_identifier=0,
)
pytest.fail(msg)
msg = (
"Opening a channel with a settle_timeout lower then token "
"network's minimum will fail. This must be validated and the "
"transaction must not be sent."
)
with pytest.raises(InvalidSettleTimeout):
c1_token_network_proxy.new_netting_channel(
partner=c2_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MIN - 1,
given_block_identifier="latest",
)
pytest.fail(msg)
# Using exactly the minimal timeout must succeed
c1_token_network_proxy.new_netting_channel(
partner=make_address(),
settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
given_block_identifier="latest",
)
msg = (
"Opening a channel with a settle_timeout larger then token "
"network's maximum will fail. This must be validated and the "
"transaction must not be sent."
)
with pytest.raises(InvalidSettleTimeout):
c1_token_network_proxy.new_netting_channel(
partner=c2_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MAX + 1,
given_block_identifier="latest",
)
pytest.fail(msg)
# Using exactly the maximal timeout must succeed
c1_token_network_proxy.new_netting_channel(
partner=make_address(),
settle_timeout=TEST_SETTLE_TIMEOUT_MAX,
given_block_identifier="latest",
)
msg = (
"Opening a channel with itself is not allow. This must be validated and "
"the transaction must not be sent."
)
with pytest.raises(SamePeerAddress):
c1_token_network_proxy.new_netting_channel(
partner=c1_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
given_block_identifier="latest",
)
pytest.fail(msg)
msg = "Trying a deposit to an inexisting channel must fail."
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=100,
total_deposit=1,
partner=c2_client.address,
)
pytest.fail(msg)
empty_balance_proof = BalanceProof(
channel_identifier=100,
token_network_address=c1_token_network_proxy.address,
balance_hash=encode_hex(EMPTY_BALANCE_HASH),
nonce=0,
chain_id=chain_id,
transferred_amount=0,
)
closing_data = (
empty_balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + EMPTY_SIGNATURE
)
msg = "Trying to close an inexisting channel must fail."
match = "The channel was not open at the provided block"
with pytest.raises(RaidenUnrecoverableError, match=match):
c1_token_network_proxy.close(
channel_identifier=100,
partner=c2_client.address,
balance_hash=EMPTY_HASH,
nonce=0,
additional_hash=EMPTY_HASH,
non_closing_signature=EMPTY_SIGNATURE,
closing_signature=c1_signer.sign(data=closing_data),
given_block_identifier="latest",
)
pytest.fail(msg)
channel_identifier = c1_token_network_proxy.new_netting_channel(
partner=c2_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
given_block_identifier="latest",
)
msg = "new_netting_channel did not return a valid channel id"
assert isinstance(channel_identifier, T_ChannelID), msg
msg = "multiple channels with the same peer are not allowed"
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.new_netting_channel(
partner=c2_client.address,
settle_timeout=TEST_SETTLE_TIMEOUT_MIN,
given_block_identifier="latest",
)
pytest.fail(msg)
assert (
c1_token_network_proxy.get_channel_identifier_or_none(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
)
is not None
)
assert (
c1_token_network_proxy.channel_is_opened(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
channel_identifier=channel_identifier,
)
is True
)
msg = "set_total_deposit must fail if the amount exceed the account's balance"
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=initial_token_balance + 1,
partner=c2_client.address,
)
pytest.fail(msg)
msg = "set_total_deposit must fail with a negative amount"
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=-1,
partner=c2_client.address,
)
pytest.fail(msg)
msg = "set_total_deposit must fail with a zero amount"
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=0,
partner=c2_client.address,
)
pytest.fail(msg)
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=10,
partner=c2_client.address,
)
transferred_amount = 3
balance_proof = BalanceProof(
channel_identifier=channel_identifier,
token_network_address=to_checksum_address(token_network_address),
nonce=1,
chain_id=chain_id,
transferred_amount=transferred_amount,
)
signature = c1_signer.sign(data=balance_proof.serialize_bin())
balance_proof.signature = encode_hex(signature)
signature_number = int.from_bytes(signature, "big")
bit_to_change = random.randint(0, SIGNATURE_SIZE_IN_BITS - 1)
signature_number_bit_flipped = signature_number ^ (2 ** bit_to_change)
invalid_signatures = [
EMPTY_SIGNATURE,
b"\x11" * 65,
signature_number_bit_flipped.to_bytes(len(signature), "big"),
]
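    # Three invalid cases: the empty-signature constant, a well-formed but
    # wrong 65-byte value, and an otherwise-valid signature with a single
    # random bit flipped.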
msg = "close must fail if the closing_signature is invalid"
for invalid_signature in invalid_signatures:
closing_data = (
balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + invalid_signature
)
with pytest.raises(RaidenUnrecoverableError):
c2_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof.balance_hash),
nonce=balance_proof.nonce,
additional_hash=decode_hex(balance_proof.additional_hash),
non_closing_signature=invalid_signature,
closing_signature=c2_signer.sign(data=closing_data),
given_block_identifier="latest",
)
pytest.fail(msg)
blocknumber_prior_to_close = c2_client.block_number()
closing_data = balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + decode_hex(
balance_proof.signature
)
c2_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof.balance_hash),
nonce=balance_proof.nonce,
additional_hash=decode_hex(balance_proof.additional_hash),
non_closing_signature=decode_hex(balance_proof.signature),
closing_signature=c2_signer.sign(data=closing_data),
given_block_identifier="latest",
)
assert (
c1_token_network_proxy.channel_is_closed(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
channel_identifier=channel_identifier,
)
is True
)
assert (
c1_token_network_proxy.get_channel_identifier_or_none(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
)
is not None
)
msg = (
"given_block_identifier is the block at which the transaction is being "
"sent. If the channel is already closed at that block the client code "
"has a programming error. An exception is raised for that."
)
with pytest.raises(RaidenUnrecoverableError):
c2_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof.balance_hash),
nonce=balance_proof.nonce,
additional_hash=decode_hex(balance_proof.additional_hash),
non_closing_signature=decode_hex(balance_proof.signature),
closing_signature=c2_signer.sign(data=closing_data),
given_block_identifier="latest",
)
pytest.fail(msg)
msg = (
"The channel cannot be closed two times. If it was not closed at "
"given_block_identifier but it is closed at the time the proxy is "
"called an exception must be raised."
)
with pytest.raises(RaidenRecoverableError):
c2_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof.balance_hash),
nonce=balance_proof.nonce,
additional_hash=decode_hex(balance_proof.additional_hash),
non_closing_signature=decode_hex(balance_proof.signature),
closing_signature=c2_signer.sign(data=closing_data),
given_block_identifier=blocknumber_prior_to_close,
)
pytest.fail(msg)
msg = "depositing to a closed channel must fail"
match = "closed"
with pytest.raises(RaidenRecoverableError, match=match):
c2_token_network_proxy.set_total_deposit(
given_block_identifier=blocknumber_prior_to_close,
channel_identifier=channel_identifier,
total_deposit=20,
partner=c1_client.address,
)
pytest.fail(msg)
c1_proxy_manager.wait_until_block(
target_block_number=c1_proxy_manager.client.block_number() + TEST_SETTLE_TIMEOUT_MIN
)
invalid_transferred_amount = 1
msg = "settle with invalid transferred_amount data must fail"
with pytest.raises(BrokenPreconditionError):
c2_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=invalid_transferred_amount,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c1_client.address,
partner_transferred_amount=transferred_amount,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier="latest",
)
pytest.fail(msg)
c2_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=0,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c1_client.address,
partner_transferred_amount=transferred_amount,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier="latest",
)
assert (
c1_token_network_proxy.get_channel_identifier_or_none(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
)
is None
)
assert token_proxy.balance_of(c1_client.address) == (initial_balance_c1 - transferred_amount)
assert token_proxy.balance_of(c2_client.address) == (initial_balance_c2 + transferred_amount)
msg = "depositing to a settled channel must fail"
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=10,
partner=c2_client.address,
)
pytest.fail(msg)
def test_token_network_proxy_update_transfer(
token_network_proxy, private_keys, token_proxy, chain_id, web3, contract_manager
):
"""Tests channel lifecycle, with `update_transfer` before settling"""
token_network_address = to_canonical_address(token_network_proxy.proxy.contract.address)
c1_client = JSONRPCClient(web3, private_keys[1])
c1_proxy_manager = ProxyManager(
rpc_client=c1_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c1_signer = LocalSigner(private_keys[1])
c2_client = JSONRPCClient(web3, private_keys[2])
c2_proxy_manager = ProxyManager(
rpc_client=c2_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c1_token_network_proxy = c1_proxy_manager.token_network(token_network_address)
c2_token_network_proxy = c2_proxy_manager.token_network(token_network_address)
# create a channel
channel_identifier = c1_token_network_proxy.new_netting_channel(
partner=c2_client.address, settle_timeout=10, given_block_identifier="latest"
)
# deposit to the channel
initial_balance = 100
token_proxy.transfer(c1_client.address, initial_balance)
token_proxy.transfer(c2_client.address, initial_balance)
initial_balance_c1 = token_proxy.balance_of(c1_client.address)
assert initial_balance_c1 == initial_balance
initial_balance_c2 = token_proxy.balance_of(c2_client.address)
assert initial_balance_c2 == initial_balance
c1_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=10,
partner=c2_client.address,
)
c2_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=10,
partner=c1_client.address,
)
# balance proof signed by c1
transferred_amount_c1 = 1
transferred_amount_c2 = 3
balance_proof_c1 = BalanceProof(
channel_identifier=channel_identifier,
token_network_address=to_checksum_address(token_network_address),
nonce=1,
chain_id=chain_id,
transferred_amount=transferred_amount_c1,
)
balance_proof_c1.signature = encode_hex(
LocalSigner(private_keys[1]).sign(data=balance_proof_c1.serialize_bin())
)
# balance proof signed by c2
balance_proof_c2 = BalanceProof(
channel_identifier=channel_identifier,
token_network_address=to_checksum_address(token_network_address),
nonce=2,
chain_id=chain_id,
transferred_amount=transferred_amount_c2,
)
balance_proof_c2.signature = encode_hex(
LocalSigner(private_keys[2]).sign(data=balance_proof_c2.serialize_bin())
)
non_closing_data = balance_proof_c1.serialize_bin(
msg_type=MessageTypeId.BALANCE_PROOF_UPDATE
) + decode_hex(balance_proof_c1.signature)
non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
with pytest.raises(RaidenUnrecoverableError) as exc:
c2_token_network_proxy.update_transfer(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof_c1.balance_hash),
nonce=balance_proof_c1.nonce,
additional_hash=decode_hex(balance_proof_c1.additional_hash),
closing_signature=decode_hex(balance_proof_c1.signature),
non_closing_signature=non_closing_signature,
given_block_identifier="latest",
)
assert "not in a closed state" in str(exc)
# close by c1
closing_data = balance_proof_c2.serialize_bin(
msg_type=MessageTypeId.BALANCE_PROOF
) + decode_hex(balance_proof_c2.signature)
c1_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c2_client.address,
balance_hash=decode_hex(balance_proof_c2.balance_hash),
nonce=balance_proof_c2.nonce,
additional_hash=decode_hex(balance_proof_c2.additional_hash),
non_closing_signature=decode_hex(balance_proof_c2.signature),
closing_signature=c1_signer.sign(data=closing_data),
given_block_identifier="latest",
)
# update transfer with completely invalid closing signature
with pytest.raises(RaidenUnrecoverableError) as excinfo:
c2_token_network_proxy.update_transfer(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof_c1.balance_hash),
nonce=balance_proof_c1.nonce,
additional_hash=decode_hex(balance_proof_c1.additional_hash),
closing_signature=b"",
non_closing_signature=b"",
given_block_identifier="latest",
)
assert str(excinfo.value) == "Couldn't verify the balance proof signature"
# using invalid non-closing signature
# Usual mistake when calling update Transfer - balance proof signature is missing in the data
non_closing_data = balance_proof_c1.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF_UPDATE)
non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
with pytest.raises(RaidenUnrecoverableError):
c2_token_network_proxy.update_transfer(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof_c1.balance_hash),
nonce=balance_proof_c1.nonce,
additional_hash=decode_hex(balance_proof_c1.additional_hash),
closing_signature=decode_hex(balance_proof_c1.signature),
non_closing_signature=non_closing_signature,
given_block_identifier="latest",
)
non_closing_data = balance_proof_c1.serialize_bin(
msg_type=MessageTypeId.BALANCE_PROOF_UPDATE
) + decode_hex(balance_proof_c1.signature)
non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
c2_token_network_proxy.update_transfer(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof_c1.balance_hash),
nonce=balance_proof_c1.nonce,
additional_hash=decode_hex(balance_proof_c1.additional_hash),
closing_signature=decode_hex(balance_proof_c1.signature),
non_closing_signature=non_closing_signature,
given_block_identifier="latest",
)
with pytest.raises(BrokenPreconditionError) as exc:
c1_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=transferred_amount_c1,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c2_client.address,
partner_transferred_amount=transferred_amount_c2,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier="latest",
)
assert "cannot be settled before settlement window is over" in str(exc)
c1_proxy_manager.wait_until_block(
target_block_number=c1_proxy_manager.client.block_number() + 10
)
# settling with an invalid amount
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=2,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c2_client.address,
partner_transferred_amount=2,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier="latest",
)
# proper settle
c1_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=transferred_amount_c1,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c2_client.address,
partner_transferred_amount=transferred_amount_c2,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier="latest",
)
assert token_proxy.balance_of(c2_client.address) == (
initial_balance_c2 + transferred_amount_c1 - transferred_amount_c2
)
assert token_proxy.balance_of(c1_client.address) == (
initial_balance_c1 + transferred_amount_c2 - transferred_amount_c1
)
# Already settled
with pytest.raises(BrokenPreconditionError) as exc:
c2_token_network_proxy.set_total_deposit(
given_block_identifier="latest",
channel_identifier=channel_identifier,
total_deposit=20,
partner=c1_client.address,
)
assert "getChannelIdentifier returned 0" in str(exc)
def test_query_pruned_state(token_network_proxy, private_keys, web3, contract_manager):
"""A test for https://github.com/raiden-network/raiden/issues/3566
After more than the pruning limit of blocks has passed, make sure that can_query_state_for_block returns False.
"""
token_network_address = to_canonical_address(token_network_proxy.proxy.contract.address)
c1_client = JSONRPCClient(web3, private_keys[1])
c1_proxy_manager = ProxyManager(
rpc_client=c1_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c2_client = JSONRPCClient(web3, private_keys[2])
c1_token_network_proxy = c1_proxy_manager.token_network(token_network_address)
# create a channel and query the state at the current block hash
channel_identifier = c1_token_network_proxy.new_netting_channel(
partner=c2_client.address, settle_timeout=10, given_block_identifier="latest"
)
block = c1_client.web3.eth.getBlock("latest")
block_number = int(block["number"])
block_hash = bytes(block["hash"])
channel_id = c1_token_network_proxy.get_channel_identifier(
participant1=c1_client.address, participant2=c2_client.address, block_identifier=block_hash
)
assert channel_id == channel_identifier
assert c1_client.can_query_state_for_block(block_hash)
# wait until state pruning kicks in
target_block = block_number + STATE_PRUNING_AFTER_BLOCKS + 1
c1_proxy_manager.wait_until_block(target_block_number=target_block)
# and now query again for the old block identifier and see we can't query
assert not c1_client.can_query_state_for_block(block_hash)
def test_token_network_actions_at_pruned_blocks(
token_network_proxy, private_keys, token_proxy, web3, chain_id, contract_manager
):
token_network_address = to_canonical_address(token_network_proxy.proxy.contract.address)
c1_client = JSONRPCClient(web3, private_keys[1])
c1_proxy_manager = ProxyManager(
rpc_client=c1_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c1_token_network_proxy = c1_proxy_manager.token_network(token_network_address)
c2_client = JSONRPCClient(web3, private_keys[2])
c2_proxy_manager = ProxyManager(
rpc_client=c2_client,
contract_manager=contract_manager,
metadata=ProxyManagerMetadata(
token_network_registry_deployed_at=GENESIS_BLOCK_NUMBER,
filters_start_at=GENESIS_BLOCK_NUMBER,
),
)
c2_token_network_proxy = c2_proxy_manager.token_network(token_network_address)
initial_token_balance = 100
token_proxy.transfer(c1_client.address, initial_token_balance)
token_proxy.transfer(c2_client.address, initial_token_balance)
initial_balance_c1 = token_proxy.balance_of(c1_client.address)
assert initial_balance_c1 == initial_token_balance
initial_balance_c2 = token_proxy.balance_of(c2_client.address)
assert initial_balance_c2 == initial_token_balance
# create a channel
settle_timeout = STATE_PRUNING_AFTER_BLOCKS + 10
channel_identifier = c1_token_network_proxy.new_netting_channel(
partner=c2_client.address, settle_timeout=settle_timeout, given_block_identifier="latest"
)
# Now wait until this block becomes pruned
pruned_number = c1_proxy_manager.client.block_number()
c1_proxy_manager.wait_until_block(
target_block_number=pruned_number + STATE_PRUNING_AFTER_BLOCKS
)
# deposit with given block being pruned
c1_token_network_proxy.set_total_deposit(
given_block_identifier=pruned_number,
channel_identifier=channel_identifier,
total_deposit=2,
partner=c2_client.address,
)
# balance proof signed by c1
transferred_amount_c1 = 1
balance_proof_c1 = BalanceProof(
channel_identifier=channel_identifier,
token_network_address=to_checksum_address(token_network_address),
nonce=1,
chain_id=chain_id,
transferred_amount=transferred_amount_c1,
)
balance_proof_c1.signature = encode_hex(
LocalSigner(private_keys[1]).sign(data=balance_proof_c1.serialize_bin())
)
non_closing_data = balance_proof_c1.serialize_bin(
msg_type=MessageTypeId.BALANCE_PROOF_UPDATE
) + decode_hex(balance_proof_c1.signature)
non_closing_signature = LocalSigner(c2_client.privkey).sign(data=non_closing_data)
# close channel with given block being pruned
empty_balance_proof = BalanceProof(
channel_identifier=channel_identifier,
token_network_address=c1_token_network_proxy.address,
balance_hash=encode_hex(EMPTY_BALANCE_HASH),
nonce=0,
chain_id=chain_id,
transferred_amount=0,
)
closing_data = (
empty_balance_proof.serialize_bin(msg_type=MessageTypeId.BALANCE_PROOF) + EMPTY_SIGNATURE
)
c1_token_network_proxy.close(
channel_identifier=channel_identifier,
partner=c2_client.address,
balance_hash=EMPTY_HASH,
nonce=0,
additional_hash=EMPTY_HASH,
non_closing_signature=EMPTY_SIGNATURE,
closing_signature=LocalSigner(c1_client.privkey).sign(data=closing_data),
given_block_identifier=pruned_number,
)
close_pruned_number = c1_proxy_manager.client.block_number()
assert (
c1_token_network_proxy.channel_is_closed(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
channel_identifier=channel_identifier,
)
is True
)
assert (
c1_token_network_proxy.get_channel_identifier_or_none(
participant1=c1_client.address,
participant2=c2_client.address,
block_identifier="latest",
)
is not None
)
c1_proxy_manager.wait_until_block(
target_block_number=close_pruned_number + STATE_PRUNING_AFTER_BLOCKS
)
# update transfer with given block being pruned
c2_token_network_proxy.update_transfer(
channel_identifier=channel_identifier,
partner=c1_client.address,
balance_hash=decode_hex(balance_proof_c1.balance_hash),
nonce=balance_proof_c1.nonce,
additional_hash=decode_hex(balance_proof_c1.additional_hash),
closing_signature=decode_hex(balance_proof_c1.signature),
non_closing_signature=non_closing_signature,
given_block_identifier=close_pruned_number,
)
# update transfer
c1_proxy_manager.wait_until_block(target_block_number=close_pruned_number + settle_timeout)
# Settling must fail because at close_pruned_number the settlement
# period isn't over yet.
with pytest.raises(BrokenPreconditionError):
c1_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=transferred_amount_c1,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c2_client.address,
partner_transferred_amount=0,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
given_block_identifier=close_pruned_number,
)
settle_block_number = close_pruned_number + settle_timeout
# Wait until the settle block is pruned
c1_proxy_manager.wait_until_block(
target_block_number=settle_block_number + STATE_PRUNING_AFTER_BLOCKS + 1
)
c1_token_network_proxy.settle(
channel_identifier=channel_identifier,
transferred_amount=transferred_amount_c1,
locked_amount=0,
locksroot=LOCKSROOT_OF_NO_LOCKS,
partner=c2_client.address,
partner_transferred_amount=0,
partner_locked_amount=0,
partner_locksroot=LOCKSROOT_OF_NO_LOCKS,
# The settle block number is pruned; we should not fail at the preconditions.
given_block_identifier=settle_block_number,
)
assert token_proxy.balance_of(c2_client.address) == (
initial_balance_c2 + transferred_amount_c1 - 0
)
assert token_proxy.balance_of(c1_client.address) == (
initial_balance_c1 + 0 - transferred_amount_c1
)
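# Illustrative sketch (not part of the test suite) of the balance-proof
# signing pattern used above: the closing side signs the serialized proof,
# and the non-closing side countersigns the proof plus that signature.
def _sign_balance_proof(proof, closing_key, non_closing_key):
    proof.signature = encode_hex(LocalSigner(closing_key).sign(data=proof.serialize_bin()))
    update_data = proof.serialize_bin(
        msg_type=MessageTypeId.BALANCE_PROOF_UPDATE
    ) + decode_hex(proof.signature)
    return LocalSigner(non_closing_key).sign(data=update_data)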
| 38.701299
| 99
| 0.713563
| 4,158
| 35,760
| 5.720539
| 0.06734
| 0.062558
| 0.057891
| 0.038342
| 0.834777
| 0.815017
| 0.792819
| 0.778063
| 0.752165
| 0.735853
| 0
| 0.01624
| 0.221672
| 35,760
| 923
| 100
| 38.743229
| 0.838357
| 0.036857
| 0
| 0.670823
| 0
| 0
| 0.059485
| 0.001279
| 0
| 0
| 0
| 0
| 0.046135
| 1
| 0.006234
| false
| 0
| 0.014963
| 0
| 0.021197
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
381ba5e541cfd6a27d5aa92f55bba113860ae269
| 4,306
|
py
|
Python
|
desktop/core/ext-py/py4j-0.9/src/py4j/finalizer.py
|
kokosing/hue
|
2307f5379a35aae9be871e836432e6f45138b3d9
|
[
"Apache-2.0"
] | 5,079
|
2015-01-01T03:39:46.000Z
|
2022-03-31T07:38:22.000Z
|
desktop/core/ext-py/py4j-0.9/src/py4j/finalizer.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 1,623
|
2015-01-01T08:06:24.000Z
|
2022-03-30T19:48:52.000Z
|
desktop/core/ext-py/py4j-0.9/src/py4j/finalizer.py
|
zks888/hue
|
93a8c370713e70b216c428caa2f75185ef809deb
|
[
"Apache-2.0"
] | 2,033
|
2015-01-04T07:18:02.000Z
|
2022-03-28T19:55:47.000Z
|
# -*- coding: UTF-8 -*-
"""
Module that defines a Finalizer class responsible for registering and cleaning
finalizers.
Created on Mar 7, 2010
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from threading import RLock
from py4j.compat import items
class ThreadSafeFinalizer(object):
"""A `ThreadSafeFinalizer` is a global class used to register weak
reference finalizers (i.e., a weak reference with a callback).
This class is useful when one wants to register a finalizer of an object
with circular references. The finalizer of an object with circular
references might never be called if the object's finalizer is kept by the
same object.
For example, if object A refers to B and B refers to A, A should not keep a
weak reference to itself.
`ThreadSafeFinalizer` is thread-safe and uses reentrant lock on each
operation."""
finalizers = {}
lock = RLock()
@classmethod
def add_finalizer(cls, id, weak_ref):
"""Registers a finalizer with an id.
:param id: The id of the object referenced by the weak reference.
:param weak_ref: The weak reference to register.
"""
with cls.lock:
cls.finalizers[id] = weak_ref
@classmethod
def remove_finalizer(cls, id):
"""Removes a finalizer associated with this id.
:param id: The id of the object for which the finalizer will be
deleted.
"""
with cls.lock:
cls.finalizers.pop(id, None)
@classmethod
def clear_finalizers(cls, clear_all=False):
"""Removes all registered finalizers.
:param clear_all: If `True`, all finalizers are deleted. Otherwise,
only the finalizers from an empty weak reference are deleted
(i.e., weak references pointing to inexistent objects).
"""
with cls.lock:
if clear_all:
cls.finalizers.clear()
else:
for id, ref in items(cls.finalizers):
if ref() is None:
cls.finalizers.pop(id, None)
class Finalizer(object):
"""A `Finalizer` is a global class used to register weak reference finalizers
(i.e., a weak reference with a callback).
This class is useful when one wants to register a finalizer of an object
with circular references. The finalizer of an object with circular
references might never be called if the object's finalizer is kept by the
same object.
For example, if object A refers to B and B refers to A, A should not keep a
weak reference to itself.
`Finalizer` is not thread-safe and should only be used by single-threaded
programs."""
finalizers = {}
@classmethod
def add_finalizer(cls, id, weak_ref):
"""Registers a finalizer with an id.
:param id: The id of the object referenced by the weak reference.
:param weak_ref: The weak reference to register.
"""
cls.finalizers[id] = weak_ref
@classmethod
def remove_finalizer(cls, id):
"""Removes a finalizer associated with this id.
:param id: The id of the object for which the finalizer will be
deleted.
"""
cls.finalizers.pop(id, None)
@classmethod
def clear_finalizers(cls, clear_all=False):
"""Removes all registered finalizers.
:param clear_all: If `True`, all finalizers are deleted. Otherwise,
only the finalizers from an empty weak reference are deleted (i.e.,
weak references pointing to inexistent objects).
"""
if clear_all:
cls.finalizers.clear()
else:
for id, ref in items(cls.finalizers):
if ref() is None:
cls.finalizers.pop(id, None)
def clear_finalizers(clear_all=False):
"""Removes all registered finalizers in :class:`ThreadSafeFinalizer` and
:class:`Finalizer`.
:param clear_all: If `True`, all finalizers are deleted. Otherwise, only
the finalizers from an empty weak reference are deleted (i.e., weak
references pointing to inexistent objects).
"""
ThreadSafeFinalizer.clear_finalizers(clear_all)
Finalizer.clear_finalizers(clear_all)
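# Illustrative usage (a sketch, not part of py4j): register a weak reference
# for an object, then let clear_finalizers() drop entries whose referent has
# been garbage collected.
def _finalizer_example():
    import weakref
    class _Target(object):
        pass
    target = _Target()
    ThreadSafeFinalizer.add_finalizer(id(target), weakref.ref(target))
    del target  # once collected, the weak reference becomes empty
    clear_finalizers()  # removes finalizers pointing to collected objects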
| 31.896296
| 81
| 0.65281
| 569
| 4,306
| 4.885765
| 0.200351
| 0.060791
| 0.020144
| 0.027338
| 0.780576
| 0.772662
| 0.772662
| 0.757194
| 0.757194
| 0.757194
| 0
| 0.002247
| 0.276591
| 4,306
| 134
| 82
| 32.134328
| 0.890209
| 0.586856
| 0
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
382086fa44cd4df6a6f8e481f543377adb1187cb
| 179
|
py
|
Python
|
comvex/efficientnet_v2/__init__.py
|
shrenik-jain/ComVEX
|
93622de3a4771cda13b14f8bba52990eb47c2409
|
[
"Apache-2.0"
] | 29
|
2021-06-14T08:27:43.000Z
|
2022-02-07T13:40:27.000Z
|
comvex/efficientnet_v2/__init__.py
|
shrenik-jain/ComVEX
|
93622de3a4771cda13b14f8bba52990eb47c2409
|
[
"Apache-2.0"
] | 3
|
2021-11-23T16:11:51.000Z
|
2021-12-21T17:24:36.000Z
|
comvex/efficientnet_v2/__init__.py
|
shrenik-jain/ComVEX
|
93622de3a4771cda13b14f8bba52990eb47c2409
|
[
"Apache-2.0"
] | 3
|
2021-06-27T08:18:57.000Z
|
2021-12-17T07:29:59.000Z
|
from .model import EfficientNetV2Base, FusedMBConvXd, EfficientNetV2Backbone, EfficientNetV2WithLinearClassifier
from .config import EfficientNetV2BaseConfig, EfficientNetV2Config
| 89.5
| 112
| 0.905028
| 12
| 179
| 13.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029762
| 0.061453
| 179
| 2
| 113
| 89.5
| 0.934524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
69f5672a6f104a8033f436835623b729cbdc4343
| 35
|
py
|
Python
|
JudgeLight/__init__.py
|
MeiK-h/JudgeLight
|
bebf37fbea7f259b7ea4a3b0ecad146b74f40abb
|
[
"MIT"
] | 5
|
2017-07-26T03:28:50.000Z
|
2017-08-04T09:17:54.000Z
|
JudgeLight/__init__.py
|
MeiK-h/JudgeLight
|
bebf37fbea7f259b7ea4a3b0ecad146b74f40abb
|
[
"MIT"
] | null | null | null |
JudgeLight/__init__.py
|
MeiK-h/JudgeLight
|
bebf37fbea7f259b7ea4a3b0ecad146b74f40abb
|
[
"MIT"
] | 1
|
2020-10-08T16:10:38.000Z
|
2020-10-08T16:10:38.000Z
|
from .JudgeLight import JudgeLight
| 17.5
| 34
| 0.857143
| 4
| 35
| 7.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0e088649cfcd3588b2ce397e4f772541749e43de
| 164
|
py
|
Python
|
account/admin.py
|
vinaykakkad/DIM
|
9096e3703b0edda85914e4b25092670dd44e6d99
|
[
"MIT"
] | 2
|
2021-06-02T03:54:23.000Z
|
2021-12-17T04:49:08.000Z
|
account/admin.py
|
vinaykakkad/DIM
|
9096e3703b0edda85914e4b25092670dd44e6d99
|
[
"MIT"
] | 2
|
2021-06-02T03:49:39.000Z
|
2021-09-22T18:43:45.000Z
|
account/admin.py
|
vinaykakkad/DIM
|
9096e3703b0edda85914e4b25092670dd44e6d99
|
[
"MIT"
] | 1
|
2021-07-21T16:07:07.000Z
|
2021-07-21T16:07:07.000Z
|
from django.contrib import admin
from .models import Account, Profile, Skill
admin.site.register(Account)
admin.site.register(Profile)
admin.site.register(Skill)
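# Illustrative alternative (a sketch, not part of this project; kept as a
# comment because it would replace the plain admin.site.register(Skill) call
# above): the decorator form with a hypothetical list_display ("id" exists
# on every Django model by default).
# @admin.register(Skill)
# class SkillAdmin(admin.ModelAdmin):
#     list_display = ("id",)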
| 20.5
| 43
| 0.810976
| 23
| 164
| 5.782609
| 0.478261
| 0.203008
| 0.383459
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091463
| 164
| 7
| 44
| 23.428571
| 0.892617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
3892357d3f5dd8868b47894e96efbf04d12a904a
| 2,856
|
py
|
Python
|
tests/test_sample_index_helper.py
|
mozjay0619/fast-scboot
|
c46bcb0ca5aa066fce6396bf2ee0c6b27d26acfb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sample_index_helper.py
|
mozjay0619/fast-scboot
|
c46bcb0ca5aa066fce6396bf2ee0c6b27d26acfb
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_sample_index_helper.py
|
mozjay0619/fast-scboot
|
c46bcb0ca5aa066fce6396bf2ee0c6b27d26acfb
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
import pytest
from src.fast_scboot.c.sample_index_helper import count_clusts, make_index_matrix
# def test_make_index_matrix():
# strat_array = np.asarray([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
# clust_array = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 6])
# clust_val = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 6])
# array = np.squeeze(np.dstack([strat_array, clust_array, clust_val])).astype(np.int32)
# result = make_index_matrix(array, 7)
# answer = np.asarray(
# [
# [0, 0, 0, 0, 1],
# [0, 1, 1, 1, 2],
# [1, 2, 2, 3, 1],
# [1, 3, 3, 4, 1],
# [1, 4, 4, 5, 2],
# [2, 5, 5, 7, 1],
# [2, 6, 6, 8, 3],
# ]
# )
# assert np.all(np.isclose(result, answer))
# strat_array = np.asarray([0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
# clust_array = np.asarray([0, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7])
# array = np.squeeze(np.dstack([strat_array, clust_array, clust_array])).astype(np.int32)
# result = make_index_matrix(array, 8)
# answer = np.asarray(
# [
# [0, 0, 0, 0, 1],
# [0, 1, 1, 1, 2],
# [1, 2, 2, 3, 1],
# [1, 3, 3, 4, 1],
# [1, 4, 4, 5, 2],
# [2, 5, 5, 7, 1],
# [2, 6, 6, 8, 2],
# [2, 7, 7, 10, 1],
# ]
# )
# assert np.all(np.isclose(result, answer))
def test_count_clust_array():
strat_array = np.asarray([0, 0, 1, 1, 1]).astype(np.int32)
clust_array = np.asarray([0, 1, 2, 2, 3]).astype(np.int32)
result = count_clusts(strat_array, clust_array, 2, len(clust_array))
answer = np.asarray([2, 2])
assert np.all(np.isclose(result, answer))
strat_array = np.asarray([0, 0, 1, 1, 1]).astype(np.int32)
clust_array = np.asarray([0, 1, 2, 3, 4]).astype(np.int32)
result = count_clusts(strat_array, clust_array, 2, len(clust_array))
answer = np.asarray([2, 3])
assert np.all(np.isclose(result, answer))
strat_array = np.asarray([0, 0, 1, 1, 2]).astype(np.int32)
clust_array = np.asarray([0, 1, 2, 3, 4]).astype(np.int32)
result = count_clusts(strat_array, clust_array, 3, len(clust_array))
answer = np.asarray([2, 2, 1])
assert np.all(np.isclose(result, answer))
strat_array = np.asarray([0, 0, 1, 1, 1]).astype(np.int32)
clust_array = np.asarray([0, 1, 3, 3, 3]).astype(np.int32)
result = count_clusts(strat_array, clust_array, 2, len(clust_array))
answer = np.asarray([2, 1])
assert np.all(np.isclose(result, answer))
strat_array = np.arange(5).astype(np.int32)
clust_array = np.arange(5).astype(np.int32)
result = count_clusts(strat_array, clust_array, 5, len(clust_array))
answer = np.asarray([1, 1, 1, 1, 1])
assert np.all(np.isclose(result, answer))
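# Illustrative pure-Python equivalent of count_clusts (a sketch, not the
# compiled implementation): count the number of distinct clusters within
# each stratum, matching the expectations asserted above.
def _count_clusts_py(strat_array, clust_array, num_strata, n):
    counts = [0] * num_strata
    seen = set()
    for i in range(n):
        key = (int(strat_array[i]), int(clust_array[i]))
        if key not in seen:
            seen.add(key)
            counts[key[0]] += 1
    return counts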
| 29.443299
| 93
| 0.545868
| 471
| 2,856
| 3.18896
| 0.095541
| 0.037284
| 0.099867
| 0.11984
| 0.88482
| 0.88482
| 0.86285
| 0.840213
| 0.724368
| 0.724368
| 0
| 0.104057
| 0.266457
| 2,856
| 96
| 94
| 29.75
| 0.612888
| 0.434524
| 0
| 0.448276
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.137931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
389fd118f909ba88f3daefab2818d0375297fdbc
| 25,830
|
py
|
Python
|
tests/test_config_flow.py
|
mattsch/tesla
|
ecb376539e2046692bbce4374fd541fc22790979
|
[
"Apache-2.0"
] | null | null | null |
tests/test_config_flow.py
|
mattsch/tesla
|
ecb376539e2046692bbce4374fd541fc22790979
|
[
"Apache-2.0"
] | null | null | null |
tests/test_config_flow.py
|
mattsch/tesla
|
ecb376539e2046692bbce4374fd541fc22790979
|
[
"Apache-2.0"
] | null | null | null |
"""Test the Tesla config flow."""
import datetime
from unittest.mock import AsyncMock, patch
from aiohttp import web
from homeassistant import config_entries, data_entry_flow
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_TOKEN,
CONF_USERNAME,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
)
from homeassistant.data_entry_flow import UnknownFlow
from homeassistant.helpers.network import NoURLAvailableError
from homeassistant.setup import async_setup_component
import pytest
from pytest_homeassistant_custom_component.common import MockConfigEntry
from teslajsonpy import TeslaException
import voluptuous as vol
from yarl import URL
from custom_components.tesla_custom.config_flow import (
TeslaAuthorizationCallbackView,
TeslaAuthorizationProxyView,
validate_input,
)
from custom_components.tesla_custom.const import (
AUTH_CALLBACK_PATH,
AUTH_PROXY_PATH,
CONF_EXPIRATION,
CONF_WAKE_ON_START,
DEFAULT_SCAN_INTERVAL,
DEFAULT_WAKE_ON_START,
DOMAIN,
ERROR_URL_NOT_DETECTED,
MIN_SCAN_INTERVAL,
)
HA_URL = "https://homeassistant.com"
TEST_USERNAME = "test-username"
TEST_TOKEN = "test-token"
TEST_ACCESS_TOKEN = "test-access-token"
TEST_VALID_EXPIRATION = datetime.datetime.now().timestamp() * 2
TEST_INVALID_EXPIRATION = 0
# pytestmark = pytest.mark.skip(reason="unable to override core component")
# @pytest.mark.skip(reason="unable to override core component")
async def test_warning_form(hass):
"""Test we get the warning form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# "type": RESULT_TYPE_FORM,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": step_id,
# "data_schema": data_schema,
# "errors": errors,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["handler"] == DOMAIN
assert result["step_id"] == "user"
assert result["data_schema"] == vol.Schema({})
assert result["errors"] == {}
assert result["description_placeholders"] == {}
return result
# @pytest.mark.skip(reason="unable to override core component")
async def test_reauth_warning_form(hass):
"""Test we get the warning form on reauth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_REAUTH}
)
# "type": RESULT_TYPE_FORM,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": step_id,
# "data_schema": data_schema,
# "errors": errors,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["handler"] == DOMAIN
assert result["step_id"] == "user"
assert result["data_schema"] == vol.Schema({})
assert result["errors"] == {}
assert result["description_placeholders"] == {}
return result
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_external_url(hass):
"""Test we get the external url after submitting once."""
result = await test_warning_form(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.get_url",
return_value=HA_URL,
):
result = await hass.config_entries.flow.async_configure(
flow_id,
user_input={},
)
# "type": RESULT_TYPE_EXTERNAL_STEP,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": step_id,
# "url": url,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["step_id"] == "check_proxy"
callback_url: str = str(
URL(HA_URL).with_path(AUTH_CALLBACK_PATH).with_query({"flow_id": flow_id})
)
assert result["url"] == str(
URL(HA_URL)
.with_path(AUTH_PROXY_PATH)
.with_query({"config_flow_id": flow_id, "callback_url": callback_url})
)
assert result["description_placeholders"] is None
return result
# @pytest.mark.skip(reason="unable to override core component")
async def test_external_url_no_hass_url_exception(hass):
"""Test we handle case with no detectable hass external url."""
result = await test_warning_form(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.get_url",
side_effect=NoURLAvailableError,
):
result = await hass.config_entries.flow.async_configure(
flow_id,
user_input={},
)
# "type": RESULT_TYPE_EXTERNAL_STEP,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": step_id,
# "url": url,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["handler"] == DOMAIN
assert result["step_id"] == "user"
assert result["data_schema"] == vol.Schema({})
assert result["errors"] == {"base": ERROR_URL_NOT_DETECTED}
assert result["description_placeholders"] == {}
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_external_url_callback(hass):
"""Test we get the processing of callback_url."""
result = await test_external_url(hass)
flow_id = result["flow_id"]
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={
CONF_USERNAME: TEST_USERNAME,
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
)
# "type": RESULT_TYPE_EXTERNAL_STEP_DONE,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": next_step_id,
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP_DONE
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["step_id"] == "finish_oauth"
return result
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_finish_oauth(hass):
"""Test config entry after finishing oauth."""
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
return_value={
"refresh_token": TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={},
)
# "version": self.VERSION,
# "type": RESULT_TYPE_CREATE_ENTRY,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "title": title,
# "data": data,
# "description": description,
# "description_placeholders": description_placeholders,
assert result["version"] == 1
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["title"] == TEST_USERNAME
assert result["data"] == {
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
}
assert result["description"] is None
assert result["description_placeholders"] is None
return result
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth error."""
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
side_effect=TeslaException(code=HTTP_UNAUTHORIZED),
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={},
)
# "type": RESULT_TYPE_ABORT,
# "flow_id": flow_id,
# "handler": handler,
# "reason": reason,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["reason"] == "invalid_auth"
assert result["description_placeholders"] is None
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_login_failed(hass):
"""Test we handle invalid auth error."""
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
return_value={},
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={},
)
# "type": RESULT_TYPE_ABORT,
# "flow_id": flow_id,
# "handler": handler,
# "reason": reason,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["reason"] == "login_failed"
assert result["description_placeholders"] is None
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
side_effect=TeslaException(code=HTTP_NOT_FOUND),
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={},
)
# "type": RESULT_TYPE_ABORT,
# "flow_id": flow_id,
# "handler": handler,
# "reason": reason,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["reason"] == "cannot_connect"
assert result["description_placeholders"] is None
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_repeat_identifier(hass):
"""Test we handle repeat identifiers.
Repeats are identified if the title and tokens are identical. Otherwise they are
replaced.
"""
entry = MockConfigEntry(
domain=DOMAIN,
title=TEST_USERNAME,
data={
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
options=None,
)
entry.add_to_hass(hass)
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
return_value={
"refresh_token": TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={},
)
# "type": RESULT_TYPE_ABORT,
# "flow_id": flow_id,
# "handler": handler,
# "reason": reason,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["reason"] == "already_configured"
assert result["description_placeholders"] is None
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_second_identifier(hass):
"""Test we can create another entry with a different name.
Repeats are identified if the title and tokens are identical. Otherwise they are
replaced.
"""
entry = MockConfigEntry(
domain=DOMAIN,
title="OTHER_USERNAME",
data={
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
options=None,
)
entry.add_to_hass(hass)
await test_finish_oauth(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
@pytest.mark.skip(reason="hass fixture does not support http views")
async def test_form_reauth(hass):
"""Test we handle reauth."""
entry = MockConfigEntry(
domain=DOMAIN,
title=TEST_USERNAME,
data={
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_INVALID_EXPIRATION,
},
options=None,
)
entry.add_to_hass(hass)
result = await test_external_url_callback(hass)
flow_id = result["flow_id"]
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
return_value={
"refresh_token": TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
):
result = await hass.config_entries.flow.async_configure(
flow_id=flow_id,
user_input={
# CONF_TOKEN: TEST_TOKEN,
# CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
# CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
)
# "type": RESULT_TYPE_ABORT,
# "flow_id": flow_id,
# "handler": handler,
# "reason": reason,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["flow_id"] == flow_id
assert result["handler"] == DOMAIN
assert result["reason"] == "reauth_successful"
assert result["description_placeholders"] is None
# @pytest.mark.skip(reason="unable to override core component")
async def test_import(hass):
"""Test import step results in warning form."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_PASSWORD: "test-password", CONF_USERNAME: "test-username"},
)
# "type": RESULT_TYPE_FORM,
# "flow_id": self.flow_id,
# "handler": self.handler,
# "step_id": step_id,
# "data_schema": data_schema,
# "errors": errors,
# "description_placeholders": description_placeholders,
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["data_schema"] == vol.Schema({})
assert result["description_placeholders"] == {}
async def test_option_flow(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN, data={}, options=None)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_SCAN_INTERVAL: 350, CONF_WAKE_ON_START: True},
)
assert result["type"] == "create_entry"
assert result["data"] == {CONF_SCAN_INTERVAL: 350, CONF_WAKE_ON_START: True}
async def test_option_flow_defaults(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN, data={}, options=None)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: DEFAULT_SCAN_INTERVAL,
CONF_WAKE_ON_START: DEFAULT_WAKE_ON_START,
}
async def test_option_flow_input_floor(hass):
"""Test config flow options."""
entry = MockConfigEntry(domain=DOMAIN, data={}, options=None)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == "form"
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_SCAN_INTERVAL: 1}
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_SCAN_INTERVAL: MIN_SCAN_INTERVAL,
CONF_WAKE_ON_START: DEFAULT_WAKE_ON_START,
}
@pytest.fixture
async def callback_view(hass):
"""Generate registered callback_view fixture."""
await async_setup_component(hass, DOMAIN, {})
await hass.async_start()
hass.http.register_view(TeslaAuthorizationCallbackView)
return TeslaAuthorizationCallbackView
@pytest.mark.skip(reason="unable to override core component")
async def test_callback_view_invalid_query(hass, aiohttp_client, callback_view):
"""Test callback view with invalid query."""
client = await aiohttp_client(hass.http.app)
resp = await client.get(AUTH_CALLBACK_PATH)
assert resp.status == 400
resp = await client.get(
AUTH_CALLBACK_PATH, params={"api_password": "test-password"}
)
assert resp.status == 400
# https://alandtse-test.duckdns.org/auth/tesla/callback?flow_id=7c0bdd32efca42c9bc8ce9c27f431f12&code=67443912fda4a307767a47081c55085650db40069aabd293da57185719c2&username=alandtse@gmail.com&domain=auth.tesla.com
resp = await client.get(AUTH_CALLBACK_PATH, params={"flow_id": 1234})
assert resp.status == 400
with patch(
"custom_components.tesla_custom.async_setup_entry", side_effect=KeyError
):
resp = await client.get(AUTH_CALLBACK_PATH, params={"flow_id": 1234})
assert resp.status == 400
@pytest.mark.skip(reason="unable to override core component")
async def test_callback_view_keyerror(hass, aiohttp_client, callback_view):
"""Test callback view with keyerror."""
client = await aiohttp_client(hass.http.app)
with patch(
"custom_components.tesla_custom.async_setup_entry", side_effect=KeyError
):
resp = await client.get(AUTH_CALLBACK_PATH, params={"flow_id": 1234})
assert resp.status == 400
@pytest.mark.skip(reason="unable to override core component")
async def test_callback_view_unknownflow(hass, aiohttp_client, callback_view):
"""Test callback view with unknownflow."""
client = await aiohttp_client(hass.http.app)
with patch(
"custom_components.tesla_custom.async_setup_entry", side_effect=UnknownFlow
):
resp = await client.get(AUTH_CALLBACK_PATH, params={"flow_id": 1234})
assert resp.status == 400
@pytest.mark.skip(reason="unable to override core component")
async def test_callback_view_success(hass, aiohttp_client, callback_view):
"""Test callback view with success response."""
result = await test_external_url(hass)
flow_id = result["flow_id"]
client = await aiohttp_client(hass.http.app)
with patch("custom_components.tesla_custom.async_setup_entry", return_value=True):
resp = await client.get(AUTH_CALLBACK_PATH, params={"flow_id": flow_id})
assert resp.status == 200
assert (
"<script>window.close()</script>Success! This window can be closed"
in await resp.text()
)
@pytest.fixture
async def proxy_view(hass):
"""Generate registered proxy_view fixture."""
await async_setup_component(hass, DOMAIN, {})
await hass.async_start()
mock_handler = AsyncMock(return_value=web.Response(text="Success"))
proxy_view = TeslaAuthorizationProxyView(mock_handler)
hass.http.register_view(proxy_view)
return proxy_view
@pytest.fixture
async def proxy_view_with_flow(hass, proxy_view):
"""Generate registered proxy_view fixture with running flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
flow_id = result["flow_id"]
return flow_id
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_invalid_auth(hass, aiohttp_client, proxy_view):
"""Test proxy view request results in auth error."""
client = await aiohttp_client(hass.http.app)
for method in ("get", "post", "delete", "put", "patch", "head", "options"):
resp = await getattr(client, method)(AUTH_PROXY_PATH)
assert resp.status in [403, 401]
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_get(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view get request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.get(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_post(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view post request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.post(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_delete(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view delete request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.delete(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_put(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view put request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.put(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_patch(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view patch request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.patch(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_head(hass, aiohttp_client, proxy_view_with_flow):
"""Test proxy view head request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.head(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_valid_auth_options(
hass, aiohttp_client, proxy_view_with_flow
):
"""Test proxy view options request results in valid response."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.options(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 403
@pytest.mark.skip(reason="unable to override core component")
async def test_proxy_view_invalid_auth_after_reset(
hass, aiohttp_client, proxy_view, proxy_view_with_flow
):
"""Test proxy view request results in invalid auth response after reset."""
flow_id = proxy_view_with_flow
client = await aiohttp_client(hass.http.app)
resp = await client.get(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 200
proxy_view.reset()
hass.config_entries.flow.async_abort(flow_id)
resp = await client.get(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.post(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.delete(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.put(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.patch(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.head(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 401
resp = await client.options(AUTH_PROXY_PATH, params={"config_flow_id": flow_id})
assert resp.status == 403
async def test_validate_input_no_controller(
hass,
):
"""Test validate input."""
user_input = {
CONF_USERNAME: TEST_USERNAME,
CONF_TOKEN: TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
}
with patch(
"custom_components.tesla_custom.config_flow.TeslaAPI.connect",
return_value={
"refresh_token": TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
},
):
assert await validate_input(hass, user_input) == {
"refresh_token": TEST_TOKEN,
CONF_ACCESS_TOKEN: TEST_ACCESS_TOKEN,
CONF_EXPIRATION: TEST_VALID_EXPIRATION,
}
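# Illustrative helper (a sketch, not part of the suite): the repeated
# abort-result assertions above could be factored into one function.
def _assert_abort(result, flow_id, reason):
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["flow_id"] == flow_id
    assert result["handler"] == DOMAIN
    assert result["reason"] == reason
    assert result["description_placeholders"] is None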
| 35.238745
| 216
| 0.692296
| 3,246
| 25,830
| 5.215342
| 0.069316
| 0.046784
| 0.022447
| 0.026936
| 0.819659
| 0.795794
| 0.781913
| 0.774647
| 0.766495
| 0.745585
| 0
| 0.007736
| 0.199303
| 25,830
| 732
| 217
| 35.286885
| 0.810802
| 0.101587
| 0
| 0.649899
| 0
| 0
| 0.142395
| 0.047293
| 0
| 0
| 0
| 0
| 0.207243
| 1
| 0
| false
| 0.006036
| 0.034205
| 0
| 0.050302
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
38aa805f84406f479aa2fefb87377a961948324e
| 44
|
py
|
Python
|
ratelimit/backends/__init__.py
|
abersheeran/asgi-ratelim
|
e4614a230747bdbbcd996933c1f1ca4d0bc8a7c2
|
[
"Apache-2.0"
] | 136
|
2020-06-08T10:38:19.000Z
|
2022-03-24T14:45:51.000Z
|
ratelimit/backends/__init__.py
|
abersheeran/asgi-ratelim
|
e4614a230747bdbbcd996933c1f1ca4d0bc8a7c2
|
[
"Apache-2.0"
] | 38
|
2020-07-12T15:35:15.000Z
|
2022-03-25T03:27:45.000Z
|
ratelimit/backends/__init__.py
|
abersheeran/asgi-ratelim
|
e4614a230747bdbbcd996933c1f1ca4d0bc8a7c2
|
[
"Apache-2.0"
] | 15
|
2021-01-19T13:48:37.000Z
|
2022-03-18T02:34:52.000Z
|
from .base import BaseBackend # noqa: F401
| 22
| 43
| 0.75
| 6
| 44
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 0.181818
| 44
| 1
| 44
| 44
| 0.833333
| 0.227273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2a1a59a03bb0936f07a0d4ff6bed65ee595f5016
| 4,127
|
py
|
Python
|
openpathsampling/tests/test_range_logic.py
|
bolhuis/openpathsampling
|
4a12af0ee1143cdbc272b10a8c7cbea735566ce1
|
[
"MIT"
] | 64
|
2016-07-06T13:38:51.000Z
|
2022-03-30T15:58:01.000Z
|
openpathsampling/tests/test_range_logic.py
|
bolhuis/openpathsampling
|
4a12af0ee1143cdbc272b10a8c7cbea735566ce1
|
[
"MIT"
] | 601
|
2016-06-13T10:22:01.000Z
|
2022-03-25T00:10:40.000Z
|
openpathsampling/tests/test_range_logic.py
|
hejung/openpathsampling
|
e8b091c92916561954542d40d17d7241b203d1ad
|
[
"MIT"
] | 45
|
2016-11-10T11:17:53.000Z
|
2022-02-13T11:50:26.000Z
|
from builtins import object
from nose.tools import assert_equal, assert_not_equal, raises
from nose.plugins.skip import Skip, SkipTest
from openpathsampling.range_logic import *
class TestRangeLogic(object):
def test_range_and(self):
assert_equal(range_and(1, 3, 2, 4), [(2, 3)])
assert_equal(range_and(2, 4, 1, 3), [(2, 3)])
assert_equal(range_and(1, 2, 3, 4), None)
assert_equal(range_and(3, 4, 1, 2), None)
assert_equal(range_and(1, 4, 2, 3), [(2, 3)])
assert_equal(range_and(2, 3, 1, 4), [(2, 3)])
assert_equal(range_and(1, 2, 1, 2), 1)
def test_range_or(self):
assert_equal(range_or(1, 3, 2, 4), [(1, 4)])
assert_equal(range_or(2, 4, 1, 3), [(1, 4)])
assert_equal(range_or(1, 2, 3, 4), [(1, 2), (3, 4)])
assert_equal(range_or(3, 4, 1, 2), [(3, 4), (1, 2)])
assert_equal(range_or(1, 4, 2, 3), [(1, 4)])
assert_equal(range_or(2, 3, 1, 4), [(1, 4)])
assert_equal(range_or(1, 2, 1, 2), 1)
def test_range_sub(self):
assert_equal(range_sub(1, 3, 2, 4), [(1, 2)])
assert_equal(range_sub(2, 4, 1, 3), [(3, 4)])
assert_equal(range_sub(1, 2, 3, 4), 1)
assert_equal(range_sub(3, 4, 1, 2), 1)
assert_equal(range_sub(1, 4, 2, 3), [(1, 2), (3, 4)])
assert_equal(range_sub(2, 3, 1, 4), None)
assert_equal(range_sub(1, 2, 1, 2), None)
assert_equal(range_sub(0.1, 0.4, 0.1, 0.3), [(0.3, 0.4)])
class TestPeriodicRangeLogic(object):
def test_periodic_order(self):
# orders without wrapping
assert_equal(periodic_ordering(1, 2, 3, 4), [0, 1, 2, 3])
assert_equal(periodic_ordering(1, 3, 2, 4), [0, 2, 1, 3])
assert_equal(periodic_ordering(4, 3, 2, 1), [0, 3, 2, 1])
assert_equal(periodic_ordering(1, 2, 1, 2), [0, 2, 1, 3])
assert_equal(periodic_ordering(2, 4, 1, 3), [1, 3, 0, 2])
assert_equal(periodic_ordering(1, 2, 4, 3), [1, 2, 0, 3])
def test_periodic_and(self):
assert_equal(periodic_range_and(0.1, 0.3, 0.2, 0.4), [(0.2, 0.3)])
assert_equal(periodic_range_and(0.2, 0.4, 0.1, 0.3), [(0.2, 0.3)])
assert_equal(periodic_range_and(1, 2, 3, 4), None)
assert_equal(periodic_range_and(3, 4, 1, 2), None)
assert_equal(periodic_range_and(1, 4, 2, 3), [(2, 3)])
assert_equal(periodic_range_and(2, 3, 1, 4), [(2, 3)])
assert_equal(periodic_range_and(1, 2, 1, 2), 1)
assert_equal(periodic_range_and(1, 2, 2, 1), None)
assert_equal(periodic_range_and(2, 1, 1, 4), [(2, 4)])
assert_equal(periodic_range_and(0.1, 0.4, 0.3, 0.2),
[(0.1, 0.2), (0.3, 0.4)])
def test_periodic_or(self):
assert_equal(periodic_range_or(0.1, 0.3, 0.2, 0.4), [(0.1, 0.4)])
assert_equal(periodic_range_or(0.2, 0.4, 0.1, 0.3), [(0.1, 0.4)])
assert_equal(periodic_range_or(1, 2, 3, 4), [(1, 2), (3, 4)])
assert_equal(periodic_range_or(3, 4, 1, 2), [(3, 4), (1, 2)])
assert_equal(periodic_range_or(1, 4, 2, 3), [(1, 4)])
assert_equal(periodic_range_or(2, 3, 1, 4), [(1, 4)])
assert_equal(periodic_range_or(1, 2, 1, 2), 1)
assert_equal(periodic_range_or(1, 2, 2, 1), -1)
assert_equal(periodic_range_or(0.1, 0.4, 0.3, 0.2), -1)
assert_equal(periodic_range_or(2, 1, 1, 4), -1)
def test_periodic_sub(self):
assert_equal(periodic_range_sub(0.1, 0.3, 0.2, 0.4), [(0.1, 0.2)])
assert_equal(periodic_range_sub(0.2, 0.4, 0.1, 0.3), [(0.3, 0.4)])
assert_equal(periodic_range_sub(1, 2, 3, 4), 1)
assert_equal(periodic_range_sub(3, 4, 1, 2), 1)
assert_equal(periodic_range_sub(1, 4, 2, 3), [(1, 2), (3, 4)])
assert_equal(periodic_range_sub(2, 3, 1, 4), None)
assert_equal(periodic_range_sub(1, 2, 1, 2), None)
assert_equal(periodic_range_sub(1, 2, 2, 1), 1)
assert_equal(periodic_range_sub(2, 1, 1, 4), [(4, 1)])
assert_equal(periodic_range_sub(0.1, 0.4, 0.3, 0.2), [(0.2, 0.3)])
assert_equal(periodic_range_sub(0.1, 0.4, 0.1, 0.3), [(0.3, 0.4)])
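# Illustrative pure-Python sketch of the non-periodic range_and semantics
# exercised above (not the openpathsampling implementation): identical
# ranges yield 1, disjoint ranges yield None, otherwise the overlap.
def _range_and_sketch(a, b, c, d):
    if (a, b) == (c, d):
        return 1
    lo, hi = max(a, c), min(b, d)
    return [(lo, hi)] if lo < hi else None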
| 47.436782
| 74
| 0.579113
| 762
| 4,127
| 2.917323
| 0.048556
| 0.296896
| 0.316239
| 0.334683
| 0.82726
| 0.77238
| 0.682861
| 0.618983
| 0.471435
| 0.214575
| 0
| 0.130612
| 0.228253
| 4,127
| 86
| 75
| 47.988372
| 0.567347
| 0.005573
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.821918
| 1
| 0.09589
| false
| 0
| 0.054795
| 0
| 0.178082
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2a3a7bb1b4cb34c62f74e5913b58166f5e1df5de
| 57
|
py
|
Python
|
__init__.py
|
jesisca-tandi/nn-manual
|
9798de2741d9a1679a909491dc3f069a87606274
|
[
"MIT"
] | null | null | null |
__init__.py
|
jesisca-tandi/nn-manual
|
9798de2741d9a1679a909491dc3f069a87606274
|
[
"MIT"
] | null | null | null |
__init__.py
|
jesisca-tandi/nn-manual
|
9798de2741d9a1679a909491dc3f069a87606274
|
[
"MIT"
] | null | null | null |
from . import nn
from . import models
from . import utils
| 19
| 20
| 0.754386
| 9
| 57
| 4.777778
| 0.555556
| 0.697674
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192982
| 57
| 3
| 21
| 19
| 0.934783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aa6beff1c59e7fdd9d26343a684692388b1f8016
| 193
|
py
|
Python
|
pyintercept/__init__.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 32
|
2015-07-20T21:13:26.000Z
|
2018-04-05T13:53:28.000Z
|
pyintercept/__init__.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 2
|
2019-07-23T17:38:06.000Z
|
2020-02-27T13:38:02.000Z
|
pyintercept/__init__.py
|
caioariede/pyintercept
|
19039ce3038521bf32aaafe207024adeb0096749
|
[
"MIT"
] | 3
|
2015-08-09T14:48:38.000Z
|
2020-02-27T12:58:46.000Z
|
from .handlers.json_handler import json
from .handlers.pickle_handler import pickle
from .handlers.print_handler import print_
from .handlers.pdb_handler import pdb
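# At module scope, locals() is the module namespace, so the assignment below
# exposes the print_ handler under the reserved name "print".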
locals()['print'] = print_
| 27.571429
| 43
| 0.818653
| 27
| 193
| 5.62963
| 0.333333
| 0.315789
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103627
| 193
| 6
| 44
| 32.166667
| 0.878613
| 0
| 0
| 0
| 0
| 0
| 0.025907
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.8
| 0
| 0.8
| 0.4
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aaa9b3a5323a4a07baf6473932a736653e125446
| 29
|
py
|
Python
|
textcounts/__init__.py
|
staeiou/textcounts
|
933fa504d5e55574ff7e51817458d3d71d3c2795
|
[
"MIT"
] | null | null | null |
textcounts/__init__.py
|
staeiou/textcounts
|
933fa504d5e55574ff7e51817458d3d71d3c2795
|
[
"MIT"
] | null | null | null |
textcounts/__init__.py
|
staeiou/textcounts
|
933fa504d5e55574ff7e51817458d3d71d3c2795
|
[
"MIT"
] | null | null | null |
from textcounts.core import *
| 29
| 29
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103448
| 29
| 1
| 29
| 29
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
aac72f8485bc09f1971623cd927b13d4e49da039
| 174
|
py
|
Python
|
khrapi/Vendor.py
|
EllAyling/khrbinding-generator
|
12215fae7754dafc29e8134625cbbf4408b8ccf7
|
[
"MIT"
] | 5
|
2018-05-03T16:47:24.000Z
|
2021-05-22T21:18:10.000Z
|
khrapi/Vendor.py
|
EllAyling/khrbinding-generator
|
12215fae7754dafc29e8134625cbbf4408b8ccf7
|
[
"MIT"
] | 22
|
2018-04-01T18:41:53.000Z
|
2022-01-11T13:56:49.000Z
|
khrapi/Vendor.py
|
EllAyling/khrbinding-generator
|
12215fae7754dafc29e8134625cbbf4408b8ccf7
|
[
"MIT"
] | 1
|
2020-10-22T10:23:40.000Z
|
2020-10-22T10:23:40.000Z
|
class Vendor:
def __init__(self, token, name):
self.token = token
self.name = name
def __lt__(self, other):
return self.token < other.token
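# Illustrative usage (a sketch; the token/name values are hypothetical):
# __lt__ makes vendors sortable by token.
def _vendor_sort_example():
    vendors = [Vendor("0x2", "BVendor"), Vendor("0x1", "AVendor")]
    return [v.name for v in sorted(vendors)]  # -> ["AVendor", "BVendor"]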
| 19.333333
| 39
| 0.597701
| 22
| 174
| 4.363636
| 0.454545
| 0.28125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.304598
| 174
| 8
| 40
| 21.75
| 0.793388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.166667
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
2aad3d4148618b0ee803b3187f70c722e300ca7d
| 2,104
|
py
|
Python
|
python/DeepSeaScene/SubpassDependency.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | 5
|
2018-11-17T23:13:22.000Z
|
2021-09-30T13:37:04.000Z
|
python/DeepSeaScene/SubpassDependency.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | null | null | null |
python/DeepSeaScene/SubpassDependency.py
|
akb825/DeepSea
|
fff790d0a472cf2f9f89de653e0b4470ce605d24
|
[
"Apache-2.0"
] | 2
|
2019-09-23T12:23:35.000Z
|
2020-04-07T05:31:06.000Z
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaScene
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubpassDependency(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 28
# SubpassDependency
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubpassDependency
def SrcSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# SubpassDependency
def SrcStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# SubpassDependency
def SrcAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# SubpassDependency
def DstSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(12))
# SubpassDependency
def DstStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16))
# SubpassDependency
def DstAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20))
# SubpassDependency
def RegionDependency(self): return self._tab.Get(flatbuffers.number_types.BoolFlags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(24))
def CreateSubpassDependency(builder, srcSubpass, srcStages, srcAccess, dstSubpass, dstStages, dstAccess, regionDependency):
builder.Prep(4, 28)
builder.Pad(3)
builder.PrependBool(regionDependency)
builder.PrependUint32(dstAccess)
builder.PrependUint32(dstStages)
builder.PrependUint32(dstSubpass)
builder.PrependUint32(srcAccess)
builder.PrependUint32(srcStages)
builder.PrependUint32(srcSubpass)
return builder.Offset()
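# Illustrative usage (a sketch, kept as a comment since this file is
# compiler-generated): FlatBuffers structs are written inline on a Builder;
# the stage/access masks here are arbitrary example values.
# builder = flatbuffers.Builder(64)
# dep = CreateSubpassDependency(builder, 0, 0x1, 0x2, 1, 0x4, 0x8, True)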
| 45.73913
| 156
| 0.775665
| 241
| 2,104
| 6.593361
| 0.257261
| 0.066079
| 0.193833
| 0.07489
| 0.451227
| 0.451227
| 0.451227
| 0.451227
| 0.424795
| 0.3927
| 0
| 0.022295
| 0.125951
| 2,104
| 45
| 157
| 46.755556
| 0.841762
| 0.111217
| 0
| 0
| 1
| 0
| 0.002152
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.357143
| false
| 0.214286
| 0.107143
| 0.285714
| 0.607143
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
2aba60ecd289c064f194010c21a3e0b1077b2ff6
| 1,860
|
py
|
Python
|
tests/test_document.py
|
biobakery/anadama2_test
|
46d06f7efc24ae067a1b6cc2841eda0c2a328daf
|
[
"MIT"
] | 4
|
2020-06-08T22:10:48.000Z
|
2021-07-27T13:57:43.000Z
|
tests/test_document.py
|
biobakery/anadama2_test
|
46d06f7efc24ae067a1b6cc2841eda0c2a328daf
|
[
"MIT"
] | null | null | null |
tests/test_document.py
|
biobakery/anadama2_test
|
46d06f7efc24ae067a1b6cc2841eda0c2a328daf
|
[
"MIT"
] | 1
|
2020-09-10T08:29:22.000Z
|
2020-09-10T08:29:22.000Z
|
# -*- coding: utf-8 -*-
import os
import shutil
import unittest
import optparse
import anadama2.document
class TestPweaveDocument(unittest.TestCase):
def test_filter_zero_rows(self):
doc = anadama2.document.PweaveDocument()
names=["s1","s2","s3"]
data=[[0,0,1],[0,0,0],[1,0,0]]
filtered_names, filtered_data = doc.filter_zero_rows(names,data)
self.assertEqual(filtered_names,["s1","s3"])
for x,y in zip(filtered_data, [[0,0,1],[1,0,0]]):
self.assertListEqual(x,y)
def test_filter_zero_rows_no_zeros(self):
doc = anadama2.document.PweaveDocument()
names=["s1","s2","s3"]
data=[[0,0,1],[0,1,0],[1,0,0]]
filtered_names, filtered_data = doc.filter_zero_rows(names,data)
self.assertEqual(filtered_names,["s1","s2","s3"])
for x,y in zip(filtered_data, [[0,0,1],[0,1,0],[1,0,0]]):
self.assertListEqual(x,y)
def test_filter_zero_columns(self):
doc = anadama2.document.PweaveDocument()
names=["s1","s2","s3"]
data=[[0,0,1],[0,0,0],[1,0,0]]
filtered_names, filtered_data = doc.filter_zero_columns(names,data)
self.assertEqual(filtered_names,["s1","s3"])
for x,y in zip(filtered_data, [[0,1],[0,0],[1,0]]):
self.assertListEqual(x,y)
def test_filter_zero_columns_no_zeros(self):
doc = anadama2.document.PweaveDocument()
names=["s1","s2","s3"]
data=[[0,0,1],[0,1,0],[1,0,0]]
filtered_names, filtered_data = doc.filter_zero_columns(names,data)
self.assertEqual(filtered_names,["s1","s2","s3"])
for x,y in zip(filtered_data,[[0,0,1],[0,1,0],[1,0,0]]):
self.assertListEqual(x,y)
if __name__ == "__main__":
unittest.main()
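# Illustrative standalone call (a sketch mirroring the tests above):
# filter_zero_rows drops all-zero rows together with their matching names.
def _filter_zero_rows_example():
    doc = anadama2.document.PweaveDocument()
    names, data = doc.filter_zero_rows(["s1", "s2"], [[0, 1], [0, 0]])
    assert names == ["s1"]
    assert [list(row) for row in data] == [[0, 1]]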
| 31
| 75
| 0.581183
| 263
| 1,860
| 3.927757
| 0.159696
| 0.036786
| 0.052275
| 0.03485
| 0.859632
| 0.835431
| 0.835431
| 0.835431
| 0.835431
| 0.835431
| 0
| 0.066525
| 0.240323
| 1,860
| 59
| 76
| 31.525424
| 0.664544
| 0.01129
| 0
| 0.65
| 0
| 0
| 0.028307
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.1
| false
| 0
| 0.125
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
63049e701b93e3a41a8c527aa9db6d1fc825f548
| 88
|
py
|
Python
|
dusted/geom/__init__.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | 1
|
2021-03-20T07:43:33.000Z
|
2021-03-20T07:43:33.000Z
|
dusted/geom/__init__.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | null | null | null |
dusted/geom/__init__.py
|
AlexMorson/dustforce-tas-editor
|
80546ca525ba215252c23a74807857e9c7c2566c
|
[
"MIT"
] | null | null | null |
from dusted.geom.tiles import tile_outlines
from dusted.geom.lines import decimate_line
| 29.333333
| 43
| 0.863636
| 14
| 88
| 5.285714
| 0.714286
| 0.27027
| 0.378378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 88
| 2
| 44
| 44
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
632ee7444cde67702473fc63f12c982d6007d003
| 13,893
|
py
|
Python
|
realpy/tests/test_UCB/test_ucb.py
|
REAL-Pipetting/REinforced-Automaton-Learning-REAL-Pipetting
|
b2e8ceb31846bb3d485ab48e0065a1460229444f
|
[
"MIT"
] | null | null | null |
realpy/tests/test_UCB/test_ucb.py
|
REAL-Pipetting/REinforced-Automaton-Learning-REAL-Pipetting
|
b2e8ceb31846bb3d485ab48e0065a1460229444f
|
[
"MIT"
] | null | null | null |
realpy/tests/test_UCB/test_ucb.py
|
REAL-Pipetting/REinforced-Automaton-Learning-REAL-Pipetting
|
b2e8ceb31846bb3d485ab48e0065a1460229444f
|
[
"MIT"
] | null | null | null |
"""Testing functions for GP-UCB class."""
# testing imports
import unittest
import unittest.mock as mock
# standard imports
import numpy as np
# our package imports
import realpy.UCB.ucb as ucb
class TestGPUCB(unittest.TestCase):
"""Test GP UCB class."""
def test___init__(self):
"""Test initialization of the GP UCB class."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 10
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.GPUCB(meshgrid, mocked_env, beta=5)
# test assignment of attributes
self.assertTrue(hasattr(subject, 'meshgrid'))
self.assertTrue(hasattr(subject, 'environment'))
self.assertTrue(hasattr(subject, 'beta'))
self.assertTrue(hasattr(subject, 'input_dimension'))
self.assertTrue(hasattr(subject, 'X_grid'))
# attributes that get updated during learning
self.assertTrue(hasattr(subject, 'mu'))
self.assertTrue(hasattr(subject, 'sigma'))
self.assertTrue(hasattr(subject, 'X'))
self.assertTrue(hasattr(subject, 'Y'))
self.assertTrue(hasattr(subject, 'T'))
        # test appropriate shapes
self.assertEqual(subject.input_dimension, 2)
self.assertEqual(subject.X_grid.shape, (n**2, 2))
self.assertEqual(subject.mu.shape, (n**2,))
self.assertEqual(subject.sigma.shape, (n**2,))
# test values
self.assertEqual(subject.environment, mocked_env)
self.assertEqual(subject.beta, 5)
def test_argmax_ucb(self):
"""Test getting the argmax of the UCB."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 10
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.GPUCB(meshgrid, mocked_env, beta=1)
        # making the max occur at index 5 by adjusting the mean
subject.mu[5] = 1
return_val = subject.argmax_ucb()
self.assertEqual(return_val, 5)
# making the max occur at index 10 by adjusting the std
subject.sigma[10] = 3
return_val = subject.argmax_ucb()
self.assertEqual(return_val, 10)
@unittest.mock.patch('sklearn.gaussian_process.GaussianProcessRegressor')
def test_learn(self, mocked_gp):
"""Test the learning function."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.GPUCB(meshgrid, mocked_env, beta=1)
# set up mocked functions
subject.sample = mock.MagicMock(name='sample')
mocked_gp.return_value = mock.MagicMock(name='Mocked GP')
mocked_gp.return_value.predict.return_value = ('Mu', 'Sigma')
        # making the max occur at index 5 by adjusting the mean
subject.mu[5] = 1
subject.learn()
# test appropriate sampling
        subject.sample.assert_called_with(subject.X_grid[5])
# test GP correctly called
mocked_gp.return_value.fit.assert_called_with(subject.X, subject.Y)
self.assertEqual(subject.mu, 'Mu')
self.assertEqual(subject.sigma, 'Sigma')
# check time step increase
self.assertEqual(subject.T, 1)
def test_sample(self):
"""Test the environment sampling."""
# initialize
mocked_env = mock.MagicMock(name='env')
mocked_env.sample = mock.MagicMock(name='env_sample',
return_value="Batch")
n = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.GPUCB(meshgrid, mocked_env, beta=1)
subject.sample(2)
subject.environment.sample.assert_called_with(2)
self.assertEqual(subject.Y[0], "Batch")
class TestBatchGPUCB(unittest.TestCase):
"""Test Batch GP UCB class."""
    def test_inheritance(self):
        """Ensure the subclass inherits from the parent class."""
self.assertTrue(issubclass(ucb.BatchGPUCB, ucb.GPUCB))
def test___init__(self):
"""Test initialization of the Batch GP UCB class."""
# initialize
mocked_env = mock.MagicMock(name='env')
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
# test assignment of additional attribute
self.assertTrue(hasattr(subject, 'batch_size'))
self.assertEqual(subject.batch_size, batch_size)
def test_argsort_ucb(self):
"""Test getting the argsort of the UCB."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
        # making the max occur starting at index 2 for an entire batch
subject.mu[2:2 + batch_size] = 1
return_val = subject.argsort_ucb()
expected_val = np.arange(2, 2 + batch_size)
np.testing.assert_array_equal(return_val, expected_val)
@unittest.mock.patch('sklearn.gaussian_process.GaussianProcessRegressor')
def test_learn(self, mocked_gp):
"""Test the learning function."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
# set up mocked functions
subject.batch_sample = mock.MagicMock(name='batch sample')
mocked_gp.return_value = mock.MagicMock(name='Mocked GP')
mocked_gp.return_value.predict.return_value = ('Mu', 'Sigma')
subject.learn()
# test GP correctly called
self.assertTrue(mocked_gp.return_value.fit.called)
self.assertEqual(subject.mu, 'Mu')
self.assertEqual(subject.sigma, 'Sigma')
# check time step increase
self.assertEqual(subject.T, 1)
# test second time step
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
subject.T = 1
subject.learn()
# check time step increase
self.assertEqual(subject.T, 2)
def test_batch_sample(self):
"""Test the environment sampling."""
# initialize
mocked_env = mock.MagicMock(name='env')
mocked_env.sample = mock.MagicMock(name='env_sample',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
indices = [2, 4, 5]
subject.batch_sample(indices)
self.assertEqual(subject.Y[0], ["Batch", "Batch", "Batch"])
@unittest.mock.patch('smt.sampling_methods.LHS')
def test_latin_hypercube_sample(self, mocked_lhs):
"""Test the Latin hypercube sampling on first iteration of learn."""
mocked_env = mock.MagicMock(name='env')
mocked_env.sample = mock.MagicMock(name='env_sample',
return_value="Batch")
mocked_lhs.return_value = mock.MagicMock(name='LH_sample',
return_value=[[3.0, 2.1],
[0.4, 1.1],
[0.0, 1.9]])
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
subject.latin_hypercube_sample()
self.assertEqual(list(subject.X[0][0]), [3, 2])
self.assertEqual(list(subject.X[0][1]), [0, 1])
self.assertEqual(list(subject.X[0][2]), [0, 2])
name, args, kwargs = mocked_lhs.mock_calls[0]
self.assertEqual(kwargs['xlimits'].tolist(), [[0, n - 1], [0, n - 1]])
class TestBatchGPUCBv2(unittest.TestCase):
"""Test Batch GP UCBv2 class."""
    def test_inheritance(self):
        """Ensure the subclass inherits from the parent class."""
self.assertTrue(issubclass(ucb.BatchGPUCBv2, ucb.BatchGPUCB))
def test___init__(self):
"""Test initialization of the Batch GP UCBv2 class."""
# initialize
mocked_env = mock.MagicMock(name='env')
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv2(batch_size, meshgrid, mocked_env, beta=1)
# test assignment of additional attribute
self.assertTrue(hasattr(subject, 'batch_size'))
self.assertEqual(subject.batch_size, batch_size)
def test_get_best_ucb(self):
"""Test getting the best_idx of the UCB."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv2(batch_size, meshgrid, mocked_env, beta=1)
        # making the max occur starting at index 2 for an entire batch
subject.mu[2:2 + batch_size] = 1
subject.mu[3] = 10
subject.mu[0] = 9
return_val = subject.get_best_ucb()
expected_val = 3
self.assertEqual(return_val, expected_val)
return_val = subject.get_best_ucb()
expected_val = 0
self.assertEqual(return_val, expected_val)
@unittest.mock.patch('sklearn.gaussian_process.GaussianProcessRegressor')
def test_learn(self, mocked_gp):
"""Test the learning function."""
# initialize
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv2(batch_size, meshgrid, mocked_env, beta=1)
# set up mocked functions
subject.batch_sample = mock.MagicMock(name='batch sample')
mocked_gp.return_value = mock.MagicMock(name='Mocked GP')
mocked_gp.return_value.predict.return_value = ('Mu', 'Sigma')
subject.learn()
# test GP correctly called
self.assertTrue(mocked_gp.return_value.fit.called)
self.assertEqual(subject.mu, 'Mu')
self.assertEqual(subject.sigma, 'Sigma')
# check time step increase
self.assertEqual(subject.T, 1)
# test second time step
subject = ucb.BatchGPUCB(batch_size, meshgrid, mocked_env, beta=1)
subject.T = 1
subject.learn()
# check time step increase
self.assertEqual(subject.T, 2)
def test_batch_sample(self):
"""Test the environment sampling."""
# initialize
mocked_env = mock.MagicMock(name='env')
mocked_env.sample = mock.MagicMock(name='env_sample',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv2(batch_size, meshgrid, mocked_env, beta=1)
subject.to_exclude.append(1)
indices = [2, 4, 5]
subject.batch_sample(indices)
self.assertEqual(subject.Y, ["Batch", "Batch", "Batch"])
self.assertEqual(subject.to_exclude, [])
def test_false_sample(self):
"""Test generating the hallucinated ('false') sample."""
mocked_env = mock.MagicMock(name='env',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv2(batch_size, meshgrid, mocked_env, beta=1)
subject.false_sample(0)
self.assertEqual(subject.X[-1], [0, 0])
self.assertEqual(subject.Y[-1], 0)
class TestBatchGPUCBv3(unittest.TestCase):
"""Test Batch GP UCBv3 class."""
    def test_inheritance(self):
        """Ensure the subclass inherits from the parent class."""
self.assertTrue(issubclass(ucb.BatchGPUCBv3, ucb.BatchGPUCBv2))
def test___init__(self):
"""Test initialization of the Batch GP UCBv3 class."""
# initialize
mocked_env = mock.MagicMock(name='env')
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv3(batch_size, meshgrid, mocked_env, beta=1)
# test assignment of additional attribute
self.assertTrue(hasattr(subject, 'batch_size'))
self.assertEqual(subject.batch_size, batch_size)
def test_batch_sample(self):
"""Test the environment sampling."""
mocked_env = mock.MagicMock(name='env')
mocked_env.sample = mock.MagicMock(name='env_sample',
return_value="Batch")
n = 5
batch_size = 3
coeffs = np.arange(n)
meshgrid = np.meshgrid(coeffs, coeffs)
subject = ucb.BatchGPUCBv3(batch_size, meshgrid, mocked_env, beta=1)
subject.to_exclude.append(1)
indices = [2, 4, 5]
subject.batch_sample(indices)
self.assertEqual(subject.Y, ["Batch", "Batch", "Batch"])
self.assertEqual(subject.to_exclude, [])
| 38.591667
| 78
| 0.603469
| 1,637
| 13,893
| 4.984728
| 0.095907
| 0.044118
| 0.058333
| 0.051471
| 0.790319
| 0.757843
| 0.733333
| 0.733333
| 0.719608
| 0.702574
| 0
| 0.01601
| 0.28518
| 13,893
| 359
| 79
| 38.699164
| 0.805659
| 0.142014
| 0
| 0.733871
| 0
| 0
| 0.049583
| 0.014543
| 0
| 0
| 0
| 0
| 0.229839
| 1
| 0.076613
| false
| 0
| 0.016129
| 0
| 0.108871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
2d4993ea3eb08132a1090f741ec9e42413058676
| 363
|
py
|
Python
|
numpyro/contrib/tfp/__init__.py
|
ahoho/numpyro
|
64e94e346c51a6c0c1ba51aa7b608e73513f158f
|
[
"Apache-2.0"
] | null | null | null |
numpyro/contrib/tfp/__init__.py
|
ahoho/numpyro
|
64e94e346c51a6c0c1ba51aa7b608e73513f158f
|
[
"Apache-2.0"
] | null | null | null |
numpyro/contrib/tfp/__init__.py
|
ahoho/numpyro
|
64e94e346c51a6c0c1ba51aa7b608e73513f158f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
try:
import tensorflow_probability.substrates.jax as tfp # noqa: F401
except ImportError as e:
raise ImportError("To use this module, please install TensorFlow Probability. It can be"
" installed with `pip install tensorflow_probability`") from e
| 40.333333
| 92
| 0.730028
| 47
| 363
| 5.595745
| 0.808511
| 0.239544
| 0.212928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017301
| 0.203857
| 363
| 8
| 93
| 45.375
| 0.892734
| 0.247934
| 0
| 0
| 0
| 0
| 0.446097
| 0.085502
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d81d9b6daf26f28b055039c49b230eb3e778aab
| 24
|
py
|
Python
|
GPy/kern/src/__init__.py
|
ekalosak/GPy
|
ff82f12c3d321bfc3ce6615447fad25aea9de6bd
|
[
"BSD-3-Clause"
] | 1,685
|
2015-01-03T14:46:25.000Z
|
2022-03-30T02:41:35.000Z
|
GPy/kern/src/__init__.py
|
ekalosak/GPy
|
ff82f12c3d321bfc3ce6615447fad25aea9de6bd
|
[
"BSD-3-Clause"
] | 778
|
2015-01-15T18:21:25.000Z
|
2022-03-30T14:52:32.000Z
|
GPy/kern/src/__init__.py
|
ekalosak/GPy
|
ff82f12c3d321bfc3ce6615447fad25aea9de6bd
|
[
"BSD-3-Clause"
] | 584
|
2015-01-06T06:30:43.000Z
|
2022-03-29T13:05:33.000Z
|
from . import psi_comp
| 8
| 22
| 0.75
| 4
| 24
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208333
| 24
| 2
| 23
| 12
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2d9882d554aa7d158c448a7600a5f7cf7e67a1dd
| 48
|
py
|
Python
|
wlanpi_core/schemas/speedtest/__init__.py
|
WLAN-Pi/wlanpi-core
|
7c626dac990c8240b45813ab5878041d1f3569c4
|
[
"BSD-3-Clause"
] | 1
|
2021-09-07T05:30:02.000Z
|
2021-09-07T05:30:02.000Z
|
wlanpi_core/schemas/speedtest/__init__.py
|
WLAN-Pi/wlanpi-core
|
7c626dac990c8240b45813ab5878041d1f3569c4
|
[
"BSD-3-Clause"
] | 13
|
2021-09-12T15:42:03.000Z
|
2022-02-21T22:20:54.000Z
|
wlanpi_core/schemas/speedtest/__init__.py
|
WLAN-Pi/wlanpi-core
|
7c626dac990c8240b45813ab5878041d1f3569c4
|
[
"BSD-3-Clause"
] | null | null | null |
from .ookla_speedtest_cli import OoklaSpeedtest
| 24
| 47
| 0.895833
| 6
| 48
| 6.833333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 48
| 1
| 48
| 48
| 0.931818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
2dc0050538461b9fd7d1901b9b1e013b1e907fe1
| 69
|
py
|
Python
|
neurondm/neurondm/__init__.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 11
|
2017-05-12T08:50:03.000Z
|
2022-01-22T20:23:25.000Z
|
neurondm/neurondm/__init__.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 81
|
2016-02-25T07:39:15.000Z
|
2022-02-17T20:20:27.000Z
|
neurondm/neurondm/__init__.py
|
tmsincomb/pyontutils
|
dad24e7178d8d8cd3bd60d53b9039952fa7a5a1e
|
[
"MIT"
] | 257
|
2017-07-18T19:32:22.000Z
|
2022-02-03T17:26:18.000Z
|
from .core import *
from .core import __all__
__version__ = '0.1.4'
| 13.8
| 25
| 0.710145
| 11
| 69
| 3.727273
| 0.727273
| 0.390244
| 0.682927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.173913
| 69
| 4
| 26
| 17.25
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.072464
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
93281b3ee23a1584cc48730b1fd11b727ca1d822
| 85
|
py
|
Python
|
src/ostorlab/cli/auth/revoke/__init__.py
|
bbhunter/ostorlab
|
968fe4e5b927c0cd159594c13b73f95b71150154
|
[
"Apache-2.0"
] | 113
|
2022-02-21T09:30:14.000Z
|
2022-03-31T21:54:26.000Z
|
src/ostorlab/cli/auth/revoke/__init__.py
|
bbhunter/ostorlab
|
968fe4e5b927c0cd159594c13b73f95b71150154
|
[
"Apache-2.0"
] | 2
|
2022-02-25T10:56:55.000Z
|
2022-03-24T13:08:06.000Z
|
src/ostorlab/cli/auth/revoke/__init__.py
|
bbhunter/ostorlab
|
968fe4e5b927c0cd159594c13b73f95b71150154
|
[
"Apache-2.0"
] | 20
|
2022-02-28T14:25:04.000Z
|
2022-03-30T23:01:11.000Z
|
"""Module for the auth revoke command"""
from ostorlab.cli.auth.revoke import revoke
| 28.333333
| 43
| 0.776471
| 13
| 85
| 5.076923
| 0.769231
| 0.30303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117647
| 85
| 2
| 44
| 42.5
| 0.88
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fa74e67d2d298300a98df2d11ff18e77ec388176
| 29
|
py
|
Python
|
matlab_lib/__init__.py
|
chungks603/dereverberation-end-to-end
|
9e9d42432a7948c283f65ad01a0aedf910b0b2ad
|
[
"MIT"
] | 31
|
2019-10-12T16:17:38.000Z
|
2022-03-12T12:15:25.000Z
|
matlab_lib/__init__.py
|
chungks603/dereverberation-end-to-end
|
9e9d42432a7948c283f65ad01a0aedf910b0b2ad
|
[
"MIT"
] | 1
|
2022-01-03T12:40:33.000Z
|
2022-01-03T13:15:09.000Z
|
matlab_lib/__init__.py
|
chungks603/dereverberation-end-to-end
|
9e9d42432a7948c283f65ad01a0aedf910b0b2ad
|
[
"MIT"
] | 5
|
2019-10-08T08:30:20.000Z
|
2022-03-12T12:15:44.000Z
|
from .eval import Evaluation
| 14.5
| 28
| 0.827586
| 4
| 29
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.96
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fac0fe0fa234d71777a325d049c5b47331e917d7
| 6,580
|
py
|
Python
|
app_asset/migrations/0001_initial.py
|
sivarki/hjarnuc
|
4acc9437af0f0fdc44d68dd0d6923e1039a4911b
|
[
"Apache-2.0"
] | null | null | null |
app_asset/migrations/0001_initial.py
|
sivarki/hjarnuc
|
4acc9437af0f0fdc44d68dd0d6923e1039a4911b
|
[
"Apache-2.0"
] | null | null | null |
app_asset/migrations/0001_initial.py
|
sivarki/hjarnuc
|
4acc9437af0f0fdc44d68dd0d6923e1039a4911b
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.1 on 2018-12-27 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Host',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('host_ip', models.CharField(max_length=64, unique=True)),
('host_remove_port', models.CharField(max_length=64, null=True)),
('host_user', models.CharField(max_length=128)),
('host_passwd', models.CharField(max_length=256)),
('host_type', models.CharField(max_length=64)),
('host_msg', models.CharField(max_length=256)),
('serial_num', models.CharField(max_length=256, null=True)),
('purchase_date', models.CharField(max_length=128, null=True)),
('overdue_date', models.CharField(max_length=128, null=True)),
('creat_time', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='HostDetail',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('host_name', models.CharField(max_length=128, null=True)),
('mem_size', models.CharField(max_length=128, null=True)),
('swap_size', models.CharField(max_length=64, null=True)),
('cpu_model', models.CharField(max_length=128, null=True)),
('cpu_nums', models.CharField(max_length=128, null=True)),
('disk_info', models.TextField(null=True)),
('interface', models.TextField(null=True)),
('os_type', models.CharField(max_length=128, null=True)),
('kernel_version', models.CharField(max_length=128, null=True)),
('os_version', models.CharField(max_length=128, null=True)),
('product_name', models.CharField(max_length=128, null=True)),
('host_status', models.CharField(max_length=32, null=True)),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_asset.Host')),
],
),
migrations.CreateModel(
name='HostGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('host_group_name', models.CharField(max_length=64, unique=True)),
('host_group_msg', models.CharField(max_length=256, null=True)),
],
),
migrations.CreateModel(
name='IDC',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idc_name', models.CharField(max_length=64, unique=True)),
('idc_msg', models.CharField(max_length=128, null=True)),
('idc_admin', models.CharField(max_length=128, null=True)),
('idc_admin_phone', models.CharField(max_length=128, null=True)),
('idc_admin_email', models.CharField(max_length=128, null=True)),
],
),
migrations.CreateModel(
name='Netwk',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('netwk_ip', models.CharField(max_length=64, unique=True)),
('netwk_remove_port', models.CharField(max_length=64, null=True)),
('netwk_user', models.CharField(max_length=128)),
('netwk_passwd', models.CharField(max_length=256)),
('netwk_type', models.CharField(max_length=64)),
('netwk_msg', models.CharField(max_length=256)),
('serial_num', models.CharField(max_length=256, null=True)),
('purchase_date', models.CharField(max_length=128, null=True)),
('overdue_date', models.CharField(max_length=128, null=True)),
('creat_time', models.DateTimeField(auto_now_add=True)),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.HostGroup')),
('idc', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.IDC')),
],
),
migrations.CreateModel(
name='software',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('server_name', models.CharField(max_length=256)),
('server_version', models.CharField(max_length=512, null=True)),
('server_port', models.CharField(max_length=1026, null=True)),
('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app_asset.Host')),
],
),
migrations.CreateModel(
name='Supplier',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('supplier', models.CharField(max_length=128, unique=True)),
('supplier_head', models.CharField(max_length=128, null=True)),
('supplier_head_phone', models.CharField(max_length=128, null=True)),
('supplier_head_email', models.CharField(max_length=128, null=True)),
],
),
migrations.AddField(
model_name='netwk',
name='supplier',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.Supplier'),
),
migrations.AddField(
model_name='host',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.HostGroup'),
),
migrations.AddField(
model_name='host',
name='idc',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.IDC'),
),
migrations.AddField(
model_name='host',
name='supplier',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='app_asset.Supplier'),
),
]
| 51.40625
| 128
| 0.585258
| 716
| 6,580
| 5.175978
| 0.139665
| 0.169995
| 0.203994
| 0.271991
| 0.844846
| 0.798435
| 0.722342
| 0.665947
| 0.575283
| 0.464382
| 0
| 0.027471
| 0.269757
| 6,580
| 127
| 129
| 51.811024
| 0.743809
| 0.006839
| 0
| 0.5
| 1
| 0
| 0.11771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.016667
| 0.016667
| 0
| 0.05
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
faf61f4916ca5fe8979c8fafce85675f2a598eb3
| 2,596
|
py
|
Python
|
meilisearch/tests/settings/test_settings_ranking_rules_meilisearch.py
|
jrinder42/meilisearch-python
|
d2d4a02c9b8fd23b542a8713902d57e50662e7bb
|
[
"MIT"
] | 159
|
2019-12-10T15:17:13.000Z
|
2022-03-29T15:42:50.000Z
|
meilisearch/tests/settings/test_settings_ranking_rules_meilisearch.py
|
jrinder42/meilisearch-python
|
d2d4a02c9b8fd23b542a8713902d57e50662e7bb
|
[
"MIT"
] | 296
|
2019-12-17T15:46:51.000Z
|
2022-03-31T09:20:13.000Z
|
meilisearch/tests/settings/test_settings_ranking_rules_meilisearch.py
|
jrinder42/meilisearch-python
|
d2d4a02c9b8fd23b542a8713902d57e50662e7bb
|
[
"MIT"
] | 30
|
2020-01-06T10:24:31.000Z
|
2022-03-20T15:54:13.000Z
|
NEW_RANKING_RULES = ['typo', 'exactness']
DEFAULT_RANKING_RULES = [
'words',
'typo',
'proximity',
'attribute',
'sort',
'exactness'
]
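# MeiliSearch applies ranking rules in this order by default.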
def test_get_ranking_rules_default(empty_index):
"""Tests getting the default ranking rules."""
response = empty_index().get_ranking_rules()
assert isinstance(response, list)
for rule in DEFAULT_RANKING_RULES:
assert rule in response
def test_update_ranking_rules(empty_index):
"""Tests changing the ranking rules."""
index = empty_index()
response = index.update_ranking_rules(NEW_RANKING_RULES)
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in NEW_RANKING_RULES:
assert rule in response
def test_update_ranking_rules_none(empty_index):
"""Tests updating the ranking rules at null."""
index = empty_index()
# Update the settings first
response = index.update_ranking_rules(NEW_RANKING_RULES)
update = index.wait_for_pending_update(response['updateId'])
assert update['status'] == 'processed'
# Check the settings have been correctly updated
response = index.get_ranking_rules()
for rule in NEW_RANKING_RULES:
assert rule in response
    # Update the setting to None to trigger the reset
response = index.update_ranking_rules(None)
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in DEFAULT_RANKING_RULES:
assert rule in response
def test_reset_ranking_rules(empty_index):
"""Tests resetting the ranking rules setting to its default value."""
index = empty_index()
# Update the settings first
response = index.update_ranking_rules(NEW_RANKING_RULES)
update = index.wait_for_pending_update(response['updateId'])
assert update['status'] == 'processed'
# Check the settings have been correctly updated
response = index.get_ranking_rules()
assert isinstance(response, list)
for rule in NEW_RANKING_RULES:
assert rule in response
# Check the reset of the settings
response = index.reset_ranking_rules()
assert isinstance(response, dict)
assert 'updateId' in response
index.wait_for_pending_update(response['updateId'])
response = index.get_ranking_rules()
for rule in DEFAULT_RANKING_RULES:
assert rule in response
| 36.055556
| 73
| 0.725732
| 328
| 2,596
| 5.496951
| 0.158537
| 0.199667
| 0.1198
| 0.066556
| 0.781475
| 0.740987
| 0.740987
| 0.740987
| 0.72213
| 0.72213
| 0
| 0
| 0.191448
| 2,596
| 71
| 74
| 36.56338
| 0.85898
| 0.154468
| 0
| 0.714286
| 0
| 0
| 0.067804
| 0
| 0
| 0
| 0
| 0
| 0.321429
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
8785c5934fb08022e988231f5dae50d6119491fa
| 6,728
|
py
|
Python
|
rf4ce/autognuradio/ieee802_15_4_oqpsk_phy.py
|
arunmagesh/rf4ce-tools
|
d86bbe8071bf63571535d53a6406103c16fddb6e
|
[
"MIT"
] | 8
|
2018-06-02T05:55:56.000Z
|
2019-06-20T21:16:44.000Z
|
rf4ce/autognuradio/ieee802_15_4_oqpsk_phy.py
|
arunmagesh/rf4ce-tools
|
d86bbe8071bf63571535d53a6406103c16fddb6e
|
[
"MIT"
] | 1
|
2021-04-14T17:22:47.000Z
|
2021-04-25T18:40:23.000Z
|
rf4ce/autognuradio/ieee802_15_4_oqpsk_phy.py
|
courk/rf4ce-tools
|
d86bbe8071bf63571535d53a6406103c16fddb6e
|
[
"MIT"
] | 3
|
2020-09-05T21:11:49.000Z
|
2021-04-25T17:55:58.000Z
|
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: IEEE802.15.4 OQPSK PHY
# Generated: Wed Jul 12 20:16:06 2017
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import digital
from gnuradio import filter
from gnuradio import gr
from gnuradio.filter import firdes
from math import sin, pi
import foo
import ieee802_15_4
import math
import pmt
class ieee802_15_4_oqpsk_phy(gr.hier_block2):
def __init__(self):
gr.hier_block2.__init__(
self, "IEEE802.15.4 OQPSK PHY",
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
gr.io_signature(1, 1, gr.sizeof_gr_complex*1),
)
self.message_port_register_hier_in("txin")
self.message_port_register_hier_out("rxout")
##################################################
# Variables
##################################################
self.samp_rate = samp_rate = 4000000
##################################################
# Blocks
##################################################
self.single_pole_iir_filter_xx_0 = filter.single_pole_iir_filter_ff(0.00016, 1)
self.ieee802_15_4_packet_sink_0 = ieee802_15_4.packet_sink(10)
self.ieee802_15_4_access_code_prefixer_0 = ieee802_15_4.access_code_prefixer()
self.foo_burst_tagger_0 = foo.burst_tagger(pmt.intern("pdu_length"), 128)
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(2, 0.000225, 0.5, 0.03, 0.0002)
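        # The lookup table below maps each 4-bit input symbol to its 32-chip IEEE 802.15.4
        # spreading sequence (16 complex I/Q samples per symbol); it was auto-generated by GRC.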
self.digital_chunks_to_symbols_xx_0 = digital.chunks_to_symbols_bc(([(1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (1-1j), (-1+1j), (1+1j), (-1-1j), (-1-1j), (1+1j), (-1+1j), (-1+1j), (-1-1j), (1-1j), (-1-1j), (1-1j), (1+1j), (1-1j), (1+1j), (-1+1j), (1+1j), (-1-1j), (1-1j), (-1+1j), (-1+1j), (1-1j), (-1-1j), (-1-1j), (-1+1j), (1+1j), (-1+1j), (1+1j), (1-1j), (1+1j), (1-1j), (-1-1j)]), 16)
self.blocks_vector_source_x_0 = blocks.vector_source_c([0, sin(pi/4), 1, sin(3*pi/4)], True, 1, [])
self.blocks_sub_xx_0 = blocks.sub_ff(1)
self.blocks_repeat_0 = blocks.repeat(gr.sizeof_gr_complex*1, 4)
self.blocks_pdu_to_tagged_stream_0_0_0 = blocks.pdu_to_tagged_stream(blocks.byte_t, 'pdu_length')
self.blocks_packed_to_unpacked_xx_0 = blocks.packed_to_unpacked_bb(4, gr.GR_LSB_FIRST)
self.blocks_multiply_xx_0 = blocks.multiply_vcc(1)
self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
self.blocks_delay_0 = blocks.delay(gr.sizeof_float*1, 2)
self.blocks_complex_to_float_0 = blocks.complex_to_float(1)
self.analog_quadrature_demod_cf_0 = analog.quadrature_demod_cf(1)
##################################################
# Connections
##################################################
self.msg_connect((self.ieee802_15_4_access_code_prefixer_0, 'out'), (self.blocks_pdu_to_tagged_stream_0_0_0, 'pdus'))
self.msg_connect((self.ieee802_15_4_packet_sink_0, 'out'), (self, 'rxout'))
self.msg_connect((self, 'txin'), (self.ieee802_15_4_access_code_prefixer_0, 'in'))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.blocks_sub_xx_0, 0))
self.connect((self.analog_quadrature_demod_cf_0, 0), (self.single_pole_iir_filter_xx_0, 0))
self.connect((self.blocks_complex_to_float_0, 1), (self.blocks_delay_0, 0))
self.connect((self.blocks_complex_to_float_0, 0), (self.blocks_float_to_complex_0, 0))
self.connect((self.blocks_delay_0, 0), (self.blocks_float_to_complex_0, 1))
self.connect((self.blocks_float_to_complex_0, 0), (self.foo_burst_tagger_0, 0))
self.connect((self.blocks_multiply_xx_0, 0), (self.blocks_complex_to_float_0, 0))
self.connect((self.blocks_packed_to_unpacked_xx_0, 0), (self.digital_chunks_to_symbols_xx_0, 0))
self.connect((self.blocks_pdu_to_tagged_stream_0_0_0, 0), (self.blocks_packed_to_unpacked_xx_0, 0))
self.connect((self.blocks_repeat_0, 0), (self.blocks_multiply_xx_0, 1))
self.connect((self.blocks_sub_xx_0, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.blocks_vector_source_x_0, 0), (self.blocks_multiply_xx_0, 0))
self.connect((self.digital_chunks_to_symbols_xx_0, 0), (self.blocks_repeat_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.ieee802_15_4_packet_sink_0, 0))
self.connect((self.foo_burst_tagger_0, 0), (self, 0))
self.connect((self, 0), (self.analog_quadrature_demod_cf_0, 0))
self.connect((self.single_pole_iir_filter_xx_0, 0), (self.blocks_sub_xx_0, 1))
def get_samp_rate(self):
return self.samp_rate
def set_samp_rate(self, samp_rate):
self.samp_rate = samp_rate
| 78.232558
| 2,258
| 0.555886
| 1,188
| 6,728
| 2.895623
| 0.101852
| 0.223256
| 0.296512
| 0.444767
| 0.750581
| 0.667442
| 0.581977
| 0.462209
| 0.389535
| 0.290116
| 0
| 0.130828
| 0.14566
| 6,728
| 85
| 2,259
| 79.152941
| 0.467641
| 0.021403
| 0
| 0.032787
| 1
| 0
| 0.011658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04918
| false
| 0
| 0.180328
| 0.016393
| 0.262295
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
87ca7d31fb401c6de897fa3864d8aef3dc2ab21b
| 2,135
|
py
|
Python
|
modules/dbnd/test_dbnd/py3only/test_decorator_spec_py3only.py
|
hugovk/dbnd
|
59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd/test_dbnd/py3only/test_decorator_spec_py3only.py
|
hugovk/dbnd
|
59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd
|
[
"Apache-2.0"
] | null | null | null |
modules/dbnd/test_dbnd/py3only/test_decorator_spec_py3only.py
|
hugovk/dbnd
|
59cd2a63a88e3bf6022bf8a4e74e6e10b183abcd
|
[
"Apache-2.0"
] | null | null | null |
from dbnd import parameter
from dbnd._core.decorator.task_decorator_spec import build_task_decorator_spec
class TestTaskDecoratorSpecPy3(object):
def test_annotations(self):
def with_annotations(a: int, b: str, **kwargs: str) -> int:
pass
decorator_spec = build_task_decorator_spec(with_annotations, {}, parameter)
assert decorator_spec.annotations == {
"return": int,
"a": int,
"b": str,
"kwargs": str,
}
assert decorator_spec.doc_annotations == {}
def test_args_and_kwargs(self):
def args_and_kwargs(a, *args, word="default", **kwargs):
pass
decorator_spec = build_task_decorator_spec(args_and_kwargs, {}, parameter)
assert not decorator_spec.is_class
assert decorator_spec.args == ["a"]
assert decorator_spec.varargs == "args"
assert decorator_spec.varkw == "kwargs"
assert decorator_spec.defaults == {}
assert decorator_spec.kwonlyargs == ["word"]
assert decorator_spec.kwonlydefaults == {"word": "default"}
assert decorator_spec.defaults_values == ()
assert decorator_spec.name == "args_and_kwargs"
assert decorator_spec.known_keywords_names == {"a"}
def test_args_and_kwargs_and_decorator_kwarg(self):
def args_and_kwargs(a, *args, word="default", **kwargs):
pass
decorator_spec = build_task_decorator_spec(
args_and_kwargs, {"decorator": 1}, parameter
)
assert not decorator_spec.is_class
assert decorator_spec.args == ["a"]
assert decorator_spec.varargs == "args"
assert decorator_spec.varkw == "kwargs"
assert decorator_spec.defaults == {}
assert decorator_spec.decorator_kwargs == {"decorator": 1}
assert decorator_spec.kwonlyargs == ["word"]
assert decorator_spec.kwonlydefaults == {"word": "default"}
assert decorator_spec.defaults_values == ()
assert decorator_spec.name == "args_and_kwargs"
assert decorator_spec.known_keywords_names == {"a", "decorator"}
| 38.818182
| 83
| 0.648712
| 235
| 2,135
| 5.574468
| 0.187234
| 0.307634
| 0.30458
| 0.067176
| 0.752672
| 0.722137
| 0.696183
| 0.666412
| 0.666412
| 0.666412
| 0
| 0.001862
| 0.245433
| 2,135
| 54
| 84
| 39.537037
| 0.811297
| 0
| 0
| 0.511111
| 0
| 0
| 0.065105
| 0
| 0
| 0
| 0
| 0
| 0.511111
| 1
| 0.133333
| false
| 0.066667
| 0.044444
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
35527d26b2161b72c878f119f1d4a925d6428597
| 196
|
py
|
Python
|
actforce/apps/base/admin.py
|
neworganizing/actforce
|
a47ac6f85ea3ff521d7b27731904fce07c34f450
|
[
"Apache-2.0"
] | 1
|
2017-10-11T11:41:31.000Z
|
2017-10-11T11:41:31.000Z
|
actforce/apps/base/admin.py
|
neworganizing/actforce
|
a47ac6f85ea3ff521d7b27731904fce07c34f450
|
[
"Apache-2.0"
] | null | null | null |
actforce/apps/base/admin.py
|
neworganizing/actforce
|
a47ac6f85ea3ff521d7b27731904fce07c34f450
|
[
"Apache-2.0"
] | null | null | null |
from django.contrib import admin
from actforce.apps.base.models import *
class SalesforceAccountAdmin(admin.ModelAdmin):
pass
admin.site.register(SalesforceAccount, SalesforceAccountAdmin)
| 21.777778
| 62
| 0.826531
| 21
| 196
| 7.714286
| 0.761905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102041
| 196
| 8
| 63
| 24.5
| 0.920455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
355cbda748dad0847bce0ee4e2431c101c409e4c
| 5,057
|
py
|
Python
|
tests/tree/test_tree_innerproduct_derivs.py
|
bluetyson/discretize
|
a4ead91d6a1f84658ab20946da5fa86dc9ccc831
|
[
"MIT"
] | null | null | null |
tests/tree/test_tree_innerproduct_derivs.py
|
bluetyson/discretize
|
a4ead91d6a1f84658ab20946da5fa86dc9ccc831
|
[
"MIT"
] | null | null | null |
tests/tree/test_tree_innerproduct_derivs.py
|
bluetyson/discretize
|
a4ead91d6a1f84658ab20946da5fa86dc9ccc831
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import numpy as np
import unittest
import discretize
def doTestFace(h, rep, fast, meshType, invProp=False, invMat=False):
if meshType == 'Curv':
hRect = discretize.utils.exampleLrmGrid(h, 'rotate')
mesh = discretize.CurvilinearMesh(hRect)
elif meshType == 'Tree':
mesh = discretize.TreeMesh(h, levels=3)
mesh.refine(lambda xc: 3)
elif meshType == 'Tensor':
mesh = discretize.TensorMesh(h)
v = np.random.rand(mesh.nF)
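    # rep selects the property type: 0 gives a scalar, otherwise rep values per cell
    # (isotropic, anisotropic, or full tensor).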
    sig = np.random.rand(1) if rep == 0 else np.random.rand(mesh.nC*rep)
def fun(sig):
M = mesh.getFaceInnerProduct(sig, invProp=invProp, invMat=invMat)
Md = mesh.getFaceInnerProductDeriv(sig, invProp=invProp, invMat=invMat, doFast=fast)
return M*v, Md(v)
print(meshType, 'Face', h, rep, fast, ('harmonic' if invProp and invMat else 'standard'))
return discretize.Tests.checkDerivative(fun, sig, num=5, plotIt=False)
def doTestEdge(h, rep, fast, meshType, invProp=False, invMat=False):
if meshType == 'Curv':
        hRect = discretize.utils.exampleLrmGrid(h, 'rotate')
mesh = discretize.CurvilinearMesh(hRect)
elif meshType == 'Tree':
mesh = discretize.TreeMesh(h, levels=3)
mesh.refine(lambda xc: 3)
elif meshType == 'Tensor':
mesh = discretize.TensorMesh(h)
v = np.random.rand(mesh.nE)
    sig = np.random.rand(1) if rep == 0 else np.random.rand(mesh.nC*rep)
def fun(sig):
M = mesh.getEdgeInnerProduct(sig, invProp=invProp, invMat=invMat)
Md = mesh.getEdgeInnerProductDeriv(sig, invProp=invProp, invMat=invMat, doFast=fast)
return M*v, Md(v)
print(meshType, 'Edge', h, rep, fast, ('harmonic' if invProp and invMat else 'standard'))
return discretize.Tests.checkDerivative(fun, sig, num=5, plotIt=False)
class TestInnerProductsDerivsTensor(unittest.TestCase):
def test_FaceIP_2D_float_Tree(self):
self.assertTrue(doTestFace([8, 8], 0, False, 'Tree'))
def test_FaceIP_3D_float_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 0, False, 'Tree'))
def test_FaceIP_2D_isotropic_Tree(self):
self.assertTrue(doTestFace([8, 8], 1, False, 'Tree'))
def test_FaceIP_3D_isotropic_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 1, False, 'Tree'))
def test_FaceIP_2D_anisotropic_Tree(self):
self.assertTrue(doTestFace([8, 8], 2, False, 'Tree'))
def test_FaceIP_3D_anisotropic_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 3, False, 'Tree'))
def test_FaceIP_2D_tensor_Tree(self):
self.assertTrue(doTestFace([8, 8], 3, False, 'Tree'))
def test_FaceIP_3D_tensor_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 6, False, 'Tree'))
def test_FaceIP_2D_float_fast_Tree(self):
self.assertTrue(doTestFace([8, 8], 0, True, 'Tree'))
def test_FaceIP_3D_float_fast_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 0, True, 'Tree'))
def test_FaceIP_2D_isotropic_fast_Tree(self):
self.assertTrue(doTestFace([8, 8], 1, True, 'Tree'))
def test_FaceIP_3D_isotropic_fast_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 1, True, 'Tree'))
def test_FaceIP_2D_anisotropic_fast_Tree(self):
self.assertTrue(doTestFace([8, 8], 2, True, 'Tree'))
def test_FaceIP_3D_anisotropic_fast_Tree(self):
self.assertTrue(doTestFace([8, 8, 8], 3, True, 'Tree'))
# def test_EdgeIP_2D_float_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 0, False, 'Tree'))
def test_EdgeIP_3D_float_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 0, False, 'Tree'))
# def test_EdgeIP_2D_isotropic_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 1, False, 'Tree'))
def test_EdgeIP_3D_isotropic_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 1, False, 'Tree'))
# def test_EdgeIP_2D_anisotropic_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 2, False, 'Tree'))
def test_EdgeIP_3D_anisotropic_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 3, False, 'Tree'))
# def test_EdgeIP_2D_tensor_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 3, False, 'Tree'))
def test_EdgeIP_3D_tensor_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 6, False, 'Tree'))
# def test_EdgeIP_2D_float_fast_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 0, True, 'Tree'))
def test_EdgeIP_3D_float_fast_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 0, True, 'Tree'))
# def test_EdgeIP_2D_isotropic_fast_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 1, True, 'Tree'))
def test_EdgeIP_3D_isotropic_fast_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 1, True, 'Tree'))
# def test_EdgeIP_2D_anisotropic_fast_Tree(self):
# self.assertTrue(doTestEdge([8, 8], 2, True, 'Tree'))
def test_EdgeIP_3D_anisotropic_fast_Tree(self):
self.assertTrue(doTestEdge([8, 8, 8], 3, True, 'Tree'))
if __name__ == '__main__':
unittest.main()
| 39.507813
| 93
| 0.66581
| 698
| 5,057
| 4.627507
| 0.124642
| 0.026006
| 0.104025
| 0.190712
| 0.916409
| 0.912384
| 0.882353
| 0.834056
| 0.541176
| 0.298452
| 0
| 0.03326
| 0.191418
| 5,057
| 127
| 94
| 39.818898
| 0.756664
| 0.137829
| 0
| 0.289157
| 0
| 0
| 0.039586
| 0
| 0
| 0
| 0
| 0
| 0.253012
| 1
| 0.301205
| false
| 0
| 0.048193
| 0
| 0.409639
| 0.036145
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ea08d992facb33b6d33648dc0311eb8d423ff834
| 5,585
|
py
|
Python
|
bootstrap/p1.4/src/operations/openshift.py
|
idzikovskyi/df-on-k8s
|
80e8047293882199d72441932209dc34c7c7d6e3
|
[
"Apache-2.0"
] | 4
|
2021-04-06T01:29:12.000Z
|
2022-02-06T16:59:20.000Z
|
bootstrap/p1.4/src/operations/openshift.py
|
idzikovskyi/df-on-k8s
|
80e8047293882199d72441932209dc34c7c7d6e3
|
[
"Apache-2.0"
] | 1
|
2022-02-28T22:13:04.000Z
|
2022-02-28T22:13:04.000Z
|
bootstrap/p1.4/src/operations/openshift.py
|
idzikovskyi/df-on-k8s
|
80e8047293882199d72441932209dc34c7c7d6e3
|
[
"Apache-2.0"
] | 4
|
2021-04-27T17:07:27.000Z
|
2022-03-08T17:39:08.000Z
|
from common.mapr_exceptions.ex import NotFoundException
from operations.operationsbase import OperationsBase
class OpenShift(OperationsBase):
def __init__(self):
super(OpenShift, self).__init__()
def switch_to_oc(self):
self.is_openshift = True
def is_openshift_connected(self):
"""
This command runs the `oc status` and return if Openshift is connected
:return: True/False
"""
cmd = "oc status"
response = self._run_and_return_response(cmd, False)
if response is None:
return False
elif "warnings" in response.lower():
return False
return True
def run_oc_apply(self, key):
yaml_file, changed = self.get_yaml(key)
if yaml_file is None:
raise NotFoundException("The key '{0}' does not have an entry in the yamls dictionary".format(key))
cmd = "{0} {1}".format(OperationsBase.OC_APPLY, yaml_file)
result = self._run(cmd)
if changed:
self.delete_temp_yaml(yaml_file)
return result
def run_oc_delete(self, key):
yaml_file, changed = self.get_yaml(key)
if yaml_file is None:
raise NotFoundException("The key '{0}' does not have an entry in the yamls dictionary".format(key))
cmd = "{0} {1}".format(OperationsBase.OC_DELETE, yaml_file)
result = self._run(cmd)
if changed:
self.delete_temp_yaml(yaml_file)
return result
def nodesvc_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-nodesvc ' \
' system:serviceaccount:hpe-nodesvc:hpe-nodesvc'
self._run(cmd)
def nodesvc_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-nodesvc ' \
' system:serviceaccount:hpe-nodesvc:hpe-nodesvc'
self._run(cmd)
def csi_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-csi-nodeplugin ' \
' system:serviceaccount:hpe-csi:hpe-csi-nodeplugin'
self._run(cmd)
cmd = 'oc adm policy add-cluster-role-to-user hpe-csi-attacher ' \
' system:serviceaccount:hpe-csi:hpe-csi-provisioner'
self._run(cmd)
cmd = 'oc adm policy add-cluster-role-to-user hpe-csi-provisioner ' \
' system:serviceaccount:hpe-csi:hpe-csi-provisioner'
self._run(cmd)
def csi_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-csi-nodeplugin ' \
' system:serviceaccount:hpe-csi:hpe-csi-nodeplugin'
self._run(cmd)
cmd = 'oc adm policy remove-cluster-role-from-user hpe-csi-attacher ' \
' system:serviceaccount:hpe-csi:hpe-csi-provisioner'
self._run(cmd)
cmd = 'oc adm policy remove-cluster-role-from-user hpe-csi-provisioner ' \
' system:serviceaccount:hpe-csi:hpe-csi-provisioner'
self._run(cmd)
def drill_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-drilloperator ' \
' system:serviceaccount:drill-operator:hpe-drilloperator'
self._run(cmd)
def drill_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-drilloperator ' \
' system:serviceaccount:drill-operator:hpe-drilloperator'
self._run(cmd)
def ingress_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-ingress ' \
' system:serviceaccount:hpe-ingress:hpe-ingress'
self._run(cmd)
def ingress_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-ingress ' \
' system:serviceaccount:hpe-ingress:hpe-ingress'
self._run(cmd)
def spark_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-sparkoperator ' \
' system:serviceaccount:spark-operator:hpe-sparkoperator'
self._run(cmd)
def spark_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-sparkoperator ' \
' system:serviceaccount:spark-operator:hpe-sparkoperator'
self._run(cmd)
def dataplatform_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-dataplatformoperator ' \
' system:serviceaccount:hpe-system:hpe-dataplatformoperator'
self._run(cmd)
def dataplatform_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-dataplatformoperator ' \
' system:serviceaccount:hpe-system:hpe-dataplatformoperator'
self._run(cmd)
def tenant_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-tenantoperator ' \
' system:serviceaccount:hpe-system:hpe-tenantoperator'
self._run(cmd)
def tenant_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-tenantoperator ' \
' system:serviceaccount:hpe-system:hpe-tenantoperator'
self._run(cmd)
def ui_openshift_policy_add(self):
cmd = 'oc adm policy add-cluster-role-to-user hpe-maprui ' \
' system:serviceaccount:hpe-ui:hpe-maprui'
self._run(cmd)
def ui_openshift_policy_remove(self):
cmd = 'oc adm policy remove-cluster-role-from-user hpe-maprui ' \
' system:serviceaccount:hpe-ui:hpe-maprui'
self._run(cmd)
| 38.253425
| 111
| 0.651209
| 699
| 5,585
| 5.048641
| 0.121602
| 0.045622
| 0.062341
| 0.079343
| 0.867385
| 0.867385
| 0.867385
| 0.843865
| 0.843865
| 0.843865
| 0
| 0.001426
| 0.246553
| 5,585
| 145
| 112
| 38.517241
| 0.837215
| 0.016115
| 0
| 0.513761
| 0
| 0
| 0.42559
| 0.285557
| 0
| 0
| 0
| 0
| 0
| 1
| 0.192661
| false
| 0
| 0.018349
| 0
| 0.266055
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
ea2f08894436ab8f99c851c58331b328284e8c82
| 91
|
py
|
Python
|
torch_itl/__init__.py
|
mathurinm/torch_itl
|
e3d92d753bd51ccf585029129110c93bbf9b5fd0
|
[
"MIT"
] | null | null | null |
torch_itl/__init__.py
|
mathurinm/torch_itl
|
e3d92d753bd51ccf585029129110c93bbf9b5fd0
|
[
"MIT"
] | null | null | null |
torch_itl/__init__.py
|
mathurinm/torch_itl
|
e3d92d753bd51ccf585029129110c93bbf9b5fd0
|
[
"MIT"
] | null | null | null |
from .kernel import *
from .model import *
from .sampler import *
from .estimator import *
| 18.2
| 24
| 0.736264
| 12
| 91
| 5.583333
| 0.5
| 0.447761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175824
| 91
| 4
| 25
| 22.75
| 0.893333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
575d0e7cd7b90b25ff7855dd4052ddadef11eb4b
| 108
|
py
|
Python
|
Math/10430_나머지/10430_나머지.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2022-03-30T15:50:47.000Z
|
2022-03-30T15:50:47.000Z
|
Math/10430_나머지/10430_나머지.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | null | null | null |
Math/10430_나머지/10430_나머지.py
|
7dudtj/BOJ_myCode
|
37d105590a7963e2232102b3098fea3c3504b96f
|
[
"MIT"
] | 1
|
2021-07-20T07:11:06.000Z
|
2021-07-20T07:11:06.000Z
|
A, B, C = map(int, input().split())
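# print the four values to check the modular identities, e.g. (A+B)%C == (A%C + B%C)%C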
print((A+B)%C)
print((A%C+B%C)%C)
print((A*B)%C)
print(((A%C)*(B%C))%C)
| 18
| 35
| 0.509259
| 27
| 108
| 2.037037
| 0.296296
| 0.181818
| 0.163636
| 0.290909
| 0.654545
| 0.654545
| 0.654545
| 0.654545
| 0.654545
| 0.654545
| 0
| 0
| 0.092593
| 108
| 5
| 36
| 21.6
| 0.561224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.8
| 1
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
57855e342b32f293d32f270f0dbffb8029369ba8
| 2,128
|
py
|
Python
|
civisml_deploy/tests/test_analysis_logs.py
|
civisanalytics/model-deployment
|
27baa862bc46d009dff58ce13e31cdf971f618b8
|
[
"BSD-3-Clause"
] | 3
|
2018-11-15T14:57:01.000Z
|
2020-09-26T21:49:06.000Z
|
civisml_deploy/tests/test_analysis_logs.py
|
civisanalytics/model-deployment
|
27baa862bc46d009dff58ce13e31cdf971f618b8
|
[
"BSD-3-Clause"
] | 6
|
2018-05-01T17:43:47.000Z
|
2019-08-20T16:53:10.000Z
|
civisml_deploy/tests/test_analysis_logs.py
|
civisanalytics/model-deployment
|
27baa862bc46d009dff58ce13e31cdf971f618b8
|
[
"BSD-3-Clause"
] | 3
|
2018-04-30T19:08:58.000Z
|
2019-11-01T13:52:19.000Z
|
import os
import tempfile
from unittest.mock import Mock, patch
import pandas as pd
import pytest
from civisml_deploy import utils
from civisml_deploy.tests.conftest import reg_app, clf_app
def reg_log_df():
return pd.DataFrame({
0: [0, 10],
1: [10, 0],
2: [-9.5, 10.5],
3: [100, 200],
4: [123, 123]
})
def clf_log_df():
return pd.DataFrame({
0: [0, 10],
1: [10, 0],
2: [1., 0.],
3: [0., 1.],
4: [100, 200],
5: [123, 123]
})
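# Run the logging test against both the regression and classification fixture apps.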
@pytest.mark.parametrize("app, ref_df", [(reg_app(), reg_log_df()),
(clf_app(), clf_log_df())])
@patch.object(utils, 'datetime', autospec=True)
def test_analytic_logs(mock_dt, app, ref_df):
utils.CIVIS_SERVICE_VERSION = '123'
with tempfile.NamedTemporaryFile() as tfile:
with patch.dict(utils.os.environ, {'LOGPATH': tfile.name}):
utils.setup_logs()
mock_utc = Mock()
mock_utc.strftime.side_effect = ['100', '200']
mock_dt.utcnow.return_value = mock_utc
app.get('/predict?x0=0&x1=10')
app.get('/predict?x0=10&x1=0')
test_df = pd.read_csv(os.environ.get('LOGPATH'), header=None)
pd.testing.assert_frame_equal(test_df, ref_df)
@pytest.mark.parametrize("app, ref_df", [(reg_app(), reg_log_df()),
(clf_app(), clf_log_df())])
@patch.object(utils, 'datetime', autospec=True)
def test_analytic_logs_no_svc_version(mock_dt, app, ref_df):
utils.CIVIS_SERVICE_VERSION = None
with tempfile.NamedTemporaryFile() as tfile:
with patch.dict(utils.os.environ, {'LOGPATH': tfile.name}):
utils.setup_logs()
mock_utc = Mock()
mock_utc.strftime.side_effect = ['100', '200']
mock_dt.utcnow.return_value = mock_utc
app.get('/predict?x0=0&x1=10')
app.get('/predict?x0=10&x1=0')
test_df = pd.read_csv(os.environ.get('LOGPATH'), header=None)
pd.testing.assert_frame_equal(test_df, ref_df.iloc[:, :-1])
| 29.971831
| 73
| 0.577068
| 293
| 2,128
| 3.976109
| 0.266212
| 0.025751
| 0.027468
| 0.051502
| 0.801717
| 0.801717
| 0.801717
| 0.801717
| 0.801717
| 0.736481
| 0
| 0.059432
| 0.272556
| 2,128
| 70
| 74
| 30.4
| 0.693152
| 0
| 0
| 0.592593
| 0
| 0
| 0.073778
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 1
| 0.074074
| false
| 0
| 0.12963
| 0.037037
| 0.240741
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
57b0cffda71887719145add5e428d99d80bfca75
| 142
|
py
|
Python
|
chirun/plastex/amscd/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 5
|
2021-12-06T15:57:24.000Z
|
2022-01-24T20:34:00.000Z
|
chirun/plastex/amscd/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 38
|
2021-12-09T13:16:46.000Z
|
2022-03-30T11:42:13.000Z
|
chirun/plastex/amscd/__init__.py
|
sthagen/chirun-ncl-chirun
|
45897319d5203b9867b5d6e00b2db1aa90a6580c
|
[
"Apache-2.0"
] | 1
|
2022-01-17T17:41:35.000Z
|
2022-01-17T17:41:35.000Z
|
"""
Implement the amscd package using the imager
"""
from chirun.plastex import VerbatimEnvironment
class CD(VerbatimEnvironment):
pass
| 15.777778
| 46
| 0.774648
| 16
| 142
| 6.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15493
| 142
| 8
| 47
| 17.75
| 0.916667
| 0.309859
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
57e895d1e8f081c410a3e176fa38b47fe5b16a12
| 75
|
py
|
Python
|
app_functions.py
|
launchlabsdev/MapUI_Base
|
5c2660623c9fc9b92a98d8468bd27bc1be2afbf3
|
[
"BSD-3-Clause"
] | 20
|
2020-08-19T23:27:01.000Z
|
2022-02-03T12:02:17.000Z
|
app_functions.py
|
launchlabsdev/MapUI_Base
|
5c2660623c9fc9b92a98d8468bd27bc1be2afbf3
|
[
"BSD-3-Clause"
] | 1
|
2021-04-10T18:06:05.000Z
|
2021-04-10T18:06:05.000Z
|
app_functions.py
|
launchlabsdev/MapUI_Base
|
5c2660623c9fc9b92a98d8468bd27bc1be2afbf3
|
[
"BSD-3-Clause"
] | 2
|
2020-12-03T19:35:36.000Z
|
2021-11-10T14:58:39.000Z
|
## ==> GUI FILE
from main import *
class Functions(MainWindow):
pass
| 10.714286
| 28
| 0.653333
| 9
| 75
| 5.444444
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.226667
| 75
| 6
| 29
| 12.5
| 0.844828
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
17a9f9dafbc71c1f9a8b2a46698229d0857d0584
| 29
|
py
|
Python
|
Code/odooerp/odoo-8.0/openerp/addons/stock_dropshipping/tests/__init__.py
|
zhupangithub/WEBERP
|
714512082ec5c6db07cbf6af0238ceefe2d2c1a5
|
[
"MIT"
] | 1
|
2019-12-29T11:53:56.000Z
|
2019-12-29T11:53:56.000Z
|
odoo/addons/stock_dropshipping/tests/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | null | null | null |
odoo/addons/stock_dropshipping/tests/__init__.py
|
tuanquanghpvn/odoo8-tutorial
|
52d25f1ca5f233c431cb9d3b24b79c3b4fb5127e
|
[
"MIT"
] | 3
|
2020-10-08T14:42:10.000Z
|
2022-01-28T14:12:29.000Z
|
from . import test_invoicing
| 14.5
| 28
| 0.827586
| 4
| 29
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.92
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
17b15fff765643f31b6b5586fb9deb5ce6278429
| 89
|
py
|
Python
|
lib/ua/agents/trident.py
|
hdknr/ua
|
bc41f5b46fb99d0d576c7542c2184b39679f4ebf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
lib/ua/agents/trident.py
|
hdknr/ua
|
bc41f5b46fb99d0d576c7542c2184b39679f4ebf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
lib/ua/agents/trident.py
|
hdknr/ua
|
bc41f5b46fb99d0d576c7542c2184b39679f4ebf
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
''' Internet Explorer 11
'''
from . import BaseAgent
class Agent(BaseAgent):
pass
| 11.125
| 25
| 0.685393
| 10
| 89
| 6.1
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.028169
| 0.202247
| 89
| 7
| 26
| 12.714286
| 0.830986
| 0.235955
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
17c3ba4ae65c1914678cb1a5512fdf88f05c3577
| 11,135
|
py
|
Python
|
tests/app/templating/summary/test_question.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
tests/app/templating/summary/test_question.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | 8
|
2020-03-24T15:24:18.000Z
|
2022-03-02T04:32:56.000Z
|
tests/app/templating/summary/test_question.py
|
qateam123/eq
|
704757952323647d659c49a71975c56406ff4047
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
import mock
from app.templating.summary.question import Question
class TestQuestion(TestCase):
def test_create_question(self):
# Given
answers = mock.MagicMock()
answer_schema = mock.MagicMock()
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.id, 'question_id')
self.assertEqual(question.title, 'question_title')
self.assertEqual(len(question.answers), 1)
def test_create_question_with_no_answers(self):
# Given
answers = {}
answer_schema = mock.MagicMock()
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.id, 'question_id')
self.assertEqual(question.title, 'question_title')
self.assertEqual(len(question.answers), 1)
def test_create_question_with_multiple_answers(self):
# Given
answers = {'answer_1': 'Han',
'answer_2': 'Solo'}
first_answer_schema = {'id': 'answer_1', 'label': 'First name', 'type': 'text'}
second_answer_schema = {'id': 'answer_2', 'label': 'Surname', 'type': 'text'}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [first_answer_schema, second_answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers), 2)
self.assertEqual(question.answers[0].value, 'Han')
self.assertEqual(question.answers[1].value, 'Solo')
def test_merge_date_range_answers(self):
# Given
answers = {'answer_1': '13/02/2016',
'answer_2': '13/09/2016'}
first_date_answer_schema = {'id': 'answer_1', 'label': 'From', 'type': 'date'}
second_date_answer_schema = {'id': 'answer_2', 'label': 'To', 'type': 'date'}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'DateRange', 'answers': [first_date_answer_schema, second_date_answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers), 1)
self.assertEqual(question.answers[0].value['from'], '13/02/2016')
        self.assertEqual(question.answers[0].value['to'], '13/09/2016')
def test_merge_multiple_date_range_answers(self):
# Given
answers = {'answer_1': '13/02/2016',
'answer_2': '13/09/2016',
'answer_3': '13/03/2016',
'answer_4': '13/10/2016'}
first_date_answer_schema = {'id': 'answer_1', 'label': 'From', 'type': 'date'}
second_date_answer_schema = {'id': 'answer_2', 'label': 'To', 'type': 'date'}
third_date_answer_schema = {'id': 'answer_3', 'label': 'First period', 'type': 'date'}
fourth_date_answer_schema = {'id': 'answer_4', 'label': 'Second period', 'type': 'date'}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'DateRange', 'answers':
[first_date_answer_schema, second_date_answer_schema, third_date_answer_schema, fourth_date_answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers), 2)
self.assertEqual(question.answers[0].value['from'], '13/02/2016')
        self.assertEqual(question.answers[0].value['to'], '13/09/2016')
        self.assertEqual(question.answers[1].value['from'], '13/03/2016')
        self.assertEqual(question.answers[1].value['to'], '13/10/2016')
def test_checkbox_button_options(self):
# Given
answers = {'answer_1': ['Light Side', 'Dark Side']}
options = [{
'label': 'Light Side',
'value': 'Light Side',
},
{
'label': 'Dark Side',
'value': 'Dark Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers[0].value), 2)
self.assertEqual(question.answers[0].value[0], 'Light Side')
self.assertEqual(question.answers[0].value[1], 'Dark Side')
def test_checkbox_button_other_option_empty(self):
# Given
answers = {'answer_1': ['other', '']}
options = [{
'label': 'Light Side',
'value': 'Light Side',
},
{
'label': 'Other option label',
'value': 'other',
"other": {
"label": "Please specify other"
}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers[0].value), 1)
self.assertEqual(question.answers[0].value[0], 'Other option label')
def test_checkbox_button_other_option_text(self):
# Given
answers = {'answer_1': ['Light Side', 'other', 'Neither']}
options = [{
'label': 'Light Side',
'value': 'Light Side',
},
{
'label': 'Other',
'value': 'other',
"other": {
"label": "Please specify other"
}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers[0].value), 2)
self.assertEqual(question.answers[0].value[0], 'Light Side')
self.assertEqual(question.answers[0].value[1], 'Neither')
def test_checkbox_button_none_selected_should_be_none(self):
# Given
answers = {'answer_1': []}
options = [{
'label': 'Light Side',
'value': 'Light Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Checkbox', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.answers[0].value, None)
def test_radio_button_other_option_empty(self):
# Given
answers = {'answer_1': ''}
options = [{
'label': 'Light Side',
'value': 'Light Side',
},
{
'label': 'Other option label',
'value': 'other',
"other": {
"label": "Please specify other"
}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Radio', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.answers[0].value, 'Other option label')
def test_radio_button_other_option_text(self):
# Given
answers = {'answer_1': 'I want to be on the dark side'}
options = [{
'label': 'Light Side',
'value': 'Light Side',
},
{
'label': 'Other option label',
'value': 'other',
"other": {
"label": "Please specify other"
}
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Radio', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.answers[0].value, 'I want to be on the dark side')
def test_radio_button_none_selected_should_be_none(self):
# Given
answers = {'answer_1': None}
options = [{
'label': 'Light Side',
'value': 'Light Side',
}]
answer_schema = {'id': 'answer_1', 'label': 'Which side?', 'type': 'Radio', 'options': options}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(question.answers[0].value, None)
def test_question_should_be_skipped(self):
# Given
answers = {'answer_1': 'skip me'}
answer_schema = {'id': 'answer_1', 'title': '', 'type': '', 'label': ''}
skip_condition = {'when': [{'id': 'answer_1', 'condition': 'equals', 'value': 'skip me'}]}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema],
'skip_condition': skip_condition}
# When
question = Question(question_schema, answers)
# Then
self.assertTrue(question.is_skipped(answers))
def test_question_with_no_answers_should_not_be_skipped(self):
# Given
answers = {}
answer_schema = {'id': 'answer_1', 'title': '', 'type': '', 'label': ''}
skip_condition = {'when': [{'id': 'answer_1', 'condition': 'equals', 'value': 'skip me'}]}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'GENERAL', 'answers': [answer_schema],
'skip_condition': skip_condition}
# When
question = Question(question_schema, answers)
# Then
self.assertFalse(question.is_skipped(answers))
def test_build_answers_repeating_answers(self):
# Given
answers = {
'answer': 'value',
'answer_1': 'value1',
'answer_2': 'value2',
}
answer_schema = {'id': 'answer', 'title': '', 'type': '', 'label': ''}
question_schema = {'id': 'question_id', 'title': 'question_title', 'type': 'RepeatingAnswer',
'answers': [answer_schema]}
# When
question = Question(question_schema, answers)
# Then
self.assertEqual(len(question.answers), 3)
| 38.933566
| 161
| 0.566233
| 1,166
| 11,135
| 5.19211
| 0.084048
| 0.079286
| 0.079782
| 0.059465
| 0.888669
| 0.8667
| 0.816485
| 0.803271
| 0.797324
| 0.756359
| 0
| 0.020849
| 0.272025
| 11,135
| 285
| 162
| 39.070175
| 0.726005
| 0.021464
| 0
| 0.581152
| 0
| 0
| 0.234747
| 0
| 0
| 0
| 0
| 0
| 0.167539
| 1
| 0.078534
| false
| 0
| 0.015707
| 0
| 0.099476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
17e2f663033f3197a1663d777499f8cad6f0eaf1
| 12,785
|
py
|
Python
|
datasets/DALIDataLoader.py
|
ShowLo/Networks
|
48f8545783966c383b6c3b600fbe37a15ea8ae3c
|
[
"MIT"
] | null | null | null |
datasets/DALIDataLoader.py
|
ShowLo/Networks
|
48f8545783966c383b6c3b600fbe37a15ea8ae3c
|
[
"MIT"
] | null | null | null |
datasets/DALIDataLoader.py
|
ShowLo/Networks
|
48f8545783966c383b6c3b600fbe37a15ea8ae3c
|
[
"MIT"
] | null | null | null |
'''
A dataloader that uses NVIDIA DALI to speed up data loading in PyTorch
Ref: https://github.com/d-li14/mobilenetv2.pytorch/blob/master/utils/dataloaders.py
https://github.com/NVIDIA/DALI/blob/master/docs/examples/pytorch/resnet50/main.py
'''
import os
import torch
import numpy as np
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from math import ceil
try:
from nvidia.dali.plugin.pytorch import DALIClassificationIterator
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
except ImportError:
print("Please install DALI from https://www.github.com/NVIDIA/DALI to run DataLoader.")
class TinyImageNetHybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed, dali_cpu=False):
super(TinyImageNetHybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=local_rank,
num_shards=world_size,
pad_last_batch=True,
random_shuffle=False,
shuffle_after_epoch=True)
# decide to work on cpu or gpu
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
self.decode = ops.ImageDecoder(device=decoder_device, output_type=types.RGB)
self.res = ops.RandomResizedCrop(device=dali_device, size=crop, random_aspect_ratio=[0.75, 4./3],
random_area=[0.08, 1.0], num_attempts=100, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device='gpu',
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485*255, 0.456*255, 0.406*255],
std=[0.229*255, 0.224*255, 0.225*255])
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name='Reader')
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class TinyImageNetHybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed):
super(TinyImageNetHybridValPipe, self).__init__(batch_size, num_threads, device_id, seed)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=local_rank,
num_shards=world_size,
pad_last_batch=True,
random_shuffle=False)
self.decode = ops.ImageDecoder(device='mixed', output_type=types.RGB)
self.cmnp = ops.CropMirrorNormalize(device='gpu',
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485*255, 0.456*255, 0.406*255],
std=[0.229*255, 0.224*255, 0.225*255])
def define_graph(self):
self.jpegs, self.labels = self.input(name='Reader')
images = self.decode(self.jpegs)
output = self.cmnp(images)
return [output, self.labels]
class ImageNetHybridTrainPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, seed, dali_cpu=False):
super(ImageNetHybridTrainPipe, self).__init__(batch_size, num_threads, device_id, seed = seed)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=local_rank,
num_shards=world_size,
pad_last_batch=True,
random_shuffle=False,
shuffle_after_epoch=True)
# decide to work on cpu or gpu
dali_device = 'cpu' if dali_cpu else 'gpu'
decoder_device = 'cpu' if dali_cpu else 'mixed'
# This padding sets the size of the internal nvJPEG buffers to be able to handle all images from full-sized ImageNet
# without additional reallocations
device_memory_padding = 211025920 if decoder_device == 'mixed' else 0
host_memory_padding = 140544512 if decoder_device == 'mixed' else 0
        # Alternative decode path kept from the original source for reference:
        # self.decode = ops.ImageDecoderRandomCrop(device=decoder_device, output_type=types.RGB,
        #                                          device_memory_padding=device_memory_padding,
        #                                          host_memory_padding=host_memory_padding,
        #                                          random_aspect_ratio=[0.75, 1.25],
        #                                          random_area=[0.08, 1.0],
        #                                          num_attempts=100)
        # self.res = ops.Resize(device=dali_device, resize_x=crop, resize_y=crop, interp_type=types.INTERP_TRIANGULAR)
self.decode = ops.ImageDecoder(device=decoder_device, output_type=types.RGB,
device_memory_padding=device_memory_padding,
host_memory_padding=host_memory_padding,)
self.res = ops.RandomResizedCrop(device=dali_device, size=crop, random_aspect_ratio=[0.75, 4./3],
random_area=[0.08, 1.0], num_attempts=100, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device='gpu',
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485 * 255,0.456 * 255,0.406 * 255],
std=[0.229 * 255,0.224 * 255,0.225 * 255])
self.coin = ops.CoinFlip(probability=0.5)
def define_graph(self):
rng = self.coin()
self.jpegs, self.labels = self.input(name='Reader')
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images.gpu(), mirror = rng)
return [output, self.labels]
class ImageNetHybridValPipe(Pipeline):
def __init__(self, batch_size, num_threads, device_id, data_dir, crop, size, seed):
super(ImageNetHybridValPipe, self).__init__(batch_size, num_threads, device_id, seed = seed)
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
self.input = ops.FileReader(
file_root=data_dir,
shard_id=local_rank,
num_shards=world_size,
pad_last_batch=True,
random_shuffle=False)
self.decode = ops.ImageDecoder(device='mixed', output_type=types.RGB)
self.res = ops.Resize(device='gpu', resize_shorter=size, interp_type=types.INTERP_TRIANGULAR)
self.cmnp = ops.CropMirrorNormalize(device='gpu',
output_dtype=types.FLOAT,
output_layout=types.NCHW,
crop=(crop, crop),
image_type=types.RGB,
mean=[0.485*255, 0.456*255, 0.406*255],
std=[0.229*255, 0.224*255, 0.225*255])
def define_graph(self):
self.jpegs, self.labels = self.input(name='Reader')
images = self.decode(self.jpegs)
images = self.res(images)
output = self.cmnp(images)
return [output, self.labels]
class DALIWrapper(object):
def gen_wrapper(dali_pipeline):
for data in dali_pipeline:
input = data[0]['data']
target = data[0]['label'].squeeze().cuda().long()
yield input, target
def __init__(self, dali_pipeline):
self.dali_pipeline = dali_pipeline
def __iter__(self):
return DALIWrapper.gen_wrapper(self.dali_pipeline)
def get_dali_tinyImageNet_train_loader(data_path, batch_size, seed, num_threads=4, dali_cpu=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
train_dir = os.path.join(data_path, 'train')
pipe = TinyImageNetHybridTrainPipe(batch_size=batch_size, num_threads=num_threads,
device_id=local_rank, data_dir=train_dir,
crop=56, seed=seed, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size('Reader') / world_size), fill_last_batch=False, last_batch_padded=True, auto_reset=True)
return DALIWrapper(train_loader), ceil(pipe.epoch_size('Reader') / (world_size*batch_size))
def get_dali_tinyImageNet_val_loader(data_path, batch_size, seed, num_threads=4):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
val_dir = os.path.join(data_path, 'val')
pipe = TinyImageNetHybridValPipe(batch_size=batch_size, num_threads=num_threads,
device_id=local_rank, data_dir=val_dir,
crop=56, seed=seed)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size('Reader')/world_size), fill_last_batch=False, last_batch_padded=True, auto_reset=True)
return DALIWrapper(val_loader), ceil(pipe.epoch_size('Reader') / (world_size * batch_size))
def get_dali_imageNet_train_loader(data_path, batch_size, seed, num_threads=4, dali_cpu=False):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
train_dir = os.path.join(data_path, 'ILSVRC2012_img_train')
pipe = ImageNetHybridTrainPipe(batch_size=batch_size, num_threads=num_threads,
device_id=local_rank, data_dir=train_dir,
crop=224, seed=seed, dali_cpu=dali_cpu)
pipe.build()
train_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size('Reader') / world_size), fill_last_batch=False, last_batch_padded=True, auto_reset=True)
return DALIWrapper(train_loader), ceil(pipe.epoch_size('Reader') / (world_size*batch_size))
def get_dali_imageNet_val_loader(data_path, batch_size, seed, num_threads=4):
if torch.distributed.is_initialized():
local_rank = torch.distributed.get_rank()
world_size = torch.distributed.get_world_size()
else:
local_rank = 0
world_size = 1
val_dir = os.path.join(data_path, 'ILSVRC2012_img_val')
pipe = ImageNetHybridValPipe(batch_size=batch_size, num_threads=num_threads,
device_id=local_rank, data_dir=val_dir,
crop=224, size=256, seed=seed)
pipe.build()
val_loader = DALIClassificationIterator(pipe, size=int(pipe.epoch_size('Reader')/world_size), fill_last_batch=False, last_batch_padded=True, auto_reset=True)
return DALIWrapper(val_loader), ceil(pipe.epoch_size('Reader') / (world_size * batch_size))
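A minimal usage sketch for the loaders above (the model, data_path, and batch size are placeholders, not part of the original file):
import torch
def train_one_epoch(model, data_path, batch_size, seed=42):
    # get_dali_imageNet_train_loader returns (iterator, steps_per_epoch)
    train_loader, steps_per_epoch = get_dali_imageNet_train_loader(
        data_path, batch_size, seed)
    model = model.cuda().train()
    for images, targets in train_loader:
        # images and targets are already CUDA tensors (see DALIWrapper)
        outputs = model(images)
        loss = torch.nn.functional.cross_entropy(outputs, targets)
        loss.backward()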
| 46.831502
| 165
| 0.596558
| 1,494
| 12,785
| 4.845382
| 0.140562
| 0.044758
| 0.041995
| 0.031496
| 0.799696
| 0.791684
| 0.780495
| 0.779666
| 0.776074
| 0.761846
| 0
| 0.031732
| 0.309816
| 12,785
| 273
| 166
| 46.831502
| 0.788645
| 0.036371
| 0
| 0.737089
| 0
| 0
| 0.022955
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0.051643
| 0.004695
| 0.187793
| 0.004695
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
aa3b8d479a51ec26db6c5e30cf682832291af0e1
| 30
|
py
|
Python
|
clts/util.py
|
clpn/clts
|
e3aeb2b590850242904b480a1442924449eb0185
|
[
"Apache-2.0"
] | null | null | null |
clts/util.py
|
clpn/clts
|
e3aeb2b590850242904b480a1442924449eb0185
|
[
"Apache-2.0"
] | 11
|
2018-01-08T14:35:50.000Z
|
2021-04-21T12:18:27.000Z
|
clts/util.py
|
clpn/clts
|
e3aeb2b590850242904b480a1442924449eb0185
|
[
"Apache-2.0"
] | 1
|
2018-01-08T14:27:27.000Z
|
2018-01-08T14:27:27.000Z
|
from markdown import markdown
| 15
| 29
| 0.866667
| 4
| 30
| 6.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 30
| 1
| 30
| 30
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
a4c3dc03507d7d5be70eeaef64278e79abe63503
| 12,870
|
py
|
Python
|
services/messenger/tests/notification_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/messenger/tests/notification_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
services/messenger/tests/notification_test.py
|
Counter0021/Anti-Freelancer-microservices-back-end
|
e55481e0a4353107036cd5ba664fee57e29c7597
|
[
"MIT"
] | null | null | null |
from unittest import TestCase, mock
from app.crud import message_crud, dialogue_crud, notification_crud
from app.message.schemas import GetMessage
from app.schemas import UserData
from tests import BaseTest, async_loop
class NotificationTestCase(BaseTest, TestCase):
def test_notifications(self):
async_loop(dialogue_crud.create(self.session, users_ids='1_2'))
async_loop(message_crud.create(self.session, sender_id=1, msg='Hello world!', dialogue_id=1))
async_loop(
notification_crud.create(
self.session,
sender_id=1,
recipient_id=2,
message_id=1,
)
)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 1)
async_loop(message_crud.create(self.session, sender_id=1, msg='Hello world!', dialogue_id=1))
async_loop(
notification_crud.create(
self.session,
sender_id=1,
recipient_id=2,
message_id=2,
)
)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 2)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 2)
headers = {'Authorization': 'Bearer Token'}
# Get all
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(), [
{
'id': 2,
'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=2)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=2)).created_at}Z'.replace(
' ',
'T'
)
},
},
{
'id': 1, 'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=1)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=1)).created_at}Z'.replace(
' ',
'T'
)
},
}
]
)
with mock.patch('app.permission.permission', return_value=1) as _:
response = self.client.get(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [])
async_loop(message_crud.create(self.session, sender_id=2, msg='Hello world!', dialogue_id=1))
async_loop(
notification_crud.create(
self.session,
sender_id=2,
recipient_id=1,
message_id=3,
)
)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 3)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 3)
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(), [
{
'id': 2,
'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=2)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=2)).created_at}Z'.replace(
' ',
'T'
)
},
},
{
'id': 1, 'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=1)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=1)).created_at}Z'.replace(
' ',
'T'
)
},
}
]
)
with mock.patch('app.permission.permission', return_value=1) as _:
response = self.client.get(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(), [
{
'id': 3,
'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=3)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=3)).created_at}Z'.replace(
' ',
'T'
)
},
}
]
)
async_loop(dialogue_crud.create(self.session, users_ids='3_2'))
async_loop(message_crud.create(self.session, sender_id=3, msg='Hello world!', dialogue_id=2))
async_loop(
notification_crud.create(
self.session,
sender_id=3,
recipient_id=2,
message_id=4,
)
)
self.assertEqual(len(async_loop(dialogue_crud.all(self.session))), 2)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 4)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 4)
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(), [
{
'id': 4,
'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=4)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=4)).created_at}Z'.replace(
' ',
'T'
)
},
},
{
'id': 2,
'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=2)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=2)).created_at}Z'.replace(
' ',
'T'
)
},
},
]
)
self.assertEqual(len(async_loop(notification_crud.filter(self.session, recipient_id=2))), 3)
self.assertEqual(len(response.json()), 2)
# Get
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/1', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(), {
'id': 1, 'type': 'SEND',
'data': {
**GetMessage(
**async_loop(message_crud.get(self.session, id=1)).__dict__,
).dict(),
'created_at': f'{async_loop(message_crud.get(self.session, id=1)).created_at}Z'.replace(
' ',
'T'
)
},
}
)
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/3', headers=headers)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'detail': 'You not owner this notification'})
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.get(f'{self.url}/notifications/143', headers=headers)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'detail': 'Notification not found'})
# View only 1
with mock.patch('app.permission.permission', return_value=1) as _:
response = self.client.delete(f'{self.url}/notifications/1', headers=headers)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'detail': 'You not owner this notification'})
# View all (delete)
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.delete(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'msg': 'Notifications has been viewed. You have more notifications'})
self.assertEqual(len(async_loop(notification_crud.filter(self.session, recipient_id=2))), 1)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 4)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 2)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=4)), False)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=3)), True)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=2)), False)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=1)), True)
with mock.patch('app.permission.permission', return_value=2) as _:
response = self.client.delete(f'{self.url}/notifications/', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json(),
{'msg': 'Notifications has been viewed. You don\'t have more notifications'}
)
self.assertEqual(len(async_loop(notification_crud.filter(self.session, recipient_id=2))), 0)
self.assertEqual(len(async_loop(message_crud.all(self.session))), 4)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 1)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=4)), False)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=3)), True)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=2)), False)
self.assertEqual(async_loop(notification_crud.exist(self.session, id=1)), False)
# View only 1
with mock.patch('app.permission.permission', return_value=1) as _:
response = self.client.delete(f'{self.url}/notifications/3', headers=headers)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), {'msg': 'Notification has been viewed'})
self.assertEqual(async_loop(notification_crud.exist(self.session, id=3)), False)
self.assertEqual(len(async_loop(notification_crud.all(self.session))), 0)
with mock.patch('app.permission.permission', return_value=1) as _:
response = self.client.delete(f'{self.url}/notifications/143', headers=headers)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.json(), {'detail': 'Notification not found'})
| 46.630435
| 116
| 0.507925
| 1,262
| 12,870
| 4.998415
| 0.071315
| 0.079899
| 0.065948
| 0.082435
| 0.935954
| 0.925809
| 0.922162
| 0.921528
| 0.921528
| 0.891249
| 0
| 0.017917
| 0.371173
| 12,870
| 275
| 117
| 46.8
| 0.761522
| 0.004118
| 0
| 0.591837
| 0
| 0.004082
| 0.133245
| 0.077746
| 0
| 0
| 0
| 0
| 0.228571
| 1
| 0.004082
| false
| 0
| 0.020408
| 0
| 0.028571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
1022de053dd97929061ea4fc619b870deb6b6aeb
| 10,644
|
py
|
Python
|
mmdet/apis/inference.py
|
arthur801031/3d-multi-resolution-rcnn
|
8e5454a72f8daa174bf3eabfa5964152f04ab287
|
[
"Apache-2.0"
] | 16
|
2021-03-02T07:41:01.000Z
|
2022-03-14T08:55:45.000Z
|
mmdet/apis/inference.py
|
arthur801031/3d-multi-resolution-rcnn
|
8e5454a72f8daa174bf3eabfa5964152f04ab287
|
[
"Apache-2.0"
] | 2
|
2022-01-06T20:54:13.000Z
|
2022-02-24T03:50:51.000Z
|
mmdet/apis/inference.py
|
arthur801031/3d-multi-resolution-rcnn
|
8e5454a72f8daa174bf3eabfa5964152f04ab287
|
[
"Apache-2.0"
] | 2
|
2021-05-26T19:23:35.000Z
|
2022-01-06T20:30:24.000Z
|
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
import torch
from mmdet.core import get_classes
from mmdet.datasets import to_tensor
from mmdet.datasets.transforms import ImageTransform
from PIL import Image
import cv2
def _prepare_data(img, img_transform, cfg, device):
ori_shape = img.shape
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
img = to_tensor(img).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=False)
]
return dict(img=[img], img_meta=[img_meta])
def _prepare_data_3d(img_np, img_transform, cfg, device):
ori_shape = (img_np.shape[0], img_np.shape[1], 3)
total_num_slices = img_np.shape[2]
imgs = []
for cur_slice in range(total_num_slices):
img = img_np[:,:,cur_slice]
img = Image.fromarray(img).convert('RGB')
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
imgs.append(img)
imgs = to_tensor(np.array(imgs)).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=(*img_shape, total_num_slices),
pad_shape=(*pad_shape, total_num_slices),
scale_factor=scale_factor,
flip=False)
]
imgs = imgs.permute(0, 2, 1, 3, 4)
assert imgs.shape[1] == 3 # make sure channel size is 3
return dict(imgs=imgs, img_meta=[img_meta])
def _prepare_data_3d_2scales(img_np, img_np_2, img_transform, cfg, device):
ori_shape = (img_np.shape[0], img_np.shape[1], 3)
ori_shape_2 = (img_np_2.shape[0], img_np_2.shape[1], 3)
total_num_slices = img_np.shape[2]
total_num_slices_2 = img_np_2.shape[2]
# first image
imgs = []
for cur_slice in range(total_num_slices):
img = img_np[:,:,cur_slice]
img = Image.fromarray(img).convert('RGB')
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
imgs.append(img)
imgs = to_tensor(np.array(imgs)).to(device).unsqueeze(0)
img_meta = [
dict(
ori_shape=ori_shape,
img_shape=(*img_shape, total_num_slices),
pad_shape=(*pad_shape, total_num_slices),
# scale_factor=1.0 / (img_np_2.shape[0] / img_np.shape[0]), # scale up to 1.5x
scale_factor=1.0, # scale down 1.0x
flip=False)
]
imgs = imgs.permute(0, 2, 1, 3, 4)
# second image
imgs_2 = []
for cur_slice in range(total_num_slices_2):
img = img_np_2[:,:,cur_slice]
img = Image.fromarray(img).convert('RGB')
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img, img_shape, pad_shape, scale_factor = img_transform(
img,
scale=cfg.data2_2scales.test.img_scale,
keep_ratio=cfg.data.test.get('resize_keep_ratio', True))
imgs_2.append(img)
imgs_2 = to_tensor(np.array(imgs_2)).to(device).unsqueeze(0)
img_meta_2 = [
dict(
ori_shape=ori_shape_2,
img_shape=(*img_shape, total_num_slices_2),
pad_shape=(*pad_shape, total_num_slices_2),
# scale_factor=scale_factor, # scale up to 1.5x
scale_factor=1.5, # scale down 1.0x
flip=False)
]
imgs_2 = imgs_2.permute(0, 2, 1, 3, 4)
assert imgs.shape[1] == 3 # make sure channel size is 3
assert imgs_2.shape[1] == 3
return dict(imgs=imgs, img_meta=[img_meta], imgs_2=imgs_2, img_meta_2=[img_meta_2])
def _inference_single(model, img, img_transform, cfg, device):
img = mmcv.imread(img)
data = _prepare_data(img, img_transform, cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def _inference_single_3d(model, img, img_transform, cfg, device):
img_np = np.load(img)
data = _prepare_data_3d(img_np, img_transform, cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def _inference_single_3d_2scales(model, img, img_2, img_transform, cfg, device):
img_np = np.load(img)
img_np_2 = np.load(img_2)
data = _prepare_data_3d_2scales(img_np, img_np_2, img_transform, cfg, device)
with torch.no_grad():
result = model(return_loss=False, rescale=True, **data)
return result
def _inference_generator(model, imgs, img_transform, cfg, device):
for img in imgs:
yield _inference_single(model, img, img_transform, cfg, device)
def _inference_generator_3d(model, imgs, img_transform, cfg, device):
for img in imgs:
yield _inference_single_3d(model, img, img_transform, cfg, device)
def _inference_generator_3d_2scales(model, imgs, imgs_2, img_transform, cfg, device):
for img, img_2 in zip(imgs, imgs_2):
assert img.split('/')[-1] == img_2.split('/')[-1]
yield _inference_single_3d_2scales(model, img, img_2, img_transform, cfg, device)
def inference_detector(model, imgs, cfg, device='cuda:0'):
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
model = model.to(device)
model.eval()
if not isinstance(imgs, list):
return _inference_single(model, imgs, img_transform, cfg, device)
else:
return _inference_generator(model, imgs, img_transform, cfg, device)
def inference_detector_3d(model, imgs, cfg, device='cuda:0'):
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
model = model.to(device)
model.eval()
if not isinstance(imgs, list):
return _inference_single_3d(model, imgs, img_transform, cfg, device)
else:
return _inference_generator_3d(model, imgs, img_transform, cfg, device)
def inference_detector_3d_2scales(model, imgs, imgs_2, cfg, device='cuda:0'):
img_transform = ImageTransform(
size_divisor=cfg.data.test.size_divisor, **cfg.img_norm_cfg)
model = model.to(device)
model.eval()
if not isinstance(imgs, list):
return _inference_single_3d_2scales(model, imgs, imgs_2, img_transform, cfg, device)
else:
return _inference_generator_3d_2scales(model, imgs, imgs_2, img_transform, cfg, device)
def show_result(img, result, dataset='coco', score_thr=0.3, out_file=None, font_scale=0.5):
img = mmcv.imread(img)
class_names = get_classes(dataset)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool was removed in modern NumPy
img[mask] = img[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
write_bboxes_to_npy(bboxes, out_file)
mmcv.imshow_det_bboxes(
img.copy(),
bboxes,
labels,
class_names=class_names,
score_thr=score_thr,
show=out_file is None,
out_file=out_file,
font_scale=font_scale)
def show_result_3d(img, result, dataset='coco', score_thr=0.3, out_file=None, font_scale=0.5):
img_np = np.load(img)
class_names = get_classes(dataset)
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool was removed in modern NumPy
img[mask] = img[mask] * 0.5 + color_mask * 0.5
bboxes_placeholders = [[] for i in range(0, 160)]
for bbox in bboxes:
for z_index in range(int(np.floor(bbox[4])), int(np.ceil(bbox[5])+ 1)):
bboxes_placeholders[z_index].append([bbox[0], bbox[1], bbox[2], bbox[3], bbox[6]])
for index, boxes in enumerate(bboxes_placeholders):
if len(boxes) > 0:
img = img_np[:,:,index]
img = Image.fromarray(img).convert('RGB')
img = np.array(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
labels = np.array([0 for i in range(len(boxes))])
mmcv.imshow_det_bboxes(
img.copy(),
np.array(boxes),
labels,
class_names=class_names,
score_thr=score_thr,
show=out_file is None,
out_file=out_file.split('.')[-2] + '-{}.png'.format(index),
font_scale=0)
def display_result_3d(img, result, dataset='coco', score_thr=0.3):
img_np = np.load(img)
class_names = get_classes(dataset)
bbox_result = result
bboxes = np.vstack(bbox_result)
bboxes_placeholders = [[] for i in range(0, 160)]
for bbox in bboxes:
for z_index in range(int(np.floor(bbox[4])), int(np.ceil(bbox[5])+ 1)):
bboxes_placeholders[z_index].append([bbox[0], bbox[1], bbox[2], bbox[3], bbox[6]])
for index, boxes in enumerate(bboxes_placeholders):
if len(boxes) > 0:
for box in boxes:
if box[4] > score_thr:
print('slice {} score {}'.format(index, box[4]))
def write_bboxes_to_npy(bboxes, out_file):
    '''Write the bounding-box results to a .npy file next to out_file.'''
if out_file is not None:
bboxes_filename = out_file.split('.')[0] # A001-2342.jpeg => A001-2342
bboxes_filename = bboxes_filename + '.npy'
np.save(bboxes_filename, bboxes)
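A short usage sketch for the helpers above (cfg is an mmcv Config and model a constructed detector; both are assumed to exist in the calling script, and the file names are illustrative):
# Run single-image 2D inference and save a visualization of the result.
result = inference_detector(model, 'demo.jpg', cfg, device='cuda:0')
show_result('demo.jpg', result, dataset='coco', score_thr=0.3,
            out_file='demo_out.jpg')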
| 36.83045
| 95
| 0.635569
| 1,540
| 10,644
| 4.143506
| 0.111688
| 0.023507
| 0.049365
| 0.069111
| 0.84924
| 0.830277
| 0.806927
| 0.7643
| 0.740323
| 0.713368
| 0
| 0.027619
| 0.244833
| 10,644
| 289
| 96
| 36.83045
| 0.766235
| 0.030909
| 0
| 0.633745
| 0
| 0
| 0.013848
| 0
| 0
| 0
| 0
| 0
| 0.016461
| 1
| 0.065844
| false
| 0
| 0.037037
| 0
| 0.152263
| 0.004115
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
103e1ab647b50cd09ec85eab17fae0a8531cdfae
| 200
|
py
|
Python
|
configs/s3dis/pvcnnpp/__init__.py
|
chaomath/pvcnn
|
8f07316611067e9a0e2df8b35e4a729a03e0806b
|
[
"MIT"
] | 1
|
2020-01-28T13:27:17.000Z
|
2020-01-28T13:27:17.000Z
|
configs/s3dis/pvcnnpp/__init__.py
|
chaomath/pvcnn
|
8f07316611067e9a0e2df8b35e4a729a03e0806b
|
[
"MIT"
] | null | null | null |
configs/s3dis/pvcnnpp/__init__.py
|
chaomath/pvcnn
|
8f07316611067e9a0e2df8b35e4a729a03e0806b
|
[
"MIT"
] | null | null | null |
from models.s3dis import PVCNN2
from utils.config import Config, configs
# model
configs.model = Config(PVCNN2)
configs.model.num_classes = configs.data.num_classes
configs.dataset.num_points = 8192
| 25
| 52
| 0.815
| 29
| 200
| 5.517241
| 0.517241
| 0.225
| 0.2125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.039106
| 0.105
| 200
| 7
| 53
| 28.571429
| 0.854749
| 0.025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
52c87bfdd31628125bb6ad5efb102e53a4761a91
| 33
|
py
|
Python
|
pygments_colorizer/__init__.py
|
BrutalSimplicity/colorizer
|
935024fe0f67c17b6f53d2351082fca1bc429b16
|
[
"MIT"
] | null | null | null |
pygments_colorizer/__init__.py
|
BrutalSimplicity/colorizer
|
935024fe0f67c17b6f53d2351082fca1bc429b16
|
[
"MIT"
] | null | null | null |
pygments_colorizer/__init__.py
|
BrutalSimplicity/colorizer
|
935024fe0f67c17b6f53d2351082fca1bc429b16
|
[
"MIT"
] | null | null | null |
from .colorizer import colorizer
| 16.5
| 32
| 0.848485
| 4
| 33
| 7
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 33
| 1
| 33
| 33
| 0.965517
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5e1e880917f9977d663cd29e64bb2fe5d3e2ce7c
| 1,910
|
py
|
Python
|
crop.py
|
NoobHumans/api
|
c9248001e6b1132bfd5088d4d14bb3bf4225ac37
|
[
"MIT"
] | null | null | null |
crop.py
|
NoobHumans/api
|
c9248001e6b1132bfd5088d4d14bb3bf4225ac37
|
[
"MIT"
] | null | null | null |
crop.py
|
NoobHumans/api
|
c9248001e6b1132bfd5088d4d14bb3bf4225ac37
|
[
"MIT"
] | 1
|
2021-12-26T15:40:02.000Z
|
2021-12-26T15:40:02.000Z
|
from PIL import Image
class cropping:
def crop1(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 450
y1 = yc - 400
x2 = xc + 450
y2 = yc + 400
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
def crop2(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 480
y1 = yc - 460
x2 = xc + 480
y2 = yc + 460
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
def crop3(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 450
y1 = yc - 275
x2 = xc + 450
y2 = yc + 275
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
def crop4(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 500
y1 = yc - 275
x2 = xc + 500
y2 = yc + 275
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
def crop5(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 540
y1 = yc - 955
x2 = xc + 540
y2 = yc + 910
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
def crop6(self, fname):
image = Image.open('./img/'+fname+'.jpg')
xc = image.width/2
yc = image.height/2
x1 = xc - 500
y1 = yc - 345
x2 = xc + 500
y2 = yc + 335
cropped = image.crop((x1, y1, x2, y2))
cropped.save('./img/'+fname+'.jpg')
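The six methods above differ only in their crop offsets; a single parameterized helper (a sketch, not part of the original file) would remove the duplication:
from PIL import Image
def crop_centered(fname, dx, dy_up, dy_down=None):
    # Crop a box of half-width dx and half-heights dy_up/dy_down around
    # the image center, overwriting the file in place (as the originals do).
    dy_down = dy_up if dy_down is None else dy_down
    image = Image.open('./img/' + fname + '.jpg')
    xc, yc = image.width / 2, image.height / 2
    cropped = image.crop((xc - dx, yc - dy_up, xc + dx, yc + dy_down))
    cropped.save('./img/' + fname + '.jpg')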
| 30.31746
| 49
| 0.465445
| 258
| 1,910
| 3.445736
| 0.158915
| 0.107987
| 0.148481
| 0.128234
| 0.860517
| 0.813273
| 0.813273
| 0.813273
| 0.813273
| 0.813273
| 0
| 0.113674
| 0.364398
| 1,910
| 63
| 50
| 30.31746
| 0.618616
| 0
| 0
| 0.677419
| 0
| 0
| 0.062794
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.016129
| 0
| 0.129032
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5e3482a65cfe5ed73fac3fd9b8f361b590c9526b
| 100
|
py
|
Python
|
StudentsWorks/Shipon Hossen Raju/07 02 2022.py
|
ProgrammerShipon/python-cook-book-Imranslab-Edition
|
b4ac591eacc17e0c1304144901062a3c5e67d4a1
|
[
"MIT"
] | null | null | null |
StudentsWorks/Shipon Hossen Raju/07 02 2022.py
|
ProgrammerShipon/python-cook-book-Imranslab-Edition
|
b4ac591eacc17e0c1304144901062a3c5e67d4a1
|
[
"MIT"
] | null | null | null |
StudentsWorks/Shipon Hossen Raju/07 02 2022.py
|
ProgrammerShipon/python-cook-book-Imranslab-Edition
|
b4ac591eacc17e0c1304144901062a3c5e67d4a1
|
[
"MIT"
] | null | null | null |
x = 5
print(type(x))
x = "6"
print(type(x))
x = 5.0
print(type(x))
x = 3.45
print(type(x))
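For reference, the snippet above prints:
# <class 'int'>
# <class 'str'>
# <class 'float'>
# <class 'float'>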
| 11.111111
| 16
| 0.51
| 22
| 100
| 2.318182
| 0.363636
| 0.705882
| 0.784314
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.092105
| 0.24
| 100
| 8
| 17
| 12.5
| 0.578947
| 0
| 0
| 0.375
| 0
| 0
| 0.01087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
eac486862bfc26fbc3a13858a739e416431e25c8
| 5,665
|
py
|
Python
|
test/test_approximate_string_matching.py
|
matkaczmarek/string-algorithms
|
df1367e633ee0b3377cbdd67a77982f373fe2459
|
[
"MIT"
] | null | null | null |
test/test_approximate_string_matching.py
|
matkaczmarek/string-algorithms
|
df1367e633ee0b3377cbdd67a77982f373fe2459
|
[
"MIT"
] | null | null | null |
test/test_approximate_string_matching.py
|
matkaczmarek/string-algorithms
|
df1367e633ee0b3377cbdd67a77982f373fe2459
|
[
"MIT"
] | null | null | null |
import itertools
import os
import unittest
from random import randrange
import regex
from approximate_string_matching import dont_care, matching_with_dont_cares, \
approximate_boyer_moore, \
distance
from generator import rand
STRING_MATCHING_WITH_DONT_CARE_ALGORITHMS = [
dont_care.basic_fft,
matching_with_dont_cares.exact_matching_with_dont_cares,
matching_with_dont_cares.exact_matching_with_dont_cares_n_log_m
]
STRING_MATCHING_WITH_EDIT_DISTANCE_ALGORITHMS = [
approximate_boyer_moore.approximate_boyer_moore,
approximate_boyer_moore.simple_dynamic_edit_distance
]
class TestStringMatchingWithDontCare(unittest.TestCase):
run_large = unittest.skipUnless(
os.environ.get('LARGE', False), 'Skip test in small runs')
def check_get_first_match_with_dont_care(self, t, w, n, m, reference):
for algorithm in STRING_MATCHING_WITH_DONT_CARE_ALGORITHMS:
self.assertEqual(
reference,
next(algorithm(t, w, n, m)),
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def check_get_all_matches_with_dont_care(self, t, w, n, m, reference):
for algorithm in STRING_MATCHING_WITH_DONT_CARE_ALGORITHMS:
self.assertEqual(
reference,
list(algorithm(t, w, n, m)),
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def check_no_match(self, t, w, n, m):
for algorithm in STRING_MATCHING_WITH_DONT_CARE_ALGORITHMS:
self.assertFalse(
list(algorithm(t, w, n, m)),
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def test_get_first_match_with_dont_care(self):
self.check_get_first_match_with_dont_care('#abbaba', '#?ba', 6, 3, 2)
def test_get_all_matches_with_dont_care(self):
self.check_get_all_matches_with_dont_care(
'#abaaabbaababb', '#a?b', 13, 3, [4, 5, 8, 11])
def test_no_match(self):
self.check_no_match('#abaaba', '#b???b', 6, 5)
@run_large
def test_random_string_matching_with_dont_care(self):
T, n, m, A = 100, 500, 10, ['a', 'b']
for _ in range(T):
t, w = rand.random_word(n, A), rand.random_word(m, A)
reference = [match.start() + 1 for match in regex.finditer(
w[1:].replace('?', '.'), t[1:], overlapped = True)]
self.check_get_all_matches_with_dont_care(t, w, n, m, reference)
@run_large
def test_all_string_matching_with_dont_care(self):
N, M, A = 7, 3, ['a', 'b']
for n in range(2, N + 1):
for t in itertools.product(A, repeat = n):
t = '#' + ''.join(t)
for m in range(1, M + 1):
for w in itertools.product(A + ['?'], repeat = m):
w = '#' + ''.join(w)
reference = [match.start() + 1 for match in regex.finditer(
w[1:].replace('?', '.'), t[1:], overlapped = True)]
self.check_get_all_matches_with_dont_care(t, w, n, m, reference)
class TestStringMatchingWithEditDistance(unittest.TestCase):
run_large = unittest.skipUnless(
os.environ.get('LARGE', False), 'Skip test in small runs')
def check_get_first_match_with_edit_distance(self, t, w, n, m, k, reference):
for algorithm in STRING_MATCHING_WITH_EDIT_DISTANCE_ALGORITHMS:
self.assertEqual(
next(algorithm(t, w, n, m, k)),
reference,
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def check_get_all_matches_with_edit_distance(self, t, w, n, m, k, reference):
for algorithm in STRING_MATCHING_WITH_EDIT_DISTANCE_ALGORITHMS:
self.assertEqual(
list(algorithm(t, w, n, m, k)),
reference,
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def check_no_match(self, t, w, n, m, k):
for algorithm in STRING_MATCHING_WITH_EDIT_DISTANCE_ALGORITHMS:
self.assertFalse(
list(algorithm(t, w, n, m, k)),
'Algorithm {}, text {}, pattern {}'.format(
algorithm.__name__, t, w))
def test_get_first_match_with_dont_care(self):
self.check_get_first_match_with_edit_distance('#abbaba', '#?ba', 6, 3, 1, 4)
def test_get_all_matches_with_dont_care(self):
self.check_get_all_matches_with_edit_distance(
'#abaaabbaababb', '#a?b', 13, 3, 1, [2, 6, 7, 10, 12, 13])
def test_no_match(self):
self.check_no_match('#abaaba', '#b?b?b', 6, 5, 2)
@run_large
def test_random_string_matching_with_edit_distance(self):
T, n, m, A = 100, 300, 10, ['a', 'b']
for _ in range(T):
t, w, k = rand.random_word(n, A), rand.random_word(m, A), randrange(m-1)
reference = [index-1 for index in range(1, n+2)
if check_subwords(t, w, m, k, index)]
self.check_get_all_matches_with_edit_distance(t, w, n, m, k, reference)
@run_large
def test_all_string_matching_with_dont_care(self):
N, M, A = 7, 3, ['a', 'b']
for n in range(2, N + 1):
for t in itertools.product(A, repeat = n):
t = '#' + ''.join(t)
for m in range(1, M + 1):
for w in itertools.product(A + ['?'], repeat = m):
w = '#' + ''.join(w)
k = w.count('?')
reference = [index-1 for index in range(2, n+2)
if check_subwords(t, w, m, k, index)]
self.check_get_all_matches_with_edit_distance(t, w, n, m, k,
reference)
def check_subwords(t, w, m, k, index):
for i in range(max(1, index-m-k), index):
if distance.edit_distance('#' + t[i:index], w, len(t[i:index]), m) <= k:
return True
return False
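check_subwords above relies on distance.edit_distance; for orientation, a classic Wagner-Fischer formulation of edit distance (a sketch assuming the project's version matches the textbook definition, ignoring its '#' sentinel and extra length arguments) is:
def edit_distance_sketch(a, b):
    # d[i][j] = edit distance between a[:i] and b[:j]
    d = [[0] * (len(b) + 1) for _ in range(len(a) + 1)]
    for i in range(len(a) + 1):
        d[i][0] = i
    for j in range(len(b) + 1):
        d[0][j] = j
    for i in range(1, len(a) + 1):
        for j in range(1, len(b) + 1):
            cost = 0 if a[i - 1] == b[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution
    return d[len(a)][len(b)]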
| 37.269737
| 80
| 0.630185
| 826
| 5,665
| 4.021792
| 0.128329
| 0.016255
| 0.061409
| 0.019266
| 0.857616
| 0.843167
| 0.789585
| 0.757375
| 0.727875
| 0.677604
| 0
| 0.017345
| 0.236717
| 5,665
| 151
| 81
| 37.516556
| 0.750925
| 0
| 0
| 0.576
| 0
| 0
| 0.063195
| 0
| 0
| 0
| 0
| 0
| 0.048
| 1
| 0.136
| false
| 0
| 0.056
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
eae92dd398b45f9062d68c304222804190a1b6ac
| 127
|
py
|
Python
|
manager/dns/__init__.py
|
hreeder/ignition
|
d1fd492ef737be7c4a208601e54b2f9370dec36a
|
[
"MIT"
] | null | null | null |
manager/dns/__init__.py
|
hreeder/ignition
|
d1fd492ef737be7c4a208601e54b2f9370dec36a
|
[
"MIT"
] | null | null | null |
manager/dns/__init__.py
|
hreeder/ignition
|
d1fd492ef737be7c4a208601e54b2f9370dec36a
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
dns = Blueprint('dns', __name__, template_folder='templates')
from manager.dns import views
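A typical registration for this blueprint (a sketch; the real app factory lives elsewhere in the project, and the url_prefix is illustrative):
from flask import Flask
from manager.dns import dns
app = Flask(__name__)
app.register_blueprint(dns, url_prefix='/dns')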
| 21.166667
| 62
| 0.755906
| 16
| 127
| 5.6875
| 0.6875
| 0.263736
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15748
| 127
| 5
| 63
| 25.4
| 0.850467
| 0
| 0
| 0
| 0
| 0
| 0.098361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
d8066cd3e9ce367b6451a8d96ee487cde3ce11db
| 91
|
py
|
Python
|
brownclustering/__init__.py
|
yangyuan/brown-clustering
|
d0b2a4cf20f1dc925cf76431d6b740bec50f4859
|
[
"MIT"
] | 21
|
2018-04-01T16:55:49.000Z
|
2022-03-31T10:36:50.000Z
|
brownclustering/__init__.py
|
yangyuan/brown-clustering
|
d0b2a4cf20f1dc925cf76431d6b740bec50f4859
|
[
"MIT"
] | 1
|
2021-03-31T07:30:40.000Z
|
2021-03-31T07:30:40.000Z
|
brownclustering/__init__.py
|
yangyuan/brown-clustering
|
d0b2a4cf20f1dc925cf76431d6b740bec50f4859
|
[
"MIT"
] | 6
|
2018-11-12T11:18:20.000Z
|
2020-09-22T08:24:24.000Z
|
from brownclustering.corpus import Corpus
from brownclustering.core import BrownClustering
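
These two imports expose a corpus wrapper and the clustering driver. Their constructor signatures are not shown in this excerpt, so the snippet below is only a toy illustration of the bigram statistics Brown clustering starts from, not this package's API:

from collections import Counter

# Toy illustration only: Brown clustering merges word classes based on
# unigram and bigram counts such as these.
sentences = [["the", "cat", "sat"], ["the", "dog", "sat"]]
unigrams = Counter(w for s in sentences for w in s)
bigrams = Counter(pair for s in sentences for pair in zip(s, s[1:]))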
| 30.333333
| 48
| 0.89011
| 10
| 91
| 8.1
| 0.5
| 0.469136
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.087912
| 91
| 2
| 49
| 45.5
| 0.975904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
d827fc4c0960176a711a9d9d10c251e11d8975fa
| 239
|
py
|
Python
|
tests/test_base.py
|
FlorianLudwig/datamodel-code-generator
|
052882f7c5bd883fac0f42916a5a8fc9623f37ce
|
[
"MIT"
] | null | null | null |
tests/test_base.py
|
FlorianLudwig/datamodel-code-generator
|
052882f7c5bd883fac0f42916a5a8fc9623f37ce
|
[
"MIT"
] | null | null | null |
tests/test_base.py
|
FlorianLudwig/datamodel-code-generator
|
052882f7c5bd883fac0f42916a5a8fc9623f37ce
|
[
"MIT"
] | null | null | null |
from datamodel_code_generator.parser.base import snake_to_upper_camel
def test_snake_to_upper_camel_underscore():
"""In case a name starts with a underline, we should keep it."""
assert snake_to_upper_camel('_hello') == '_Hello'
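
An illustrative implementation that satisfies this test (not necessarily the one in datamodel_code_generator) preserves a leading underscore before camel-casing the rest:

def snake_to_upper_camel_sketch(word: str) -> str:
    # Hypothetical sketch: keep a single leading underscore, then
    # capitalize each underscore-separated chunk.
    prefix = '_' if word.startswith('_') else ''
    return prefix + ''.join(part[:1].upper() + part[1:]
                            for part in word.lstrip('_').split('_'))

assert snake_to_upper_camel_sketch('_hello') == '_Hello'
assert snake_to_upper_camel_sketch('hello_world') == 'HelloWorld'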
| 34.142857
| 69
| 0.778243
| 37
| 239
| 4.621622
| 0.72973
| 0.122807
| 0.210526
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133891
| 239
| 6
| 70
| 39.833333
| 0.826087
| 0.242678
| 0
| 0
| 0
| 0
| 0.068571
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
dc25408d5df10bb190353400afb5f95cf99a84f5
| 97
|
py
|
Python
|
data/data_controller.py
|
richardimms/Skyline
|
dc10022fd4ec61808666f5579e52cf2c0049a5b3
|
[
"MIT"
] | null | null | null |
data/data_controller.py
|
richardimms/Skyline
|
dc10022fd4ec61808666f5579e52cf2c0049a5b3
|
[
"MIT"
] | 4
|
2019-01-28T17:47:31.000Z
|
2019-01-28T17:56:25.000Z
|
data/data_controller.py
|
richardimms/Skyline
|
dc10022fd4ec61808666f5579e52cf2c0049a5b3
|
[
"MIT"
] | null | null | null |
class DBController:
    def insert(self):
        pass

    def connect(self):
        pass
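
A hypothetical elaboration of these stubs using only the standard library; the database path, schema handling, and class name are assumptions for illustration:

import sqlite3

class SQLiteController:
    def connect(self, path='skyline.db'):
        self.conn = sqlite3.connect(path)
        return self.conn

    def insert(self, table, values):
        # Values go through placeholders; the table name is interpolated
        # for brevity and must come from trusted code.
        placeholders = ', '.join('?' for _ in values)
        with self.conn:  # commits on success, rolls back on error
            self.conn.execute(
                'INSERT INTO {} VALUES ({})'.format(table, placeholders),
                values)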
| 9.7
| 21
| 0.453608
| 8
| 97
| 5.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.453608
| 97
| 9
| 22
| 10.777778
| 0.830189
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| true
| 0.4
| 0
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
dc6b9d9e791f7e054d5212d2de2da5d9128a6f13
| 1,827
|
py
|
Python
|
src/tree.py
|
thearyanmittal/turtle-art
|
0633db32f92795e13f96ea53181ca1ec36764a46
|
[
"MIT"
] | null | null | null |
src/tree.py
|
thearyanmittal/turtle-art
|
0633db32f92795e13f96ea53181ca1ec36764a46
|
[
"MIT"
] | null | null | null |
src/tree.py
|
thearyanmittal/turtle-art
|
0633db32f92795e13f96ea53181ca1ec36764a46
|
[
"MIT"
] | null | null | null |
from turtle import *
import random
harry = Turtle()
harry.penup()
harry.goto(0, 0)
harry.setheading(90)
harry.pendown()
harry.clear()
harry.hideturtle()
harry.speed(0)
colormode(255)
length = 100
angle = 15
depth = 10
rad = 150
ext = 15
def tree(radius, extent, depth, turtboi):
if depth > 0:
right = turtboi
right.pensize(depth)
if depth > 3:
right.pencolor(79, 30, 26)
elif depth > 2:
right.pencolor('#633936')
else:
leaf = random.choice(['#C91E0A', '#753a36', '#DF3908', '#EDA421', '#E98604', '#A79F0F', '#8B9216'])
right.pencolor(leaf)
left = turtboi.clone()
right.circle(radius, extent)
left.circle(-radius, extent)
tree(radius-1, extent, depth-1, left)
tree(radius-1, extent, depth-1, right)
def backwardtree(radius, extent, depth, turtboi):
if depth > 0:
right = turtboi
right.pensize(depth)
if depth > 3:
right.pencolor(79, 30, 26)
elif depth > 2:
right.pencolor('#633936')
else:
leaf = random.choice(['#C91E0A', '#753a36', '#DF3908', '#EDA421', '#E98604', '#A79F0F', '#8B9216'])
right.pencolor(leaf)
left = turtboi.clone()
right.circle(radius, -extent)
left.circle(-radius, -extent)
backwardtree(radius-1, extent, depth-1, left)
backwardtree(radius-1, extent, depth-1, right)
tree(rad, ext, depth, harry)
harry.penup()
harry.goto(0, 0)
harry.setheading(90)
harry.pendown()
backwardtree(rad, ext, depth, harry)
harry.penup()
harry.goto(0, 0)
harry.setheading(180)
harry.pendown()
input()
tree(rad, ext, depth, harry)
harry.penup()
harry.goto(0, 0)
harry.setheading(180)
harry.pendown()
backwardtree(rad, ext, depth, harry)
exitonclick()
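
Each call of tree at positive depth draws two arcs and recurses twice, so the work grows exponentially with depth. A quick sanity check of the arc count (no turtle required):

def segments(depth):
    # Each call at depth > 0 draws two arcs, then recurses twice.
    return 0 if depth == 0 else 2 + 2 * segments(depth - 1)

assert segments(10) == 2**11 - 2  # depth=10 draws 2046 arcs per tree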
| 21.494118
| 111
| 0.600438
| 226
| 1,827
| 4.853982
| 0.261062
| 0.065634
| 0.054695
| 0.06928
| 0.855059
| 0.855059
| 0.740201
| 0.703737
| 0.703737
| 0.703737
| 0
| 0.092956
| 0.246305
| 1,827
| 85
| 112
| 21.494118
| 0.703704
| 0
| 0
| 0.636364
| 0
| 0
| 0.061269
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030303
| false
| 0
| 0.030303
| 0
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f498a7fd0a08f4bcdf9cd3746c2dbb7d9656ff06
| 38
|
py
|
Python
|
app/provider/__init__.py
|
monosloth/console
|
a47e1479320a18a4b5716e87ee275985ebd5825f
|
[
"MIT"
] | null | null | null |
app/provider/__init__.py
|
monosloth/console
|
a47e1479320a18a4b5716e87ee275985ebd5825f
|
[
"MIT"
] | null | null | null |
app/provider/__init__.py
|
monosloth/console
|
a47e1479320a18a4b5716e87ee275985ebd5825f
|
[
"MIT"
] | null | null | null |
from .command import CommandProvider
| 19
| 37
| 0.842105
| 4
| 38
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131579
| 38
| 1
| 38
| 38
| 0.969697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f4b64e6077cf1962aa76a2288cd3c21363bd1d3a
| 8,770
|
py
|
Python
|
test/test_role-reg.py
|
ThorodanBrom/iudx-auth-server
|
52c798811b3e551f63c82645591a6bfca62052e6
|
[
"MIT"
] | null | null | null |
test/test_role-reg.py
|
ThorodanBrom/iudx-auth-server
|
52c798811b3e551f63c82645591a6bfca62052e6
|
[
"MIT"
] | null | null | null |
test/test_role-reg.py
|
ThorodanBrom/iudx-auth-server
|
52c798811b3e551f63c82645591a6bfca62052e6
|
[
"MIT"
] | null | null | null |
from add_org import add_organization
from consent import *
import random
import string
name = { "title" : "mr.",
"firstName" : "abc",
"lastName" : "xyz"
}
csr = "-----BEGIN CERTIFICATE REQUEST-----\nMIICjDCCAXQCAQAwRzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQK\nDAtNeU9yZywgSW5jLjEVMBMGA1UEAwwMbXlkb21haW4uY29tMIIBIjANBgkqhkiG\n9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyhF2a5PeL72zGdL47/6zVQQQtZJcO01iVbjR\nSSyswUa2jcfYfoQEVKo1JAz25G3nYfSW1Te3OWjuihvPhZeatFSUwTxcZJFxzIWm\n4/gOQIhJKCA/Wry3liW2sjIGLuHxeH2BoQCIEZyYcqVpRWEJ9RusRFcwPgvROigh\nhMXhgE86uaIRs0yPqzhc7sl53T4qx6qvQJ6uTXBWBvUELgSSgeyaT0gwU1mGmPck\n7Svo6tsWfBFfgT5Ecbqsc2nqChAExgocp5tkPJYcy8FB/tU/FW0rFthqecSvMrpS\ncZW9+iyzseyPrcK9ka6XSlVu9EoX82RW7SRyRL2T5VN3JemXfQIDAQABoAAwDQYJ\nKoZIhvcNAQELBQADggEBAJRFEYn6dSzEYpgYLItUm7Sp3LzquJw7QfMyUvsy45rp\n0VTdQdYp/hVR2aCLiD33ht4FxlhbZm/8XcTuYolP6AbF6FldxWmmFFS9LRAj7nTV\ndU1pZftwFPp6JsKUCYHVsuxs7swliXbEcBVtD6QktzZNrRJmUKi38DAFcbFwgLaM\nG/iRIm4DDj2hmanKp+vUWjXfj13naa7bDtIlzW96y24jsu+naabg8MVShfGCStIv\nrX3T2JkkSjpTw7YzIpgI8/Zg9VR1l0udvfh9bn7mjmOYc3EYwJKvuJDn1TzVuIIi\n9NmVasTjhZJ0PyWithWuZplo/LXUwSoid8HVyqe5ZVI=\n-----END CERTIFICATE REQUEST-----\n"
bad_csr = "-----BEGIN CERTIFICATE REQUEST-----\nMIICjDCCAXQCAQAwRzELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMRQwEgYDVQQK\nDAtNeU9yZywgSW5jLjEVMBMGA1UEAwwMbXlkb21haW4uY29tMIIBIjANBgkqhkiG\n9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyhF2a5PeL72zGdL47/6zVQQQtZJcO01iVbjR\nSSyswUa2jcfYfoQEVKo1JAz25G3nYfSW1Te3OWjuihvPhZeatFSUwTxcZJFxzIWm\n4/gOQIhJKCA/Wry3liW2sjIGLuHxeH2BoQCIEZyYcqVpRWEJ9RusRFcwPgvROigh\nhMXhgE86uaIRs0yPqzhc7sl53T4qx6qvQJ6uTXBWBvUELgSSgeyaT0gwU1mGmPck\n7Svo6tsWfBFfgT5Ecbqsc2nqChAExgocp5tkPJYcy8FB/tU/FW0rFthqecSvMrpS\ncZW9+iyzseyPrcK9ka6XSlVu9EoX82RW7SRyRL2T5VN3JemXfQIDAQABoAAwDQYJ\nKoZIhvcNAQELBQADggEBAJRFEYn6dSzEYpgYLItUm7Sp3LzquJw7QfMyUvsy45rp\n0VTdQdYp/hVR2aCLiD33ht4FxlhbZm/8XcTuYolP6AbF6FldxWmmFFS9LRAj7nTV\ndU1pZftwFPp6JsKUCYHVsuxs7swliXbEcBVtD6QktzZNrRJmUKi38DAFcbFwgLaM\nG/iRIm4DDj2hmanKp+vUWjXfj13nasaswwa7bDtIlzW96y24jsu+naabg8MVShfGCStIv\nrX3T2JkkSjpTw7YzIpgI8/Zg9VR1l0udvfh9bn7sdakjsd92jkamjmOYc3EYwJKvuJDn1TzVuIIi\n9NmVasTjhZJ0PyWithWuZplo/LXUwSoid8HVyqe5ZVI=\n-----END CERTIFICATE REQUEST-----\n"
# random website
website = ''.join(random.choice(string.ascii_lowercase) for _ in range(8)) + '.com'
# random email
email_name = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))
email = email_name + '@gmail.com'
org_id = add_organization(website)
def test_no_role():
# no role specified
r = role_reg(email, '9454234223', name , [], None, csr)
assert r['success'] == False
assert r['status_code'] == 400
def test_success():
r = role_reg(email, '9454234223', name , ["consumer"], None, csr)
print(r)
assert r['success'] == True
assert r['status_code'] == 200
def test_same_role():
# same role
r = role_reg(email, '9454234223', name , ["consumer"], None, csr)
assert r['success'] == False
assert r['status_code'] == 403
def test_email_mismatch_domain():
# email does not match domain of organization
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
def test_ingester_success():
email = email_name + '@' + website
r = role_reg(email, '9454234223', name , ["data ingester"], org_id, csr)
assert r['success'] == True
assert r['status_code'] == 200
def test_no_csr():
    # omitting the csr is valid for an existing user
email = email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder"], org_id)
assert r['success'] == True
assert r['status_code'] == 200
def test_invalid_role():
# invalid roles
email = email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder", "provider"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 400
def test_org_email_consumer():
# register as consumer with organisation mail w/o phone
email = email_name + '@' + website
r = role_reg(email, '', name , ["consumer"], None)
assert r['success'] == True
assert r['status_code'] == 200
def test_delegate_without_org_id():
# register as delegate without org ID
email = email_name + '@' + website
r = role_reg(email, '9454234223', name , ["delegate"], None)
assert r['success'] == False
assert r['status_code'] == 400
def test_delegate_without_phone():
# need phone number when registering as delegate
email = email_name + '@' + website
r = role_reg(email, '', name , ["delegate"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 400
def test_delegate_reg():
email = email_name + '@' + website
r = role_reg(email, '9454234223', name , ["delegate"], org_id)
assert r['success'] == True
assert r['status_code'] == 200
# new random email
new_email_name = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))
def test_non_org_email():
# all roles - non-org email
email = new_email_name + '@gmail.com'
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester", "consumer", "delegate"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
def test_no_csr_new_user():
    # no csr for a new user
email = new_email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester", "consumer", "delegate"], org_id)
assert r['success'] == False
assert r['status_code'] == 400
def test_bad_csr():
# bad csr
email = new_email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester", "consumer", "delegate"], org_id, bad_csr)
assert r['success'] == False
assert r['status_code'] == 400
def test_invalid_org_id():
# invalid org ID
email = new_email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester", "consumer", "delegate"], 210781030, csr)
assert r['success'] == False
assert r['status_code'] == 403
def test_missing_phone():
email = new_email_name + '@' + website
r = role_reg(email, '', name , ["onboarder", "data ingester", "consumer", "delegate"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 400
def test_all_role_reg():
email = new_email_name + '@' + website
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester", "consumer", "delegate"], org_id, csr)
assert r['success'] == True
assert r['status_code'] == 200
new_website = ''.join(random.choice(string.ascii_lowercase) for _ in range(8)) + '.com'
new_org_id = add_organization(new_website)
def test_diff_email_from_org_domain():
# onboarder, ingester cannot register with org using different domain email
email = new_email_name + '@gmail.com'
r = role_reg(email, '9454234223', name , ["onboarder", "data ingester"], new_org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
#### tests with provider role ####
def test_provider_reg_after_other_role_reg():
# cannot register because the email was used for lesser role registration
r = provider_reg(email, '9454234223', name , org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
prov_email_name = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(6))
provider_email = prov_email_name + '@' + website
def test_provider_reg():
# provider registers with fresh email
r = provider_reg(provider_email, '9454234223', name , org_id, csr)
assert r['success'] == True
assert r['status_code'] == 200
def test_provider_cannot_get_other_roles():
    # provider cannot get any other role
r = role_reg(provider_email, '9454234223', name , ["data ingester"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
r = role_reg(provider_email, '9454234223', name , ["onboarder"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
r = role_reg(provider_email, '9454234223', name , ["consumer"], org_id, csr)
assert r['success'] == False
assert r['status_code'] == 403
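
The random-string idiom repeated throughout these tests could be factored into a small helper; a hypothetical sketch:

import random
import string

def random_local_part(length=6, alphabet=string.ascii_lowercase + string.digits):
    """Hypothetical helper consolidating the repeated random-string idiom."""
    return ''.join(random.choice(alphabet) for _ in range(length))

# e.g. email = random_local_part() + '@' + website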
| 49.269663
| 1,005
| 0.674914
| 908
| 8,770
| 6.322687
| 0.154185
| 0.056088
| 0.056088
| 0.068107
| 0.773384
| 0.771468
| 0.762237
| 0.736457
| 0.736457
| 0.690472
| 0
| 0.071952
| 0.206043
| 8,770
| 177
| 1,006
| 49.548023
| 0.752549
| 0.068301
| 0
| 0.532258
| 0
| 0.016129
| 0.375046
| 0.229522
| 0
| 0
| 0
| 0
| 0.370968
| 1
| 0.169355
| false
| 0
| 0.032258
| 0
| 0.201613
| 0.008065
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
f4d6d818aca86544b62ad0beb4a8efe3cfee92b1
| 90
|
py
|
Python
|
tenable/nessus/base.py
|
jwndlng/pyTenable
|
81edd314ec38d891288593d562bc4ae7c3fa0fb8
|
[
"MIT"
] | null | null | null |
tenable/nessus/base.py
|
jwndlng/pyTenable
|
81edd314ec38d891288593d562bc4ae7c3fa0fb8
|
[
"MIT"
] | null | null | null |
tenable/nessus/base.py
|
jwndlng/pyTenable
|
81edd314ec38d891288593d562bc4ae7c3fa0fb8
|
[
"MIT"
] | null | null | null |
'''
'''
from tenable.base import APIEndpoint
class NessusEndpoint(APIEndpoint):
pass
| 12.857143
| 36
| 0.733333
| 9
| 90
| 7.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155556
| 90
| 7
| 37
| 12.857143
| 0.868421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
521a57a07461bb0ae88cbca8c8715a1acc2af89f
| 104
|
py
|
Python
|
tonic/torch/__init__.py
|
Eyalcohenx/tonic
|
afc15c6fa23fed4f696f68f0acf961964b0172dc
|
[
"MIT"
] | 350
|
2020-08-06T13:49:11.000Z
|
2022-03-24T08:53:59.000Z
|
tonic/torch/__init__.py
|
Eyalcohenx/tonic
|
afc15c6fa23fed4f696f68f0acf961964b0172dc
|
[
"MIT"
] | 12
|
2020-08-07T02:21:58.000Z
|
2021-05-20T11:50:44.000Z
|
tonic/torch/__init__.py
|
Eyalcohenx/tonic
|
afc15c6fa23fed4f696f68f0acf961964b0172dc
|
[
"MIT"
] | 35
|
2020-08-06T16:53:40.000Z
|
2021-12-17T06:01:09.000Z
|
from . import agents, models, normalizers, updaters
__all__ = ["agents", "models", "normalizers", "updaters"]
| 20.8
| 51
| 0.759615
| 11
| 104
| 6.818182
| 0.636364
| 0.32
| 0.613333
| 0.826667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 104
| 4
| 52
| 26
| 0.842697
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
871a9648aa34d2d6cb89c1f9a0b1c431c88d7fce
| 33
|
py
|
Python
|
components/__init__.py
|
chrisvoncsefalvay/dash-sir-interactive-model
|
97d854774fb5395452127b5627efab39bddcdbdf
|
[
"BSD-3-Clause"
] | 3
|
2020-11-29T06:36:23.000Z
|
2021-11-28T13:10:46.000Z
|
components/__init__.py
|
chrisvoncsefalvay/dash-sir-interactive-model
|
97d854774fb5395452127b5627efab39bddcdbdf
|
[
"BSD-3-Clause"
] | null | null | null |
components/__init__.py
|
chrisvoncsefalvay/dash-sir-interactive-model
|
97d854774fb5395452127b5627efab39bddcdbdf
|
[
"BSD-3-Clause"
] | null | null | null |
from components.plot import solve
| 33
| 33
| 0.878788
| 5
| 33
| 5.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.090909
| 33
| 1
| 33
| 33
| 0.966667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
875ba8d91e7fe08d23528d35454d3982e49fee3b
| 36
|
py
|
Python
|
02/01/center.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
02/01/center.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | 39
|
2017-07-31T22:54:01.000Z
|
2017-08-31T00:19:03.000Z
|
02/01/center.py
|
pylangstudy/201708
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
[
"CC0-1.0"
] | null | null | null |
print('>' + 'ABC'.center(10) + '<')
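
str.center pads the string to the requested width, splitting the padding as evenly as possible, and also accepts an optional fill character:

print('>' + 'ABC'.center(10) + '<')       # >   ABC    <
print('>' + 'ABC'.center(10, '-') + '<')  # >---ABC----<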
| 18
| 35
| 0.444444
| 4
| 36
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.064516
| 0.138889
| 36
| 1
| 36
| 36
| 0.451613
| 0
| 0
| 0
| 0
| 0
| 0.138889
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
875c08254c084369f479ebcd2f3edd4efb88a9bc
| 298
|
py
|
Python
|
src/utils/__init__.py
|
jonasvj/protein-generation
|
ad716f2dba6f6642a6d54571571e6f539cee3644
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
jonasvj/protein-generation
|
ad716f2dba6f6642a6d54571571e6f539cee3644
|
[
"MIT"
] | null | null | null |
src/utils/__init__.py
|
jonasvj/protein-generation
|
ad716f2dba6f6642a6d54571571e6f539cee3644
|
[
"MIT"
] | null | null | null |
from .utils import SequenceDataset
from .utils import custom_collate_fn
from .utils import train_model_cli
from .utils import get_repo_dir
from .utils import get_device
from .utils import set_seeds
from .utils import random_split
from .utils import load_pickle_obj
from .utils import set_font_sizes
| 33.111111
| 36
| 0.852349
| 49
| 298
| 4.918367
| 0.469388
| 0.3361
| 0.560166
| 0.149378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11745
| 298
| 9
| 37
| 33.111111
| 0.91635
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5e451603189caa998223b03482d730e236665fe5
| 149
|
py
|
Python
|
pyqc/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
pyqc/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
pyqc/__init__.py
|
shunzgim/PyQC
|
8bcbb5b6c5990cac578b2645c558a1fdac29bc1f
|
[
"MIT"
] | null | null | null |
from pyqc.environment import *
from pyqc.backends import *
from pyqc.gates import *
from pyqc.types import *
from pyqc.qcglue import *
name = 'pyqc'
| 21.285714
| 30
| 0.758389
| 22
| 149
| 5.136364
| 0.409091
| 0.353982
| 0.495575
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.154362
| 149
| 7
| 31
| 21.285714
| 0.896825
| 0
| 0
| 0
| 0
| 0
| 0.026667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.833333
| 0
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5e6611586dd51fd650bb2589d646c0085eebc185
| 4,779
|
py
|
Python
|
discordbot/utils/streamer_utills.py
|
he305/discordbot
|
63949e31f7579f45de42b91f638f97dc17c1393d
|
[
"MIT"
] | null | null | null |
discordbot/utils/streamer_utills.py
|
he305/discordbot
|
63949e31f7579f45de42b91f638f97dc17c1393d
|
[
"MIT"
] | null | null | null |
discordbot/utils/streamer_utills.py
|
he305/discordbot
|
63949e31f7579f45de42b91f638f97dc17c1393d
|
[
"MIT"
] | null | null | null |
import aiohttp
import asyncio
from discordbot.hidden_data import CLIENT_ID
headers = {
'Client-ID': CLIENT_ID,
'Accept': 'application/vnd.twitchtv.v5+json'
}
class TwitchUtills:
@staticmethod
async def get_channel_by_name(streamer_name):
async with aiohttp.ClientSession() as session:
async with session.get(
"https://api.twitch.tv/kraken/users?login=" + streamer_name,
timeout=10,
headers=headers) as resp:
stream_data = await resp.json()
user_id = stream_data['users'][0]['_id']
return user_id
@staticmethod
async def __get_stream(streamer_id):
async with aiohttp.ClientSession() as session:
async with session.get(
"https://api.twitch.tv/kraken/streams/" + streamer_id,
timeout=10,
headers=headers) as resp:
stream_data = await resp.json()
if stream_data["stream"] is None:
return None
return stream_data["stream"]
@staticmethod
async def get_streaming_status(streamer_id):
stream = await TwitchUtills.__get_stream(streamer_id)
if stream is None:
return False
return True
@staticmethod
async def get_game(streamer_id):
stream = await TwitchUtills.__get_stream(streamer_id)
if stream is None:
return ""
return stream["game"]
@staticmethod
async def get_viewers(streamer_id):
stream = await TwitchUtills.__get_stream(streamer_id)
if stream is None:
return "0"
return stream["viewers"]
@staticmethod
async def get_title(streamer_id):
stream = await TwitchUtills.__get_stream(streamer_id)
if stream is None:
return ""
return stream["channel"]["status"]
class WasdUtills:
@staticmethod
async def __get_stream(streamer_id):
async with aiohttp.ClientSession() as session:
async with session.get(
"https://wasd.tv/api/media-containers?media_container_status=RUNNING&limit=1&offset=0&media_container_type=SINGLE,COOP&channel_id=" + streamer_id,
timeout=10) as resp:
stream_data = await resp.json()
if not stream_data["result"]:
return None
return stream_data["result"][0]
@staticmethod
async def get_streaming_status(streamer_id):
stream = await WasdUtills.__get_stream(streamer_id)
if stream is None:
return False
return True
@staticmethod
async def get_viewers(streamer_id):
stream = await WasdUtills.__get_stream(streamer_id)
if stream is None:
return "0"
return stream["media_container_streams"][0]["stream_current_viewers"]
@staticmethod
async def get_title(streamer_id):
stream = await WasdUtills.__get_stream(streamer_id)
if stream is None:
return ""
return stream["media_container_name"]
class GoodgameUtills:
@staticmethod
async def get_channel_by_name(streamer_name):
async with aiohttp.ClientSession() as session:
async with session.get(
'http://goodgame.ru/api/getchannelstatus?id={}&fmt=json'.format(streamer_name),
timeout=10,
headers=headers) as resp:
stream_data = await resp.json()
if not stream_data:
return None
user_id = next(iter(stream_data))
return user_id
@staticmethod
async def __get_stream(streamer_id):
async with aiohttp.ClientSession() as session:
async with session.get(
"https://goodgame.ru/api/getchannelstatus?fmt=json&id=" + streamer_id,
timeout=10) as resp:
stream_data = await resp.json()
if not stream_data:
return None
return stream_data[streamer_id]
@staticmethod
async def get_streaming_status(streamer_id):
stream = await GoodgameUtills.__get_stream(streamer_id)
if stream is None or stream["status"] == "Dead":
return False
return True
@staticmethod
async def get_viewers(streamer_id):
stream = await GoodgameUtills.__get_stream(streamer_id)
if stream is None:
return "0"
return stream["viewers"]
@staticmethod
async def get_title(streamer_id):
stream = await GoodgameUtills.__get_stream(streamer_id)
if stream is None:
return ""
return stream["title"]
| 32.073826
| 162
| 0.60159
| 528
| 4,779
| 5.214015
| 0.155303
| 0.098075
| 0.108972
| 0.125318
| 0.790774
| 0.765347
| 0.765347
| 0.765347
| 0.765347
| 0.75445
| 0
| 0.005835
| 0.318686
| 4,779
| 148
| 163
| 32.290541
| 0.839681
| 0
| 0
| 0.764228
| 0
| 0.00813
| 0.106089
| 0.016112
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.284553
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5ea3a2280adc9aaf3df67fbd422271e2fb608dca
| 14,161
|
py
|
Python
|
ota/gui/frame_scroll.py
|
estherlin/Ocular-Torsion-Quantification
|
2e948b5ca97094b60d98e7bf3c07bb32fb4cbd09
|
[
"MIT"
] | null | null | null |
ota/gui/frame_scroll.py
|
estherlin/Ocular-Torsion-Quantification
|
2e948b5ca97094b60d98e7bf3c07bb32fb4cbd09
|
[
"MIT"
] | null | null | null |
ota/gui/frame_scroll.py
|
estherlin/Ocular-Torsion-Quantification
|
2e948b5ca97094b60d98e7bf3c07bb32fb4cbd09
|
[
"MIT"
] | null | null | null |
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname('.'), os.path.pardir)))
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.patches import Ellipse
from matplotlib.patches import Arrow
from matplotlib.patches import Wedge
class FrameTracker(object):
"""
Object that displays a video frame. Class used by frame_scroll method.
Parameters
------------------------
ax : object containing elements of a figure
Used to set window title and axis labels
video : array_like
series of video frames
"""
def __init__(self, ax, video):
self.ax = ax
self.ax.set_title('Use keyboard to navigate images')
self.video = video
self.slices = len(self.video)
self.ind = 0
self.im = ax.imshow(self.video[self.ind])
self.ax.set_xlabel('Frame %s' % self.ind)
def on_key(self, event):
if event.key == 'up':
self.ind = (self.ind + 1) % self.slices
elif event.key =='down':
self.ind = (self.ind - 1) % self.slices
elif event.key =='right':
self.ind = (self.ind + 50) % self.slices
elif event.key =='left':
self.ind = (self.ind - 50) % self.slices
self.update()
def update(self):
self.im.set_data(self.video[self.ind])
self.ax.set_xlabel('Frame %s' % self.ind)
self.im.axes.figure.canvas.draw()
class EyelidTracker(FrameTracker):
"""
    Object that displays a video frame with the located eyelid overlaid. Class used by the eyelid_scroll method.
Parameters
------------------------
ax : object containing elements of a figure
Used to set window title and axis labels
video : array_like
series of video frames
    eyelid_list : dictionary
dictionary of pupils where the key is the frame index and the value is the frame with the eyelid removed.
Does not need to include all video frames.
"""
def __init__(self, ax, video, eyelid_list):
FrameTracker.__init__(self, ax, video)
self.eyelid_list = eyelid_list
def update(self):
display_img = self.video[self.ind]
if self.ind in self.eyelid_list:
self.eyelid_at_ind = self.eyelid_list[self.ind]
if self.eyelid_at_ind is not None:
display_img = self.eyelid_at_ind
self.im.set_data(display_img)
self.ax.set_xlabel('Frame %s' % self.ind)
self.im.axes.figure.canvas.draw()
class PupilTracker(FrameTracker):
"""
    Object that displays a video frame with the located pupil overlaid. Class used by the pupil_scroll method.
Parameters
------------------------
ax : object containing elements of a figure
Used to set window title and axis labels
video : array_like
series of video frames
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
"""
def __init__(self, ax, video, pupil_list):
FrameTracker.__init__(self, ax, video)
self.pupil_list = pupil_list
def update(self):
try:
self.pupil_patch.remove()
self.center_patch.remove()
except AttributeError:
pass
except ValueError:
pass
if self.ind in self.pupil_list:
self.pupil_at_ind = self.pupil_list[self.ind]
if self.pupil_at_ind:
#self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
self.pupil_circle = Ellipse((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row), self.pupil_at_ind.width, self.pupil_at_ind.height,fill=False,ec=[1,0,0])
self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0], fc=[1,0,0])
self.pupil_patch = self.ax.add_patch(self.pupil_circle)
self.center_patch = self.ax.add_patch(self.pupil_center)
else:
print('ERROR: No pupil at frame index %d' % (self.ind))
self.im.set_data(self.video[self.ind])
self.ax.set_xlabel('Frame %s' % self.ind)
self.im.axes.figure.canvas.draw()
class TorsionTracker(FrameTracker):
'''
    Torsion-tracking object. The window overlays rotating x and y axes to visualize torsion. Class used by the torsion_scroll method.
Parameters:
------------------------
ax : object containing elements of a figure
Used to set window title and axis labels
video : array_like
video to scroll through
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
offset_first_frame : dictionary
dictionary of rotation angles. key is the frame index and the value is the rotation.
Does not need to include all video frames
'''
def __init__(self, ax, video, pupil_list, offset_first_frame):
FrameTracker.__init__(self, ax, video)
self.pupil_list = pupil_list
self.torsion_list = offset_first_frame
def update(self):
try:
self.pupil_patch.remove()
self.center_patch.remove()
self.x_patch.remove()
self.y_patch.remove()
except AttributeError:
pass
except ValueError:
pass
if self.ind in self.pupil_list:
self.pupil_at_ind = self.pupil_list[self.ind]
if self.pupil_at_ind:
self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0],fc=[1,0,0])
self.pupil_patch = self.ax.add_patch(self.pupil_circle)
self.center_patch = self.ax.add_patch(self.pupil_center)
else:
print('ERROR: No pupil at frame index %d' % (self.ind))
if self.ind in self.torsion_list:
self.angle = self.torsion_list[self.ind]
radius = self.video.height/2
if self.pupil_at_ind:
self.x_axis = Arrow(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row,radius*np.cos(np.pi*(self.angle)/180),-radius*np.sin(np.pi*(self.angle)/180), width = 5, ec=[1,0,0], fc=[1,0,0], fill=True)
self.y_axis = Arrow(self.pupil_at_ind.center_col, self.pupil_at_ind.center_row,radius*np.cos(np.pi*(self.angle+90)/180),-radius*np.sin(np.pi*(self.angle+90)/180), width = 5, ec=[1,0,0], fc=[1,0,0], fill=True)
self.x_patch = self.ax.add_patch(self.x_axis)
self.y_patch = self.ax.add_patch(self.y_axis)
else:
print('ERROR: No pupil at frame index %d' % (self.ind))
self.im.set_data(self.video[self.ind])
self.ax.set_xlabel('Frame %s' % self.ind)
self.im.axes.figure.canvas.draw()
class WindowTracker(FrameTracker):
'''
    Window-tracking object. The overlay window's position is updated while frames scroll. Class used by the window_scroll method.
Parameters:
------------------------
ax : object containing elements of a figure
Used to set window title and axis labels
video : array_like
video to scroll through
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
offset_first_frame : dictionary
dictionary of rotation angles. key is the frame index and the value is the rotation.
Does not need to include all video frames
theta_window : tuple of angles
theta[0] is the lower bound of the window
theta[1] is the upper bound of the window
WINDOW_RADIUS: integer
Pixel width of the window radius
'''
def __init__(self, ax, video, pupil_list, offset_first_frame,theta_window,WINDOW_RADIUS):
FrameTracker.__init__(self, ax, video)
self.pupil_list = pupil_list
self.offset_first_frame = offset_first_frame
self.theta_window = theta_window
self.WINDOW_RADIUS = WINDOW_RADIUS
def update(self):
try:
self.pupil_patch.remove()
self.center_patch.remove()
self.window_patch.remove()
except AttributeError:
pass
except ValueError:
pass
if self.ind in self.pupil_list:
self.pupil_at_ind = self.pupil_list[self.ind]
if self.pupil_at_ind:
self.pupil_circle = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius,fill=False,ec=[1,0,0])
self.pupil_center = Circle((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),int(0.1*self.pupil_at_ind.radius),fill=True,ec=[1,0,0],fc=[1,0,0])
self.pupil_patch = self.ax.add_patch(self.pupil_circle)
self.center_patch = self.ax.add_patch(self.pupil_center)
else:
print('ERROR: No pupil at frame index %d' % (self.ind))
if self.ind in self.offset_first_frame:
self.angle = self.offset_first_frame[self.ind]
radius = self.video.height/2
self.window = Wedge((self.pupil_at_ind.center_col,self.pupil_at_ind.center_row),self.pupil_at_ind.radius+self.WINDOW_RADIUS,-(self.theta_window[1]+self.angle),-(self.theta_window[0]+self.angle),self.WINDOW_RADIUS,fill=False,ec=[1,0,0])
self.window_patch = self.ax.add_patch(self.window)
self.im.set_data(self.video[self.ind])
self.ax.set_xlabel('Frame %s' % self.ind)
self.im.axes.figure.canvas.draw()
#plt.savefig('frame_%d.png' % (self.ind), bbox_inches='tight')
def frame_scroll(video):
'''
Allows user to scroll through video frames using the keyboard.
Parameters:
------------------------
video : array_like
video to scroll through
'''
fig, ax = plt.subplots(1, 1)
tracker = FrameTracker(ax, video)
fig.canvas.mpl_connect('key_press_event', tracker.on_key)
plt.show()
def eyelid_scroll(video, eyelid_list):
'''
Overlays eyelid during frame scroll
Parameters:
------------------------
video : array_like
video to scroll through
eyelid_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
'''
fig, ax = plt.subplots(1, 1)
tracker = EyelidTracker(ax, video, eyelid_list)
fig.canvas.mpl_connect('key_press_event', tracker.on_key)
plt.show()
def pupil_scroll(video,pupil_list):
'''
Overlays pupil during frame scroll
Parameters:
------------------------
video : array_like
video to scroll through
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
'''
fig, ax = plt.subplots(1, 1)
tracker = PupilTracker(ax, video, pupil_list)
fig.canvas.mpl_connect('key_press_event', tracker.on_key)
plt.show()
def torsion_scroll(video, pupil_list, offset_first_frame):
'''
Tracks torsion using rotating 2D axis during frame scroll.
Parameters:
------------------------
video : array_like
video to scroll through
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
offset_first_frame : dictionary
dictionary of rotation angles. key is the frame index and the value is the rotation.
Does not need to include all video frames
'''
fig, ax = plt.subplots(1,1)
tracker = TorsionTracker(ax, video, pupil_list, offset_first_frame)
fig.canvas.mpl_connect('key_press_event', tracker.on_key)
plt.show()
def window_scroll(video,pupil_list,offset_first_frame,theta_window,WINDOW_RADIUS):
'''
Tracks window location during frame scroll.
Parameters:
------------------------
video : array_like
video to scroll through
pupil_list : dictionary
dictionary of pupils where the key is the frame index and the value is the pupil.
Does not need to include all video frames.
offset_first_frame : dictionary
dictionary of rotation angles. key is the frame index and the value is the rotation.
Does not need to include all video frames
theta_window : tuple of angles
theta[0] is the lower bound of the window
theta[1] is the upper bound of the window
WINDOW_RADIUS: integer
Pixel width of the window radius
'''
fig, ax = plt.subplots(1,1)
tracker = WindowTracker(ax, video, pupil_list, offset_first_frame, theta_window,WINDOW_RADIUS)
fig.canvas.mpl_connect('key_press_event', tracker.on_key)
plt.show()
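
A minimal smoke test for the scroller, assuming any sequence of 2-D arrays works (FrameTracker only needs len() and integer indexing):

if __name__ == '__main__':
    # Synthetic grayscale frames stand in for a real video here.
    frames = [np.random.rand(64, 64) for _ in range(20)]
    frame_scroll(frames)  # navigate with the arrow keys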
| 40.002825
| 247
| 0.605395
| 1,903
| 14,161
| 4.335786
| 0.096689
| 0.06981
| 0.047994
| 0.061084
| 0.806205
| 0.791662
| 0.776876
| 0.739426
| 0.724639
| 0.724639
| 0
| 0.009489
| 0.292988
| 14,161
| 353
| 248
| 40.116147
| 0.814622
| 0.381117
| 0
| 0.573171
| 0
| 0
| 0.036704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0.036585
| 0.04878
| 0
| 0.176829
| 0.02439
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5ec17dadd52ac718e220528c5cff3fa3cf253139
| 120
|
py
|
Python
|
modules/unit_tests/core/ui/__init__.py
|
nursix/DRKCM
|
09328289ff721c416494398aa751ff99906327cb
|
[
"MIT"
] | 3
|
2022-01-26T08:07:54.000Z
|
2022-03-21T21:53:52.000Z
|
modules/unit_tests/core/ui/__init__.py
|
nursix/eden-asp
|
e49f46cb6488918f8d5a163dcd5a900cd686978c
|
[
"MIT"
] | null | null | null |
modules/unit_tests/core/ui/__init__.py
|
nursix/eden-asp
|
e49f46cb6488918f8d5a163dcd5a900cd686978c
|
[
"MIT"
] | 1
|
2017-10-03T13:03:47.000Z
|
2017-10-03T13:03:47.000Z
|
from .dashboard import *
from .datatable import *
from .forms import *
from .navigation import *
from .widgets import *
| 20
| 25
| 0.75
| 15
| 120
| 6
| 0.466667
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 120
| 5
| 26
| 24
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
0d5d15f9c7acfc8531486077609f46759fd96861
| 32,244
|
py
|
Python
|
cli/tests/pcluster3_config_converter/test_pcluster3_config_converter.py
|
enrico-usai/cfncluster
|
acf083776c301d4f2a03ce5cd6fc79f9b88c74e0
|
[
"Apache-2.0"
] | 415
|
2018-11-13T15:02:15.000Z
|
2022-03-31T15:26:06.000Z
|
cli/tests/pcluster3_config_converter/test_pcluster3_config_converter.py
|
enrico-usai/cfncluster
|
acf083776c301d4f2a03ce5cd6fc79f9b88c74e0
|
[
"Apache-2.0"
] | 2,522
|
2018-11-13T16:16:27.000Z
|
2022-03-31T13:57:10.000Z
|
cli/tests/pcluster3_config_converter/test_pcluster3_config_converter.py
|
enrico-usai/cfncluster
|
acf083776c301d4f2a03ce5cd6fc79f9b88c74e0
|
[
"Apache-2.0"
] | 164
|
2018-11-14T22:47:46.000Z
|
2022-03-22T11:33:22.000Z
|
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at http://aws.amazon.com/apache2.0/
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import pytest
import yaml
from assertpy import assert_that
from pcluster3_config_converter.pcluster3_config_converter import Pcluster3ConfigConverter
from tests.pcluster3_config_converter import test_data
@pytest.mark.parametrize(
"expected_input, expected_output, warn",
[
(
"pcluster.config.ini",
"pcluster.config.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to "
"False in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to "
"both headnode and scheduling sections. Please review the configuration file after conversion "
"and decide whether to further trim down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/scripts/pre_install.sh is added to both headnode and "
"scheduling sections. Please review the configuration file after conversion and decide whether to "
"further trim down the permissions and specialize.",
"Warning: post_install = s3://testbucekt/scripts/post_install.sh is added to both headnode and "
"scheduling sections. Please review the configuration file after conversion and decide whether "
"to further trim down the permissions and specialize.",
],
),
],
)
def test_pcluster3_config_converter_command(test_datadir, tmpdir, expected_input, expected_output, warn):
config_file_path = os.path.join(str(test_datadir), expected_input)
args = [
"pcluster3-config-converter",
"--config-file",
config_file_path,
"--output-file",
tmpdir / "pcluster.config.yaml",
]
result = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding="utf-8")
_assert_files_are_equal(
tmpdir / "pcluster.config.yaml",
test_datadir / expected_output,
)
for message in warn:
assert_that(result.stdout).contains(message)
@pytest.mark.parametrize(
"expected_input, expected_output, warn, error, force_convert, cluster_label",
[
(
"slurm_full.ini",
"slurm_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to both "
"headnode and scheduling sections. Please review the configuration file after conversion and decide "
"whether to further trim down the permissions and specialize.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: Parameters ['extra_json', 'custom_chef_cookbook', 'template_url', 'instance_types_data'] "
"are not officially supported and not recommended.",
"Warning: Duplicate names 'custom1' are not allowed in the SharedStorage section. Please change them "
"before cluster creation.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i1'. Please rename it before "
"cluster creation.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i3'. Please rename it before "
"cluster creation.",
"Warning: Parameter initial_count = 2 is no longer supported. Ignoring it during conversion.",
"Warning: '_' is not allowed in the name of 'compute_resource ondemand_i2'. Please rename it before "
"cluster creation.",
],
None,
True,
"default",
),
(
"slurm_required.ini",
"slurm_required.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-123 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
],
None,
False,
"cluster_label1",
),
(
"awsbatch_required.ini",
"awsbatch_required.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter sanity_check = false is no longer supported, please specify "
"`--suppress-validators ALL` during cluster creation.",
],
None,
False,
None,
),
(
"awsbatch_full.ini",
"awsbatch_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter sanity_check = false is no longer supported, please specify "
"`--suppress-validators ALL` during cluster creation.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: disable_hyperthreading = true is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: Parameters ['extra_json'] are not officially supported and not recommended.",
"Warning: Duplicate names 'custom1' are not allowed in the SharedStorage section. Please change them "
"before cluster creation.",
],
None,
True,
"default",
),
(
"slurm_full.ini",
None,
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-0e0f223cc35256b9a is no longer supported. Ignoring it "
"during conversion.",
"Warning: Parameter update_check = true is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
"Warning: Parameter encrypted_ephemeral = true is no longer supported. Ignoring it during conversion.",
"Warning: additional_iam_policies = arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess is added to both "
"headnode and scheduling sections. Please review the configuration file after conversion and decide "
"whether to further trim down the permissions and specialize.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: proxy_server = https://x.x.x.x:8080 is added to both headnode and scheduling sections. "
"Please review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: additional_sg = sg-xxxxxx is added to both headnode and scheduling sections. Please review "
"the configuration file after conversion and decide whether to further trim down the permissions and "
"specialize.",
"Warning: vpc_security_group_id = sg-xxxxxx is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
],
"ERROR: ['extra_json', 'custom_chef_cookbook', 'template_url', 'instance_types_data'] are not officially "
"supported and not recommended. If you want to proceed with conversion, please specify `--force-convert` "
"and rerun the command.",
False,
None,
),
(
"compute_subnet_cidr.ini",
None,
None,
"ERROR: Parameter compute_subnet_cidr = 0.0.0.0/16 is no longer supported. Please remove it and run the "
"converter again.",
False,
None,
),
(
"missing_vpc.ini",
None,
None,
"Missing vpc_settings in the configuration file",
False,
None,
),
(
"slurm_full.ini",
None,
None,
"The specified cluster section is not in the configuration.",
False,
"invalid_cluster_label",
),
(
"slurm_requred.ini",
None,
None,
"Can not find a valid cluster section.",
False,
None,
),
(
"sit_base.ini",
"sit_base.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = false is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
],
None,
False,
None,
),
(
"sit_full.ini",
"sit_full.yaml",
[
"Note: Volume encrypted defaults to True in AWS ParallelCluster version 3 while it defaults to False "
"in AWS ParallelCluster version 2.",
"Note: In AWS ParallelCluster version 3, access to the Instance Metadata Service(IMDS) on the head "
"node is restricted to the cluster administrator. If additional users required access to IMDS, you "
"can set HeadNode/Imds/Secured to False.",
"Warning: Parameter vpc_id = vpc-12345678 is no longer supported. Ignoring it during conversion.",
"Warning: Parameter update_check = false is no longer supported. Ignoring it during conversion.",
"Warning: Parameter ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} is no longer supported. Ignoring it during "
"conversion.",
"Warning: s3_read_write_resource = arn:aws:s3:::test/hello/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: s3_read_resource = arn:aws:s3:::testbucket/* is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: disable_hyperthreading = false is added to both headnode and scheduling sections. Please "
"review the configuration file after conversion and decide whether to further trim down the "
"permissions and specialize.",
"Warning: pre_install = s3://testbucket/pre_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: post_install = s3://testbucket/post_install.sh is added to both headnode and scheduling "
"sections. Please review the configuration file after conversion and decide whether to further trim "
"down the permissions and specialize.",
"Warning: Parameter initial_queue_size = 2 is no longer supported. Ignoring it during conversion.",
],
None,
False,
None,
),
],
)
def test_pcluster3_config_converter(
test_datadir, tmpdir, expected_input, expected_output, mocker, warn, error, force_convert, capsys, cluster_label
):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
converter = Pcluster3ConfigConverter(
test_datadir / expected_input, cluster_label, tmpdir / "output_yaml", False, force_convert
)
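    # The conversion pipeline below either writes a YAML file that must match the
    # expected output, or bails out with SystemExit whose message is checked.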
try:
converter.validate()
converter.convert_to_pcluster3_config()
converter.write_configuration_file()
_assert_files_are_equal(
tmpdir / "output_yaml",
test_datadir / expected_output,
)
except SystemExit as e:
print(e)
assert_that(e.args[0]).contains(error)
if warn:
readouterr = capsys.readouterr()
for message in warn:
assert_that(readouterr.out).contains(message)
@pytest.mark.parametrize(
"test_case",
test_data.region_test,
)
def test_convert_region(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.convert_region("Region")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.image_test,
)
def test_convert_image(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_image("Image")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.iam_test,
)
def test_convert_iam(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_iam("Iam")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.additional_packages_test,
)
def test_convert_additional_packages(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_additional_packages("AdditionalPackages")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.tags_test,
)
def test_convert_tags(test_case):
user_input, expected_output, error_message = test_case[0], test_case[1], test_case[2]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
try:
converter.convert_tags("Tags")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
except SystemExit as e:
assert_that(e.args[0]).contains(error_message)
@pytest.mark.parametrize(
"test_case",
test_data.monitoring_test,
)
def test_convert_monitoring(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_monitoring("Monitoring")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_custom_s3_bucket_test,
)
def test_convert_custom_s3_bucket(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_custom_s3_bucket("CustomS3Bucket")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_dev_settings_test,
)
def test_convert_dev_settings(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_dev_settings("DevSettings")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.convert_additional_resources_test,
)
def test_convert_additional_resources(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_additional_resources("AdditionalResources")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.shared_storage_test,
)
def test_convert_shared_storage(test_case):
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.convert_shared_storage("SharedStorage")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.headnode_test,
)
def test_convert_headnode(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output = test_case[0], test_case[1]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.validate_vpc_settings()
converter.convert_headnode("HeadNode")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
@pytest.mark.parametrize(
"test_case",
test_data.scheduling_test,
)
def test_convert_scheduling(test_case, mocker):
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter.Pcluster3ConfigConverter.get_region",
return_value="us-west-1",
)
mocker.patch(
"pcluster3_config_converter.pcluster3_config_converter._get_account_id",
return_value="1234567",
)
user_input, expected_output, warn = test_case[0], test_case[1], test_case[2]
expected_output_data = yaml.safe_load(expected_output)
converter = Pcluster3ConfigConverter(
config_file=user_input, cluster_template="default", output_file="dummy_output", input_as_string=True
)
converter.validate_cluster_section_name()
converter.validate_vpc_settings()
converter.convert_scheduling("Scheduling")
assert_that(converter.pcluster3_configuration).is_equal_to(expected_output_data)
if warn:
assert_that(converter.comments).contains(warn)
@pytest.mark.parametrize(
"pcluster2_field, value, pcluster3_field, method, error_message",
[
("proxy_server", "https://x.x.x.x:8080", "HttpProxyAddress", None, None),
("disable_hyperthreading", True, "DisableSimultaneousMultithreading", "getboolean", None),
("master_root_volume_size", 30, "Size", "getint", None),
(
"master_root_volume_size",
True,
"Size",
"getint",
"Wrong type for master_root_volume_size in dummy-section section: invalid literal for int() with base 10: "
"'True'",
),
("spot_price", 20.99, "SpotPrice", "getfloat", None),
],
)
def test_convert_single_field(
test_datadir, tmpdir, pcluster2_field, value, pcluster3_field, method, error_message, caplog, capsys
):
converter = Pcluster3ConfigConverter(
config_file="dummy_input", cluster_template="default", output_file="dummy_output"
)
converter.config_parser.read_dict({"dummy-section": {pcluster2_field: value}})
pcluster3_model = {}
try:
converter.convert_single_field("dummy-section", pcluster2_field, pcluster3_model, pcluster3_field, method)
assert_that(pcluster3_model).is_equal_to({pcluster3_field: value})
except SystemExit as e:
assert_that(e.args[0]).contains(error_message)
def _assert_files_are_equal(file, expected_file):
with open(file, "r") as f, open(expected_file, "r") as exp_f:
expected_file_content = exp_f.read()
expected_file_content = expected_file_content.replace("<DIR>", os.path.dirname(file))
assert_that(f.read()).is_equal_to(expected_file_content)
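# A minimal sketch (not part of the original file) of how the "<DIR>" placeholder
# consumed by _assert_files_are_equal resolves; all paths here are illustrative.
import os

expected_line = "include: <DIR>/extra.yaml"   # hypothetical expected-file content
generated = "/tmp/out/output_yaml"            # hypothetical generated-file path
resolved = expected_line.replace("<DIR>", os.path.dirname(generated))
assert resolved == "include: /tmp/out/extra.yaml"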
| [per-file metrics and quality-signal columns omitted] |
0da170411343528d71ec269986c80aa26ec2470f | 499 | py | Python | tests/__init__.py | steve1aa/microdot | c130d8f2d45dcce9606dda25d31d653ce91faf92 | ["MIT"] | 173 | 2019-04-16T11:17:22.000Z | 2022-03-30T11:19:26.000Z | tests/__init__.py | steve1aa/microdot | c130d8f2d45dcce9606dda25d31d653ce91faf92 | ["MIT"] | 36 | 2019-07-01T06:14:50.000Z | 2022-03-08T18:53:19.000Z | tests/__init__.py | steve1aa/microdot | c130d8f2d45dcce9606dda25d31d653ce91faf92 | ["MIT"] | 30 | 2019-04-16T16:21:36.000Z | 2022-03-22T12:56:30.000Z |
from tests.microdot.test_multidict import TestMultiDict
from tests.microdot.test_request import TestRequest
from tests.microdot.test_response import TestResponse
from tests.microdot.test_url_pattern import TestURLPattern
from tests.microdot.test_microdot import TestMicrodot
from tests.microdot_asyncio.test_request_asyncio import TestRequestAsync
from tests.microdot_asyncio.test_response_asyncio import TestResponseAsync
from tests.microdot_asyncio.test_microdot_asyncio import TestMicrodotAsync
| [per-file metrics and quality-signal columns omitted] |
0da8c07f8910a86d5cbd3a54f5d534c1cfd3b072 | 25 | py | Python | test.py | creepyCaller/esp8266 | ebbf693c5c58db1bb2c22f57885d2e2a2e99c750 | ["MIT"] | null | null | null | test.py | creepyCaller/esp8266 | ebbf693c5c58db1bb2c22f57885d2e2a2e99c750 | ["MIT"] | null | null | null | test.py | creepyCaller/esp8266 | ebbf693c5c58db1bb2c22f57885d2e2a2e99c750 | ["MIT"] | null | null | null |
print('this is A test !')
| [per-file metrics and quality-signal columns omitted] |
219ed4cc537cae2945a06e68b20e8f704a126dad | 157 | py | Python | seleniumdirector/__init__.py | cshanu/seleniumdirector | c2e7a343225bcca35e9119fd4d5854626e1ac48a | ["MIT"] | 5 | 2018-05-02T15:01:46.000Z | 2021-11-05T20:26:56.000Z | seleniumdirector/__init__.py | cshanu/seleniumdirector | c2e7a343225bcca35e9119fd4d5854626e1ac48a | ["MIT"] | null | null | null | seleniumdirector/__init__.py | cshanu/seleniumdirector | c2e7a343225bcca35e9119fd4d5854626e1ac48a | ["MIT"] | 1 | 2020-02-09T11:43:06.000Z | 2020-02-09T11:43:06.000Z |
from seleniumdirector.webdirector import WebDirector
from seleniumdirector.exceptions import SeleniumDirectorException
__version__ = "DEVELOPMENT_VERSION"
| [per-file metrics and quality-signal columns omitted] |
21a720d2b85806016364581454e18c5f25c5a7cc | 169 | py | Python | tests/improved/api/index.py | alex-d-bondarev/ugly-python-demo | 9171fa098573ad78de83f92c491f127548b57024 | ["MIT"] | null | null | null | tests/improved/api/index.py | alex-d-bondarev/ugly-python-demo | 9171fa098573ad78de83f92c491f127548b57024 | ["MIT"] | null | null | null | tests/improved/api/index.py | alex-d-bondarev/ugly-python-demo | 9171fa098573ad78de83f92c491f127548b57024 | ["MIT"] | null | null | null |
import requests
def get_index_response():
"""Send GET request to Index API
:return: response object
"""
return requests.get("http://127.0.0.1:5000/")
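# A minimal sketch (not part of the original file) of exercising
# get_index_response without a live server, by stubbing requests.get;
# the mocked attributes below are illustrative.
from unittest import mock

with mock.patch("requests.get") as fake_get:
    fake_get.return_value.status_code = 200
    response = get_index_response()
    assert response.status_code == 200
    fake_get.assert_called_once_with("http://127.0.0.1:5000/")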
| [per-file metrics and quality-signal columns omitted] |
df6b904167f50da219ec4d08cd3490669eb7ec54 | 78 | py | Python | math/numbers/base.py | ginnyTheCat/Simalia | 2d0b26d0f413d16d6a7cb62aa9612d9f346c6fbb | ["MIT"] | null | null | null | math/numbers/base.py | ginnyTheCat/Simalia | 2d0b26d0f413d16d6a7cb62aa9612d9f346c6fbb | ["MIT"] | null | null | null | math/numbers/base.py | ginnyTheCat/Simalia | 2d0b26d0f413d16d6a7cb62aa9612d9f346c6fbb | ["MIT"] | null | null | null |
from simalia.math.calcable import Calcable
class Number(Calcable):
pass
| [per-file metrics and quality-signal columns omitted] |
10d6c4c5b05622bc5ee902bf27f6c5508ad290b3 | 4,492 | py | Python | codes/labs_lecture19/lab02_graph_clustering/util/graph_generator.py | sanjaysaha1311/Deep_Learning_CS7454_2018_NTU | 3ab24cceb8cf4a7c2fe53e4b3a7500a48344a475 | ["MIT"] | 221 | 2019-08-15T02:19:23.000Z | 2022-02-22T00:53:06.000Z | codes/labs_lecture19/lab02_graph_clustering/util/graph_generator.py | ChristophBrune/CE7454_2018 | 76fd3b0b69a36b8119d46df1b8a0e8c654188bf6 | ["MIT"] | 1 | 2019-03-07T17:19:38.000Z | 2019-03-19T14:30:36.000Z | codes/labs_lecture19/lab02_graph_clustering/util/graph_generator.py | ChristophBrune/CE7454_2018 | 76fd3b0b69a36b8119d46df1b8a0e8c654188bf6 | ["MIT"] | 77 | 2019-08-15T07:06:00.000Z | 2021-11-25T10:12:09.000Z |
import numpy as np
import block
import torch
import scipy.sparse as sp
default_type='torch.cuda.FloatTensor'
default_type='torch.FloatTensor'  # NOTE: overrides the line above, so the CPU tensor type is the one in effect
class variable_size_graph():
def __init__(self, task_parameters):
# parameters
vocab_size = task_parameters['Voc']
nb_of_clust = task_parameters['nb_clusters_target']
clust_size_min = task_parameters['size_min']
clust_size_max = task_parameters['size_max']
p = task_parameters['p']
q = task_parameters['q']
self_loop = True
W0 = task_parameters['W0']
u0 = task_parameters['u0']
# create block model graph and put random signal on it
W,c=block.unbalanced_block_model(nb_of_clust,clust_size_min,clust_size_max,p,q)
u=np.random.randint(vocab_size,size=W.shape[0])
# add the subgraph to be detected
W,c=block.add_a_block(W0,W,c,nb_of_clust,q)
u=np.concatenate((u,u0),axis=0)
# shuffle
W,c,idx=block.schuffle(W,c)
u=u[idx]
u=torch.from_numpy(u)
u=u.long()
# add self loop
if self_loop:
for i in range(W.shape[0]):
W[i,i]=1
# create the target
target= (c==nb_of_clust).astype(float)
target=torch.from_numpy(target)
target=target.long()
# mapping matrices
W_coo=sp.coo_matrix(W)
nb_edges=W_coo.nnz
nb_vertices=W.shape[0]
edge_to_starting_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.row) ),
shape=(nb_edges, nb_vertices) )
edge_to_ending_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.col) ),
shape=(nb_edges, nb_vertices) )
# attribute
#self.adj_matrix=torch.from_numpy(W).type(default_type)
#self.edge_to_starting_vertex=torch.from_numpy(edge_to_starting_vertex.toarray()).type(default_type)
#self.edge_to_ending_vertex=torch.from_numpy(edge_to_ending_vertex.toarray()).type(default_type)
self.adj_matrix=W
self.edge_to_starting_vertex=edge_to_starting_vertex
self.edge_to_ending_vertex=edge_to_ending_vertex
self.signal=u
self.target=target
class graph_semi_super_clu():
def __init__(self, task_parameters):
# parameters
vocab_size = task_parameters['Voc']
nb_of_clust = task_parameters['nb_clusters_target']
clust_size_min = task_parameters['size_min']
clust_size_max = task_parameters['size_max']
p = task_parameters['p']
q = task_parameters['q']
self_loop = True
# block model
W, c = block.unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)
# add self loop
if self_loop:
for i in range(W.shape[0]):
W[i,i]=1
# shuffle
W,c,idx = block.schuffle(W,c)
# signal on block model
u = np.zeros(c.shape[0])
for r in range(nb_of_clust):
cluster = np.where(c==r)[0]
s = cluster[np.random.randint(cluster.shape[0])]
u[s] = r+1
# target
target = c
# convert to pytorch
u = torch.from_numpy(u)
u = u.long()
target = torch.from_numpy(target)
target = target.long()
# mapping matrices
W_coo=sp.coo_matrix(W)
nb_edges=W_coo.nnz
nb_vertices=W.shape[0]
edge_to_starting_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.row) ),
shape=(nb_edges, nb_vertices) )
edge_to_ending_vertex=sp.coo_matrix( ( np.ones(nb_edges) ,(np.arange(nb_edges), W_coo.col) ),
shape=(nb_edges, nb_vertices) )
# attribute
#self.adj_matrix=torch.from_numpy(W).type(default_type)
#self.edge_to_starting_vertex=torch.from_numpy(edge_to_starting_vertex.toarray()).type(default_type)
#self.edge_to_ending_vertex=torch.from_numpy(edge_to_ending_vertex.toarray()).type(default_type)
self.adj_matrix=W
self.edge_to_starting_vertex=edge_to_starting_vertex
self.edge_to_ending_vertex=edge_to_ending_vertex
self.signal=u
self.target=target
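# Both classes above read the same keys out of task_parameters; a minimal
# sketch of that dict, inferred from the lookups in __init__. All concrete
# values are illustrative, and the local `block` module is still required
# to actually instantiate either class.
import numpy as np

task_parameters = {
    "Voc": 3,                       # vocabulary size for the random node signal
    "nb_clusters_target": 5,        # number of clusters in the block model
    "size_min": 10,                 # smallest cluster size
    "size_max": 20,                 # largest cluster size
    "p": 0.5,                       # intra-cluster edge probability
    "q": 0.1,                       # inter-cluster edge probability
    "W0": np.ones((10, 10)),        # adjacency of the planted subgraph (variable_size_graph only)
    "u0": np.zeros(10, dtype=int),  # signal on the planted subgraph (variable_size_graph only)
}
# graph = variable_size_graph(task_parameters)  # needs the local `block` module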
| [per-file metrics and quality-signal columns omitted] |
8004d12dd5a9d5cf6b7467d2114f4a9de3749207 | 7,299 | py | Python | tests/test_api_labels.py | DMcP89/todoist-api-python | 89b601b8edad47bc999cd7b2ab36c5e2a9f8cd8a | ["MIT"] | 24 | 2021-12-07T18:37:29.000Z | 2022-03-31T23:09:48.000Z | tests/test_api_labels.py | DMcP89/todoist-api-python | 89b601b8edad47bc999cd7b2ab36c5e2a9f8cd8a | ["MIT"] | 15 | 2021-12-01T14:07:25.000Z | 2022-03-15T23:19:30.000Z | tests/test_api_labels.py | sadikkuzu/todoist-api-python | 75db44ad76a210ff4d7a3d5726d0f0ad3389f16e | ["MIT"] | 3 | 2021-12-08T22:19:12.000Z | 2022-02-18T06:36:40.000Z |
import json
import typing
from typing import Any, Dict, List
import pytest
import responses
from tests.data.test_defaults import (
DEFAULT_REQUEST_ID,
INVALID_ENTITY_ID,
REST_API_BASE_URL,
)
from tests.utils.test_utils import (
assert_auth_header,
assert_id_validation,
assert_request_id_header,
)
from todoist_api_python.api import TodoistAPI
from todoist_api_python.api_async import TodoistAPIAsync
from todoist_api_python.models import Label
@pytest.mark.asyncio
async def test_get_label(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
default_label_response: Dict[str, Any],
default_label: Label,
):
label_id = 1234
expected_endpoint = f"{REST_API_BASE_URL}/labels/{label_id}"
requests_mock.add(
responses.GET,
expected_endpoint,
json=default_label_response,
status=200,
)
label = todoist_api.get_label(label_id)
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert label == default_label
label = await todoist_api_async.get_label(label_id)
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert label == default_label
@typing.no_type_check
def test_get_label_invalid_id(
todoist_api: TodoistAPI,
requests_mock: responses.RequestsMock,
):
assert_id_validation(
lambda: todoist_api.get_label(INVALID_ENTITY_ID), requests_mock
)
@pytest.mark.asyncio
async def test_get_labels(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
default_labels_response: List[Dict[str, Any]],
default_labels_list: List[Label],
):
requests_mock.add(
responses.GET,
f"{REST_API_BASE_URL}/labels",
json=default_labels_response,
status=200,
)
labels = todoist_api.get_labels()
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert labels == default_labels_list
labels = await todoist_api_async.get_labels()
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert labels == default_labels_list
@pytest.mark.asyncio
async def test_add_label_minimal(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
default_label_response: Dict[str, Any],
default_label: Label,
):
label_name = "A Label"
expected_payload = {"name": label_name}
requests_mock.add(
responses.POST,
f"{REST_API_BASE_URL}/labels",
json=default_label_response,
status=200,
)
new_label = todoist_api.add_label(name=label_name, request_id=DEFAULT_REQUEST_ID)
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert_request_id_header(requests_mock.calls[0].request)
assert requests_mock.calls[0].request.body == json.dumps(expected_payload)
assert new_label == default_label
new_label = await todoist_api_async.add_label(
name=label_name, request_id=DEFAULT_REQUEST_ID
)
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert_request_id_header(requests_mock.calls[1].request)
assert requests_mock.calls[1].request.body == json.dumps(expected_payload)
assert new_label == default_label
@pytest.mark.asyncio
async def test_add_label_full(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
default_label_response: Dict[str, Any],
default_label: Label,
):
label_name = "A Label"
optional_args = {
"color": 30,
"order": 3,
"favorite": True,
}
expected_payload: Dict[str, Any] = {"name": label_name}
expected_payload.update(optional_args)
requests_mock.add(
responses.POST,
f"{REST_API_BASE_URL}/labels",
json=default_label_response,
status=200,
)
new_label = todoist_api.add_label(
name=label_name, request_id=DEFAULT_REQUEST_ID, **optional_args
)
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert_request_id_header(requests_mock.calls[0].request)
assert requests_mock.calls[0].request.body == json.dumps(expected_payload)
assert new_label == default_label
new_label = await todoist_api_async.add_label(
name=label_name, request_id=DEFAULT_REQUEST_ID, **optional_args
)
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert_request_id_header(requests_mock.calls[1].request)
assert requests_mock.calls[1].request.body == json.dumps(expected_payload)
assert new_label == default_label
@pytest.mark.asyncio
async def test_update_label(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
):
label_id = 123
args = {
"name": "An updated label",
"order": 2,
"color": 31,
"favorite": False,
}
requests_mock.add(
responses.POST, f"{REST_API_BASE_URL}/labels/{label_id}", status=204
)
response = todoist_api.update_label(
label_id=label_id, request_id=DEFAULT_REQUEST_ID, **args
)
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert_request_id_header(requests_mock.calls[0].request)
assert requests_mock.calls[0].request.body == json.dumps(args)
assert response is True
response = await todoist_api_async.update_label(
label_id=label_id, request_id=DEFAULT_REQUEST_ID, **args
)
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert_request_id_header(requests_mock.calls[1].request)
assert requests_mock.calls[1].request.body == json.dumps(args)
assert response is True
@typing.no_type_check
def test_update_label_invalid_id(
todoist_api: TodoistAPI,
requests_mock: responses.RequestsMock,
):
assert_id_validation(
lambda: todoist_api.update_label(INVALID_ENTITY_ID),
requests_mock,
)
@pytest.mark.asyncio
async def test_delete_label(
todoist_api: TodoistAPI,
todoist_api_async: TodoistAPIAsync,
requests_mock: responses.RequestsMock,
):
label_id = 1234
expected_endpoint = f"{REST_API_BASE_URL}/labels/{label_id}"
requests_mock.add(
responses.DELETE,
expected_endpoint,
status=204,
)
response = todoist_api.delete_label(label_id)
assert len(requests_mock.calls) == 1
assert_auth_header(requests_mock.calls[0].request)
assert response is True
response = await todoist_api_async.delete_label(label_id)
assert len(requests_mock.calls) == 2
assert_auth_header(requests_mock.calls[1].request)
assert response is True
@typing.no_type_check
def test_delete_label_invalid_id(
todoist_api: TodoistAPI,
requests_mock: responses.RequestsMock,
):
assert_id_validation(
lambda: todoist_api.delete_label(INVALID_ENTITY_ID),
requests_mock,
)
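# The fixtures these tests rely on (todoist_api, todoist_api_async,
# requests_mock, and the default_label* payloads) are defined elsewhere;
# a minimal conftest.py sketch for the first three might look like this
# (the token value is illustrative).
import pytest
import responses
from todoist_api_python.api import TodoistAPI
from todoist_api_python.api_async import TodoistAPIAsync

@pytest.fixture
def todoist_api():
    return TodoistAPI("dummy-token")

@pytest.fixture
def todoist_api_async():
    return TodoistAPIAsync("dummy-token")

@pytest.fixture
def requests_mock():
    # responses.RequestsMock intercepts outgoing HTTP while the context is open.
    with responses.RequestsMock() as mock:
        yield mock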
| [per-file metrics and quality-signal columns omitted] |
8044f2dce16268c857f523516d36cdc68ecefbb2 | 42 | py | Python | t/hello-plain.py | Hookscript/lang-python | 49bc8b5f2b46c29ef81658c7ca0764f889e50c50 | ["Unlicense"] | null | null | null | t/hello-plain.py | Hookscript/lang-python | 49bc8b5f2b46c29ef81658c7ca0764f889e50c50 | ["Unlicense"] | 1 | 2015-05-21T17:21:58.000Z | 2015-05-21T17:21:58.000Z | t/hello-plain.py | Hookscript/lang-python | 49bc8b5f2b46c29ef81658c7ca0764f889e50c50 | ["Unlicense"] | null | null | null |
import hookscript
print("Hello, world!")
| [per-file metrics and quality-signal columns omitted] |
804750c68056089fc0704f18ce1cbcd6b9305791 | 107 | py | Python | algorithms/selfie2anime/src/selfie2anime_test.py | algorithmia-algorithms/qa-algorithm-scripts | ac2ae204e473bb788fad989d41f56adf7a326a32 | ["MIT"] | null | null | null | algorithms/selfie2anime/src/selfie2anime_test.py | algorithmia-algorithms/qa-algorithm-scripts | ac2ae204e473bb788fad989d41f56adf7a326a32 | ["MIT"] | null | null | null | algorithms/selfie2anime/src/selfie2anime_test.py | algorithmia-algorithms/qa-algorithm-scripts | ac2ae204e473bb788fad989d41f56adf7a326a32 | ["MIT"] | null | null | null |
from . import selfie2anime
def test_selfie2anime():
assert selfie2anime.apply("Jane") == "hello Jane"
| [per-file metrics and quality-signal columns omitted] |
33e604f1e4dc7729c16c72f5c473fe480a8dd8fd | 11,596 | py | Python | usaspending_api/download/tests/unit/test_base_download_helpers.py | ststuck/usaspending-api | b13bd5bcba0369ff8512f61a34745626c3969391 | ["CC0-1.0"] | 217 | 2016-11-03T17:09:53.000Z | 2022-03-10T04:17:54.000Z | usaspending_api/download/tests/unit/test_base_download_helpers.py | ststuck/usaspending-api | b13bd5bcba0369ff8512f61a34745626c3969391 | ["CC0-1.0"] | 622 | 2016-09-02T19:18:23.000Z | 2022-03-29T17:11:01.000Z | usaspending_api/download/tests/unit/test_base_download_helpers.py | ststuck/usaspending-api | b13bd5bcba0369ff8512f61a34745626c3969391 | ["CC0-1.0"] | 93 | 2016-09-07T20:28:57.000Z | 2022-02-25T00:25:27.000Z |
import json
import pytest
from datetime import datetime, timezone
from model_mommy import mommy
from unittest.mock import patch
from usaspending_api.broker.lookups import EXTERNAL_DATA_TYPE_DICT
from usaspending_api.download.lookups import JOB_STATUS
from usaspending_api.download.v2.base_download_viewset import BaseDownloadViewSet
JSON_REQUEST = {"dummy_key": "dummy_value"}
@pytest.fixture
def common_test_data(db):
for js in JOB_STATUS:
mommy.make("download.JobStatus", job_status_id=js.id, name=js.name, description=js.desc)
download_jobs = [
{
"download_job_id": 1,
"file_name": "oldest_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 15, 12, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 2,
"file_name": "yesterday.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 16, 12, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_reveal_date=datetime(2021, 1, 1, 12, 0, 0, 0, timezone.utc),
)
def test_elasticsearch_download_cached(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
es_transaction_request = {**JSON_REQUEST, "download_types": ["elasticsearch_transactions", "sub_awards"]}
es_award_request = {**JSON_REQUEST, "download_types": ["elasticsearch_awards", "sub_awards"]}
download_jobs = [
{
"download_job_id": 10,
"file_name": "es_transaction_job_wrong.zip",
"job_status_id": 1,
"json_request": json.dumps(es_transaction_request),
"update_date": datetime(2021, 1, 17, 10, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 11,
"file_name": "es_transaction_job_right.zip",
"job_status_id": 1,
"json_request": json.dumps(es_transaction_request),
"update_date": datetime(2021, 1, 17, 12, 30, 0, 0, timezone.utc),
},
{
"download_job_id": 20,
"file_name": "es_award_job_wrong.zip",
"job_status_id": 1,
"json_request": json.dumps(es_award_request),
"update_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 21,
"file_name": "es_award_job_right.zip",
"job_status_id": 1,
"json_request": json.dumps(es_award_request),
"update_date": datetime(2021, 1, 17, 17, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(
json.dumps(es_transaction_request), es_transaction_request["download_types"]
)
assert result == {"download_job_id": 11, "file_name": "es_transaction_job_right.zip"}
result = BaseDownloadViewSet._get_cached_download(json.dumps(es_award_request), es_award_request["download_types"])
assert result == {"download_job_id": 21, "file_name": "es_award_job_right.zip"}
def test_elasticsearch_cached_download_not_found(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
result = BaseDownloadViewSet._get_cached_download(
json.dumps(JSON_REQUEST), ["elasticsearch_transactions", "sub_awards"]
)
assert result is None
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST), ["elasticsearch_awards", "sub_awards"])
assert result is None
def test_non_elasticsearch_download_cached(common_test_data):
external_load_dates = [
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 14, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
download_jobs = [
{
"download_job_id": 10,
"file_name": "10_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 10, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 11,
"file_name": "11_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 12, 30, 0, 0, timezone.utc),
},
{
"download_job_id": 20,
"file_name": "20_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 13, 0, 0, 0, timezone.utc),
},
{
"download_job_id": 21,
"file_name": "21_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 17, 17, 0, 0, 0, timezone.utc),
},
]
for job in download_jobs:
with patch("django.utils.timezone.now") as mock_now:
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
# Grab latest valid download
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 21, "file_name": "21_download_job.zip"}
# FABS date updated; download no longer cached
mommy.make(
"broker.ExternalDataLoadDate",
external_data_type__external_data_type_id=EXTERNAL_DATA_TYPE_DICT["fabs"],
last_load_date=datetime(2021, 1, 18, 12, 0, 0, 0, timezone.utc),
)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
# New download comes through and is cached
with patch("django.utils.timezone.now") as mock_now:
job = {
"download_job_id": 30,
"file_name": "30_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 18, 13, 0, 0, 0, timezone.utc),
}
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 30, "file_name": "30_download_job.zip"}
# New submission_reveal_date is set in DABSSubmissionWindowSchedule; clears the cache
mommy.make(
"submissions.DABSSubmissionWindowSchedule",
submission_reveal_date=datetime(2021, 1, 19, 6, 0, 0, 0, timezone.utc),
)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
# Download after the new submission_reveal_date is cached
with patch("django.utils.timezone.now") as mock_now:
job = {
"download_job_id": 31,
"file_name": "31_download_job.zip",
"job_status_id": 1,
"json_request": json.dumps(JSON_REQUEST),
"update_date": datetime(2021, 1, 19, 6, 15, 0, 0, timezone.utc),
}
mock_now.return_value = job["update_date"]
mommy.make("download.DownloadJob", **job)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result == {"download_job_id": 31, "file_name": "31_download_job.zip"}
def test_non_elasticsearch_cached_download_not_found(common_test_data):
external_load_dates = [
# FABS and FPDS dates are much newer to show they aren't used for ES downloads
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fabs"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["fpds"],
"last_load_date": datetime(2021, 1, 30, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_transactions"],
"last_load_date": datetime(2021, 1, 17, 12, 0, 0, 0, timezone.utc),
},
{
"external_data_type__external_data_type_id": EXTERNAL_DATA_TYPE_DICT["es_awards"],
"last_load_date": datetime(2021, 1, 17, 16, 0, 0, 0, timezone.utc),
},
]
for load_date in external_load_dates:
mommy.make("broker.ExternalDataLoadDate", **load_date)
result = BaseDownloadViewSet._get_cached_download(json.dumps(JSON_REQUEST))
assert result is None
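# Reading of the behavior exercised above (editorial summary, not original code):
# _get_cached_download treats a cached DownloadJob as stale once a newer
# ExternalDataLoadDate or DABSSubmissionWindowSchedule.submission_reveal_date
# postdates the job's update_date; the elasticsearch download types key off
# the es_* load dates only, not FABS/FPDS.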
| [per-file metrics and quality-signal columns omitted] |
33f6ebe8d23223a87b7bdc8a9f203aa0d11d41f0 | 2,659 | py | Python | python/scrapper.py | yujingma45/K-means-Color-in-Movie-Posters- | 1538ce6c3e08549fda5c789e7ecd5d50bedf23cf | ["MIT"] | 1 | 2021-05-23T18:33:14.000Z | 2021-05-23T18:33:14.000Z | python/scrapper.py | yujingma45/K-means-Color-in-Movie-Posters- | 1538ce6c3e08549fda5c789e7ecd5d50bedf23cf | ["MIT"] | null | null | null | python/scrapper.py | yujingma45/K-means-Color-in-Movie-Posters- | 1538ce6c3e08549fda5c789e7ecd5d50bedf23cf | ["MIT"] | 2 | 2016-11-25T10:37:42.000Z | 2017-01-12T20:21:07.000Z |
from bs4 import BeautifulSoup
import re
import urllib2  # Python 2 only; see the Python 3 note after this script

# Each entry pairs the output-file prefix with the Bing image-search results
# page that is actually fetched. ("Comdey" [sic] is preserved from the original
# query URLs.) The original script also built a filtered `url` from a plain
# query string for every category but never requested it, so that dead code,
# the duplicate bs4 import, and the unused `requests` import are dropped here.
searches = [
    ("animation", "http://www.bing.com/images/search?q=animation%20Movie%20Posters&qs=n&form=QBIR&pq=animation%20movie%20posters&sc=4-23&sp=-1&sk="),
    ("comedy", "http://www.bing.com/images/search?q=Comdey%20Movie%20Posters&qs=n&form=QBIR&pq=comdey%20movie%20posters&sc=1-20&sp=-1&sk="),
    ("action", "http://www.bing.com/images/search?q=Action%20Movie%20Posters&qs=n&form=QBIR&pq=action%20movie%20posters&sc=3-20&sp=-1&sk="),
    ("Horror", "http://www.bing.com/images/search?q=2014%20Horror%20Movie%20Posters&qs=n&form=QBIR&pq=2014%20horror%20movie%20posters&sc=1-25&sp=-1&sk="),
]

for image_type, search_url in searches:
    page = urllib2.urlopen(search_url).read()
    soup = BeautifulSoup(page)
    # Thumbnails on the results page are served from mm.bing.net.
    images = [a['src'] for a in soup.find_all("img", {"src": re.compile("mm.bing.net")})]
    for counter, img in enumerate(images):
        with open(image_type + "_" + str(counter) + ".jpg", 'wb') as f:
            f.write(urllib2.urlopen(img).read())
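# urllib2 exists only on Python 2; a rough sketch of the equivalent fetch step
# on Python 3 (modern bs4 also expects an explicit parser argument):
# from urllib.request import urlopen
# soup = BeautifulSoup(urlopen(search_url).read(), "html.parser")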
| [per-file metrics and quality-signal columns omitted] |
1d33e9cfb2da262fe3af0b4c60ee06ccbc98958f | 83 | py | Python | utils/__init__.py | dusterherz/advent-of-code-2021 | 82d6808702ec72a1ba4f241afc8e3d174e14dfc3 | ["MIT"] | null | null | null | utils/__init__.py | dusterherz/advent-of-code-2021 | 82d6808702ec72a1ba4f241afc8e3d174e14dfc3 | ["MIT"] | null | null | null | utils/__init__.py | dusterherz/advent-of-code-2021 | 82d6808702ec72a1ba4f241afc8e3d174e14dfc3 | ["MIT"] | null | null | null |
from .input import read_input, read_input_as_numbers
from .parse import parse_data
| [per-file metrics and quality-signal columns omitted] |
d53796803e725d69029ca14e37f4f18029a8ba7d | 291 | py | Python | ibis/utility/stats.py | CIDARLAB/ibis | 5108848c1d45326b3c65213b2f8deaa88fd29be6 | ["MIT"] | 1 | 2021-02-16T23:19:07.000Z | 2021-02-16T23:19:07.000Z | ibis/utility/stats.py | CIDARLAB/ibis | 5108848c1d45326b3c65213b2f8deaa88fd29be6 | ["MIT"] | 1 | 2021-04-18T13:45:15.000Z | 2021-04-18T13:45:15.000Z | ibis/utility/stats.py | CIDARLAB/scoring-project | 5108848c1d45326b3c65213b2f8deaa88fd29be6 | ["MIT"] | 2 | 2021-04-27T23:52:06.000Z | 2021-07-02T13:44:26.000Z |
"""
--------------------------------------------------------------------------------
Description:
Generates a statistical report for genetic circuit scoring
Written by W.R. Jackson, Ben Bremer, Eric South
--------------------------------------------------------------------------------
"""
| [per-file metrics and quality-signal columns omitted] |
d5a107ee0258abdb4e94ec461ecb7c60e6b525f6 | 40 | py | Python | volkscv/analyzer/__init__.py | YuxinZou/volkscv | 67ac83f0c0ac85bd6606053732b454db17c53de0 | ["Apache-2.0"] | 59 | 2020-07-09T03:22:51.000Z | 2021-12-14T11:26:43.000Z | volkscv/analyzer/__init__.py | ChaseMonsterAway/volkscv | aa7e898cc29e3e5f26363e56bf56f4c56574bbd8 | ["Apache-2.0"] | 1 | 2020-11-24T12:18:19.000Z | 2020-11-25T03:12:57.000Z | volkscv/analyzer/__init__.py | ChaseMonsterAway/volkscv | aa7e898cc29e3e5f26363e56bf56f4c56574bbd8 | ["Apache-2.0"] | 24 | 2020-08-05T03:13:37.000Z | 2021-10-11T02:55:25.000Z |
from . import statistics, visualization
| [per-file metrics and quality-signal columns omitted] |
d5a22f9877f9671ce48276e1c3f41ba13e917334 | 403 | py | Python | src/example_package/calculator.py | zhadba/packaging-framework | 48cd8e39ceece60cf48ecd964b1ce702abe8c507 | ["MIT"] | null | null | null | src/example_package/calculator.py | zhadba/packaging-framework | 48cd8e39ceece60cf48ecd964b1ce702abe8c507 | ["MIT"] | null | null | null | src/example_package/calculator.py | zhadba/packaging-framework | 48cd8e39ceece60cf48ecd964b1ce702abe8c507 | ["MIT"] | null | null | null |
class Calculator():
def __init__(self, number_1, number_2):
self.number_1 = number_1
self.number_2 = number_2
def add(self):
return self.number_1 + self.number_2
def subtract(self):
return self.number_1 - self.number_2
def multiply(self):
return self.number_1 * self.number_2
def divide(self):
return self.number_1 / self.number_2
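# A minimal usage sketch of the class above; note that divide() raises
# ZeroDivisionError when number_2 is 0, since nothing guards against it.
calc = Calculator(6, 3)
print(calc.add())       # 9
print(calc.subtract())  # 3
print(calc.multiply())  # 18
print(calc.divide())    # 2.0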
| [per-file metrics and quality-signal columns omitted] |
d5a3c1e35219dc1ef093b05533332f2ed46ec946 | 391 | py | Python | calc/calculadora.py | areguilo/X-Serv-15.4-Django-calc | c155f8e598ca39513b28387b09c1a14d08275517 | ["Apache-2.0"] | null | null | null | calc/calculadora.py | areguilo/X-Serv-15.4-Django-calc | c155f8e598ca39513b28387b09c1a14d08275517 | ["Apache-2.0"] | null | null | null | calc/calculadora.py | areguilo/X-Serv-15.4-Django-calc | c155f8e598ca39513b28387b09c1a14d08275517 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
import sys
def suma(num1, num2):
return num1+num2
def resta(num1, num2):
return num1-num2
def multiplicacion(num1, num2):
return num1*num2
def division(num1, num2):
try:
return num1/num2
except ZeroDivisionError:
return("Zero Division Error")
diccionario = {"sum":suma, "res":resta, "mul":multiplicacion, "div":division}
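# A minimal dispatch sketch using the table above; keys are the three-letter
# operation names it defines.
print(diccionario["sum"](2, 3))  # 5
print(diccionario["div"](1, 0))  # "Zero Division Error"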
| [per-file metrics and quality-signal columns omitted] |
63587a0d54d201ea600bfff2ae1174db333d0cf0 | 142 | py | Python | src/doc/views.py | coci/vocabulary-api | 21d3479bbedbf927c14a71cee480dbf675437378 | ["Apache-2.0"] | 1 | 2020-10-05T13:35:08.000Z | 2020-10-05T13:35:08.000Z | src/doc/views.py | coci/vocabulary-api | 21d3479bbedbf927c14a71cee480dbf675437378 | ["Apache-2.0"] | 6 | 2020-10-02T09:38:55.000Z | 2020-11-12T12:12:40.000Z | src/doc/views.py | coci/vocabulary-api | 21d3479bbedbf927c14a71cee480dbf675437378 | ["Apache-2.0"] | 1 | 2020-10-02T09:53:53.000Z | 2020-10-02T09:53:53.000Z |
from django.shortcuts import render
def doc(request):
return render(request, 'doc/index.html', context={'schema_url': 'redoc/schema.yml'})
| [per-file metrics and quality-signal columns omitted] |
637b94f21ccdf2f0e35bb6a0386224086cb1210d | 1,044 | gyp | Python | network_bearer_selection/network_bearer_selection.gyp | ricardotk/tizen-extensions-crosswalk | 252e07df73a7b96aba033263d56a5df4f5308f07 | ["BSD-3-Clause"] | 1 | 2016-11-21T21:21:19.000Z | 2016-11-21T21:21:19.000Z | network_bearer_selection/network_bearer_selection.gyp | ricardotk/tizen-extensions-crosswalk | 252e07df73a7b96aba033263d56a5df4f5308f07 | ["BSD-3-Clause"] | null | null | null | network_bearer_selection/network_bearer_selection.gyp | ricardotk/tizen-extensions-crosswalk | 252e07df73a7b96aba033263d56a5df4f5308f07 | ["BSD-3-Clause"] | null | null | null |
{
'includes':[
'../common/common.gypi',
],
'targets': [
{
'target_name': 'tizen_network_bearer_selection',
'type': 'loadable_module',
'sources': [
'network_bearer_selection_api.js',
'network_bearer_selection_connection_mobile.cc',
'network_bearer_selection_connection_mobile.h',
'network_bearer_selection_context.cc',
'network_bearer_selection_context.h',
'network_bearer_selection_context_desktop.cc',
'network_bearer_selection_context_desktop.h',
'network_bearer_selection_context_mobile.cc',
'network_bearer_selection_context_mobile.h',
'network_bearer_selection_request.cc',
'network_bearer_selection_request.h',
],
'conditions': [
[ 'extension_host_os=="mobile"', {
'includes': [
'../common/pkg-config.gypi',
],
'variables': {
'packages': [
'capi-network-connection',
],
},
}],
],
},
],
}
| [per-file metrics and quality-signal columns omitted] |
89252bbde86e2477ef4c07cae35cdd1b67b050c3 | 240 | py | Python | pisa/utils/hypersurface/__init__.py | atrettin/pisa | 9702ed946e3f669f1126ca4ffe96b49280a8ff12 | ["Apache-2.0"] | 5 | 2021-03-10T18:18:10.000Z | 2022-03-04T03:04:05.000Z | pisa/utils/hypersurface/__init__.py | atrettin/pisa | 9702ed946e3f669f1126ca4ffe96b49280a8ff12 | ["Apache-2.0"] | 105 | 2019-03-21T13:53:59.000Z | 2021-01-25T16:34:14.000Z | pisa/utils/hypersurface/__init__.py | atrettin/pisa | 9702ed946e3f669f1126ca4ffe96b49280a8ff12 | ["Apache-2.0"] | 26 | 2019-03-03T22:25:04.000Z | 2020-10-23T15:57:10.000Z |
from .hypersurface import *
# with the switch to Python 3.7 this did not get imported automatically anymore... why?!
from .hypersurface import get_hypersurface_file_name
from .hyper_interpolator import *
from .hypersurface_plotting import *
| [per-file metrics and quality-signal columns omitted] |
897f5b40bcd3534e3bad2c8d9ffb7b55816566a6 | 18,125 | py | Python | test/test_ast.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | ["MIT"] | 8 | 2019-03-27T12:49:21.000Z | 2020-10-10T11:16:25.000Z | test/test_ast.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | ["MIT"] | 65 | 2018-08-26T14:48:45.000Z | 2020-03-17T12:24:42.000Z | test/test_ast.py | timothyrenner/svl | a74c09c49f2e14046acd4b0eeb861f8fef6bca96 | ["MIT"] | 1 | 2019-09-13T19:39:07.000Z | 2019-09-13T19:39:07.000Z |
from svl.compiler.ast import parse_svl
def test_line_chart():
""" Tests that the line chart type is properly parsed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
LINE bigfoot
X date by year LABEL "Year"
Y date COUNT LABEL "Number of Sightings"
SPLIT BY classification
TITLE "Bigfoot Sightings by Year and Classification"
FILTER "date > '1990-01-01'"
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"type": "line",
"data": "bigfoot",
"title": "Bigfoot Sightings by Year and Classification",
"x": {"field": "date", "temporal": "YEAR", "label": "Year"},
"y": {
"agg": "COUNT",
"field": "date",
"label": "Number of Sightings",
},
"split_by": {"field": "classification"},
"filter": "date > '1990-01-01'",
}
],
}
parsed_svl = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl
def test_bar_chart():
""" Tests that the bar chart type is properly parsed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
BAR bigfoot
X classification
Y classification COUNT
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"agg": "COUNT", "field": "classification"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_bar_chart_sum():
""" Tests that the bar chart type is properly parsed with a SUM
aggregation.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
BAR bigfoot
X classification
Y temperature_mid SUM
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"agg": "SUM", "field": "temperature_mid"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_histogram_step():
""" Tests that the histogram type is properly parsed when the step size
is specified.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
HISTOGRAM bigfoot
X temperature_mid
STEP 5
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature_mid"},
"step": 5,
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_histogram_bins():
""" Tests that the histogram type is properly parsed when the number of
bins is given as an argument.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
HISTOGRAM bigfoot
TITLE "Bigfoot Sighting Humidity"
BINS 25
Y humidity LABEL "Humidity"
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"title": "Bigfoot Sighting Humidity",
"type": "histogram",
"y": {"field": "humidity", "label": "Humidity"},
"bins": 25,
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_histogram_split_by():
""" Tests that the histogram type is properly parsed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
HISTOGRAM bigfoot
X temperature_mid
STEP 5
SPLIT BY classification
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature_mid"},
"step": 5,
"split_by": {"field": "classification"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_pie():
""" Tests that the pie type is properly parsed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
PIE bigfoot
TITLE "Bigfoot Sightings with Location"
HOLE 0.3
AXIS TRANSFORM "CASE WHEN latitude IS NULL THEN 'no_location'
ELSE 'has_location' END"
"""
transform_truth = """CASE WHEN latitude IS NULL THEN \'no_location\'
ELSE \'has_location\' END"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"title": "Bigfoot Sightings with Location",
"type": "pie",
"axis": {"transform": transform_truth},
"hole": 0.3,
}
],
}
parsed_svl_answer = parse_svl(svl_string)
print(parsed_svl_answer["vcat"])
assert parsed_svl_truth == parsed_svl_answer
def test_number():
""" Tests that the number type is properly parsed.
"""
svl_string = """
NUMBER bigfoot
VALUE report_id COUNT
TITLE "Number of Bigfoot Sightings"
FILTER "Classification = 'A'"
"""
truth = {
"datasets": {},
"vcat": [
{
"data": "bigfoot",
"type": "number",
"value": {"field": "report_id", "agg": "COUNT"},
"title": "Number of Bigfoot Sightings",
"filter": "Classification = 'A'",
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_number_transform():
""" Tests that the number chart type is properly parsed when the field is
a TRANSFORM.
"""
svl_string = """
NUMBER bigfoot
VALUE TRANSFORM "COUNT(*)"
"""
truth = {
"datasets": {},
"vcat": [
{
"data": "bigfoot",
"type": "number",
"value": {"transform": "COUNT(*)"},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_scatter():
""" Tests that the scatter type is properly parsed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
SCATTER bigfoot
X latitude
Y temperature_mid
SPLIT BY classification
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "scatter",
"x": {"field": "latitude"},
"y": {"field": "temperature_mid"},
"split_by": {"field": "classification"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_case_insensitivity():
""" Tests that language keywords are case insensitive.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
bar bigfoot
x classification
y classification CoUnT
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"agg": "COUNT", "field": "classification"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_comment():
""" Tests that comments are ignored.
"""
svl_string = """
DATASETS
-- Time to go squatchin.
bigfoot "data/bigfoot_sightings.csv"
HISTOGRAM bigfoot
X temperature_mid
STEP 5 -- Every five degrees should be granular enough.
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature_mid"},
"step": 5,
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_concat():
""" Tests that the concat function is correctly parsed and transformed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
CONCAT(
SCATTER bigfoot
X latitude
Y temperature_mid
BAR bigfoot
X classification
Y classification COUNT
)
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"hcat": [
{
"data": "bigfoot",
"type": "scatter",
"x": {"field": "latitude"},
"y": {"field": "temperature_mid"},
},
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"agg": "COUNT", "field": "classification"},
},
]
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_implicit_vcat():
""" Tests that the implicit vertical concatenation of parenthesized
charts is correctly parsed and transformed.
"""
svl_string = """
DATASETS
bigfoot "data/bigfoot_sightings.csv"
(
SCATTER bigfoot
X latitude
Y temperature_mid
BAR bigfoot
X classification
Y classification COUNT
)
"""
parsed_svl_truth = {
"datasets": {"bigfoot": {"file": "data/bigfoot_sightings.csv"}},
"vcat": [
{
"vcat": [
{
"data": "bigfoot",
"type": "scatter",
"x": {"field": "latitude"},
"y": {"field": "temperature_mid"},
},
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"agg": "COUNT", "field": "classification"},
},
]
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_sql_dataset():
""" Tests that SQL-defined datasets are parsed correctly.
"""
svl_string = """
DATASETS
bigfoot "bigfoot_sightings.csv"
recent_bigfoot_sightings SQL
"SELECT * FROM bigfoot WHERE date >= '2008-01-01'"
HISTOGRAM recent_bigfoot_sightings
X temperature_mid
"""
parsed_svl_truth = {
"datasets": {
"bigfoot": {"file": "bigfoot_sightings.csv"},
"recent_bigfoot_sightings": {
"sql": "SELECT * FROM bigfoot WHERE date >= '2008-01-01'"
},
},
"vcat": [
{
"data": "recent_bigfoot_sightings",
"type": "histogram",
"x": {"field": "temperature_mid"},
}
],
}
parsed_svl_answer = parse_svl(svl_string)
assert parsed_svl_truth == parsed_svl_answer
def test_no_datasets():
""" Tests that the parse_svl function returns the correct value when
there's no DATASETS directive.
"""
svl_string = """
HISTOGRAM bigfoot
X temperature_mid
SPLIT BY classification
"""
truth = {
"datasets": {
# A validator would catch this, but from a parsing perspective this
# is valid.
},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature_mid"},
"split_by": {"field": "classification"},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_with_kwargs():
""" Tests that the parse_svl function returns the correct value when the
kwargs are used.
"""
svl_string = """
HISTOGRAM bigfoot
X temperature_mid
SPLIT BY classification
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature_mid"},
"split_by": {"field": "classification"},
}
],
}
answer = parse_svl(svl_string, bigfoot="bigfoot_sightings.csv")
assert truth == answer
def test_sort():
""" Tests that the parse_svl function returns the correct value with a
SORT modifier on one axis.
"""
svl_string = """
DATASETS
bigfoot "bigfoot_sightings.csv"
BAR bigfoot
X classification SORT ASC
Y classification COUNT
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification", "sort": "ASC"},
"y": {"field": "classification", "agg": "COUNT"},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_color_by():
""" Tests that the parse_svl function can parse SVL with a COLOR BY.
"""
svl_string = """
DATASETS
bigfoot "bigfoot_sightings.csv"
LINE bigfoot
X date BY YEAR
Y report_id COUNT LABEL "Number of Sightings"
COLOR BY temperature_mid AVG "Jet" LABEL "Average Temperature (F)"
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "line",
"x": {"field": "date", "temporal": "YEAR"},
"y": {
"field": "report_id",
"agg": "COUNT",
"label": "Number of Sightings",
},
"color_by": {
"field": "temperature_mid",
"agg": "AVG",
"color_scale": "Jet",
"label": "Average Temperature (F)",
},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_split_by_transform():
""" Tests that the SPLIT BY directive with a TRANSFORM returns the
correct value.
"""
svl_string = """
DATASETS
bigfoot "bigfoot_sightings.csv"
LINE bigfoot
X date BY YEAR
Y report_id COUNT
SPLIT BY TRANSFORM
"CASE WHEN temperature > 85 THEN 'hot' ELSE 'not_hot' END"
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "line",
"x": {"field": "date", "temporal": "YEAR"},
"y": {"field": "report_id", "agg": "COUNT"},
"split_by": {
"transform": "CASE WHEN temperature > 85 THEN 'hot' "
"ELSE 'not_hot' END"
},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_split_by_temporal():
""" Tests that the SPLIT BY directive with a TEMPORAL modifier returns
the correct value.
"""
svl_string = """
DATASETS bigfoot "bigfoot_sightings.csv"
BAR bigfoot
X classification
Y report_number COUNT
SPLIT BY date BY YEAR
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "bar",
"x": {"field": "classification"},
"y": {"field": "report_number", "agg": "COUNT"},
"split_by": {"field": "date", "temporal": "YEAR"},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
def test_split_by_label():
""" Tests that the SPLIT BY directive with a LABEL modifier returns
the correct value.
"""
svl_string = """
DATASETS bigfoot "bigfoot_sightings.csv"
HISTOGRAM bigfoot
X temperature
SPLIT BY classification LABEL "Classification"
"""
truth = {
"datasets": {"bigfoot": {"file": "bigfoot_sightings.csv"}},
"vcat": [
{
"data": "bigfoot",
"type": "histogram",
"x": {"field": "temperature"},
"split_by": {
"field": "classification",
"label": "Classification",
},
}
],
}
answer = parse_svl(svl_string)
assert truth == answer
| 26.116715
| 79
| 0.490814
| 1,656
| 18,125
| 5.183575
| 0.092995
| 0.061859
| 0.08411
| 0.064306
| 0.824907
| 0.798579
| 0.772251
| 0.744292
| 0.711673
| 0.678705
| 0
| 0.004462
| 0.381683
| 18,125
| 693
| 80
| 26.154401
| 0.761488
| 0.091531
| 0
| 0.646067
| 0
| 0
| 0.42117
| 0.064843
| 0
| 0
| 0
| 0
| 0.041199
| 1
| 0.041199
| false
| 0
| 0.001873
| 0
| 0.043071
| 0.001873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
899630240c317341cd88fd9f9c98228a45b7e607
| 41
|
py
|
Python
|
orders/serializers/__init__.py
|
julianarchila/ecommerce-django-api
|
d0665745c2a16dc8bc1acb54ead66f69da129271
|
[
"MIT"
] | null | null | null |
orders/serializers/__init__.py
|
julianarchila/ecommerce-django-api
|
d0665745c2a16dc8bc1acb54ead66f69da129271
|
[
"MIT"
] | null | null | null |
orders/serializers/__init__.py
|
julianarchila/ecommerce-django-api
|
d0665745c2a16dc8bc1acb54ead66f69da129271
|
[
"MIT"
] | null | null | null |
from .orders import *
from .cart import *
| 20.5
| 21
| 0.731707
| 6
| 41
| 5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 41
| 2
| 22
| 20.5
| 0.882353
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
899bccd21c2a9f94735151c115790d1e30562ab5
| 52
|
py
|
Python
|
spvcm/lower_level/__init__.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 14
|
2017-06-20T18:39:04.000Z
|
2021-03-27T02:21:46.000Z
|
spvcm/lower_level/__init__.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 12
|
2018-05-11T11:13:21.000Z
|
2020-02-07T14:23:12.000Z
|
spvcm/lower_level/__init__.py
|
weikang9009/spvcm
|
00ec35331e0e1a67bcd841a6b3761a23099617f7
|
[
"MIT"
] | 8
|
2017-05-20T00:55:40.000Z
|
2020-07-02T14:52:49.000Z
|
from .se import Lower_SE
from .sma import Lower_SMA
| 17.333333
| 26
| 0.807692
| 10
| 52
| 4
| 0.5
| 0.55
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 52
| 2
| 27
| 26
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
9835dc46494574ab211c2baeb62322bfbb6acc82
| 40
|
py
|
Python
|
smartlinks/tests/management/commands/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | 3
|
2016-11-28T22:04:40.000Z
|
2021-05-23T22:35:37.000Z
|
smartlinks/tests/management/commands/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | null | null | null |
smartlinks/tests/management/commands/__init__.py
|
ixc/glamkit-smartlinks
|
c550f372cecd08bdc81795b18f6b0cec38ac7bd2
|
[
"BSD-3-Clause"
] | 2
|
2017-08-13T06:44:56.000Z
|
2017-10-04T00:14:35.000Z
|
from reset_smartlink_index_test import *
| 40
| 40
| 0.9
| 6
| 40
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 40
| 1
| 40
| 40
| 0.891892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
984a0a8c88553b6dc6c87d60519c9615fcbbad72
| 12,534
|
py
|
Python
|
eeauditor/auditors/aws/Amazon_EFS_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 442
|
2020-03-15T20:56:36.000Z
|
2022-03-31T22:13:07.000Z
|
eeauditor/auditors/aws/Amazon_EFS_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 57
|
2020-03-15T22:09:56.000Z
|
2022-03-31T13:17:06.000Z
|
eeauditor/auditors/aws/Amazon_EFS_Auditor.py
|
kbhagi/ElectricEye
|
31960e1e1cfb75c5d354844ea9e07d5295442823
|
[
"Apache-2.0"
] | 59
|
2020-03-15T21:19:10.000Z
|
2022-03-31T15:01:31.000Z
|
#This file is part of ElectricEye.
#SPDX-License-Identifier: Apache-2.0
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing,
#software distributed under the License is distributed on an
#"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
#KIND, either express or implied. See the License for the
#specific language governing permissions and limitations
#under the License.
import boto3
import datetime
from check_register import CheckRegister
registry = CheckRegister()
# import boto3 clients
efs = boto3.client("efs")
# cached lookup of EFS file systems so multiple checks share one API call
def describe_file_systems(cache):
response = cache.get("describe_file_systems")
if response:
return response
cache["describe_file_systems"] = efs.describe_file_systems()
return cache["describe_file_systems"]
@registry.register_check("efs")
def efs_filesys_encryption_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[EFS.1] EFS File Systems should have encryption enabled"""
response = describe_file_systems(cache)
myFileSys = response["FileSystems"]
for filesys in myFileSys:
encryptionCheck = str(filesys["Encrypted"])
fileSysId = str(filesys["FileSystemId"])
fileSysArn = f"arn:{awsPartition}:elasticfilesystem:{awsRegion}:{awsAccountId}:file-system/{fileSysId}"
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
if encryptionCheck == "False":
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "HIGH"},
"Confidence": 99,
"Title": "[EFS.1] EFS File Systems should have encryption enabled",
"Description": "EFS file system "
+ fileSysId
+ " does not have encryption enabled. EFS file systems cannot be encrypted after creation, consider backing up data and creating a new encrypted file system.",
"Remediation": {
"Recommendation": {
"Text": "For EFS encryption information refer to the Data Encryption in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "FAILED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
else:
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-encryption-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[EFS.1] EFS File Systems should have encryption enabled",
"Description": "EFS file system " + fileSysId + " has encryption enabled.",
"Remediation": {
"Recommendation": {
"Text": "For EFS encryption information refer to the Data Encryption in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/encryption.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST SP 800-53 MP-8",
"NIST SP 800-53 SC-12",
"NIST SP 800-53 SC-28",
"AICPA TSC CC6.1",
"ISO 27001:2013 A.8.2.3",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
@registry.register_check("efs")
def efs_filesys_policy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict:
"""[EFS.2] EFS File Systems should not use the default file system policy"""
response = describe_file_systems(cache)
myFileSys = response["FileSystems"]
for filesys in myFileSys:
fileSysId = str(filesys["FileSystemId"])
fileSysArn = f"arn:{awsPartition}:elasticfilesystem:{awsRegion}:{awsAccountId}:file-system/{fileSysId}"
# ISO Time
iso8601Time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
try:
response = efs.describe_file_system_policy(
FileSystemId=fileSysId
)
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "INFORMATIONAL"},
"Confidence": 99,
"Title": "[EFS.2] EFS File Systems should not use the default file system policy",
"Description": "EFS file system " + fileSysId + " is not using the default file system policy.",
"Remediation": {
"Recommendation": {
"Text": "For EFS policies information refer to the Identity and Access Management in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST CSF PR.AC-1",
"NIST CSF PR.AC-4",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-5",
"AICPA TSC CC6.1",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.9.1.1",
"ISO 27001:2013 A.9.4.1",
],
},
"Workflow": {"Status": "RESOLVED"},
"RecordState": "ARCHIVED",
}
yield finding
        except efs.exceptions.PolicyNotFound:
            # PolicyNotFound is what describe_file_system_policy raises when the
            # file system still uses the default policy
finding = {
"SchemaVersion": "2018-10-08",
"Id": fileSysArn + "/efs-policy-check",
"ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default",
"GeneratorId": fileSysArn,
"AwsAccountId": awsAccountId,
"Types": [
"Software and Configuration Checks/AWS Security Best Practices",
"Effects/Data Exposure",
],
"FirstObservedAt": iso8601Time,
"CreatedAt": iso8601Time,
"UpdatedAt": iso8601Time,
"Severity": {"Label": "MEDIUM"},
"Confidence": 99,
"Title": "[EFS.2] EFS File Systems should not use the default file system policy",
"Description": "EFS file system " + fileSysId + " is using a default file system policy.",
"Remediation": {
"Recommendation": {
"Text": "For EFS policies information refer to the Identity and Access Management in EFS section of the Amazon Elastic File System User Guide",
"Url": "https://docs.aws.amazon.com/efs/latest/ug/iam-access-control-nfs-efs.html",
}
},
"ProductFields": {"Product Name": "ElectricEye"},
"Resources": [
{
"Type": "AwsElasticFileSystem",
"Id": fileSysArn,
"Partition": awsPartition,
"Region": awsRegion,
"Details": {"Other": {"fileSystemId": fileSysId}},
}
],
"Compliance": {
"Status": "PASSED",
"RelatedRequirements": [
"NIST CSF PR.DS-1",
"NIST CSF PR.AC-1",
"NIST CSF PR.AC-4",
"NIST SP 800-53 IA-1",
"NIST SP 800-53 IA-2",
"NIST SP 800-53 IA-5",
"AICPA TSC CC6.1",
"AICPA TSC CC6.3",
"ISO 27001:2013 A.9.1.1",
"ISO 27001:2013 A.9.4.1",
],
},
"Workflow": {"Status": "NEW"},
"RecordState": "ACTIVE",
}
yield finding
        except Exception:
            # any other API error: skip this file system rather than crash the auditor
            pass
| 45.912088
| 175
| 0.494176
| 1,076
| 12,534
| 5.732342
| 0.227695
| 0.027562
| 0.01751
| 0.021401
| 0.785019
| 0.784533
| 0.784533
| 0.749514
| 0.749514
| 0.742704
| 0
| 0.036944
| 0.397479
| 12,534
| 273
| 176
| 45.912088
| 0.779793
| 0.079863
| 0
| 0.76569
| 0
| 0.020921
| 0.385953
| 0.05485
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012552
| false
| 0.016736
| 0.012552
| 0
| 0.033473
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
98690ee34ba914df1984549adc1c7b7be26f7395
| 286
|
py
|
Python
|
OpenGLCffi/GLES3/EXT/EXT/texture_view.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GLES3/EXT/EXT/texture_view.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
OpenGLCffi/GLES3/EXT/EXT/texture_view.py
|
cydenix/OpenGLCffi
|
c78f51ae5e6b655eb2ea98f072771cf69e2197f3
|
[
"MIT"
] | null | null | null |
from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['texture', 'target', 'origtexture', 'internalformat', 'minlevel', 'numlevels', 'minlayer', 'numlayers'])
def glTextureViewEXT(texture, target, origtexture, internalformat, minlevel, numlevels, minlayer, numlayers):
pass
| 40.857143
| 131
| 0.755245
| 28
| 286
| 7.714286
| 0.642857
| 0.12037
| 0.222222
| 0.351852
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0.007692
| 0.090909
| 286
| 6
| 132
| 47.666667
| 0.823077
| 0
| 0
| 0
| 0
| 0
| 0.271127
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0.25
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
f687273e641e20a98ebf9a151dcca8a4472a07b7
| 184
|
py
|
Python
|
SciPyFST/fstUtils/__init__.py
|
MorriganR/SciPyFST
|
16f263c9c08ab2a2e0be4d89818ccf8a882351f1
|
[
"MIT"
] | null | null | null |
SciPyFST/fstUtils/__init__.py
|
MorriganR/SciPyFST
|
16f263c9c08ab2a2e0be4d89818ccf8a882351f1
|
[
"MIT"
] | 2
|
2021-11-28T19:57:52.000Z
|
2022-02-06T20:45:04.000Z
|
SciPyFST/fstUtils/__init__.py
|
MorriganR/SciPyFST
|
16f263c9c08ab2a2e0be4d89818ccf8a882351f1
|
[
"MIT"
] | null | null | null |
from .toDot import *
from .toTable import *
from .toMdTable import *
from .toTexTable import *
from .fstFromDict import *
from .playToWave import *
from .copyWithRenameStates import *
| 23
| 35
| 0.771739
| 21
| 184
| 6.761905
| 0.428571
| 0.422535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 184
| 7
| 36
| 26.285714
| 0.910256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f69ffe69c3ce08d3adb8a24db68a6a3b749248d3
| 29
|
py
|
Python
|
python2/rosmln/src/rosmln/srv/__init__.py
|
seba90/pracmln
|
2af9e11d72f077834cf130343a2506344480fb07
|
[
"BSD-2-Clause"
] | 123
|
2016-02-13T08:49:46.000Z
|
2022-03-15T10:23:55.000Z
|
python2/rosmln/src/rosmln/srv/__init__.py
|
seba90/pracmln
|
2af9e11d72f077834cf130343a2506344480fb07
|
[
"BSD-2-Clause"
] | 29
|
2016-06-13T16:06:50.000Z
|
2022-01-07T23:31:22.000Z
|
python2/rosmln/src/rosmln/srv/__init__.py
|
seba90/pracmln
|
2af9e11d72f077834cf130343a2506344480fb07
|
[
"BSD-2-Clause"
] | 51
|
2016-03-22T05:42:45.000Z
|
2021-11-06T17:36:01.000Z
|
from ._MLNInterface import *
| 14.5
| 28
| 0.793103
| 3
| 29
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137931
| 29
| 1
| 29
| 29
| 0.88
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f6aa255fd57bd2f9d1269d67a9828bf1b9f9178d
| 38
|
py
|
Python
|
tests/conftest.py
|
kawmarco/jsonsubset
|
c4b6ca89ab7ac0cb471d5e83eeef8e3ab5ebd428
|
[
"MIT"
] | 3
|
2019-12-13T02:37:14.000Z
|
2021-07-15T00:50:08.000Z
|
tests/conftest.py
|
kawmarco/jsonsubset
|
c4b6ca89ab7ac0cb471d5e83eeef8e3ab5ebd428
|
[
"MIT"
] | 4
|
2017-08-11T23:04:06.000Z
|
2017-08-12T20:37:52.000Z
|
tests/conftest.py
|
kawmarco/jsonsubset
|
c4b6ca89ab7ac0cb471d5e83eeef8e3ab5ebd428
|
[
"MIT"
] | null | null | null |
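# compile .pyx modules on the fly at import time so the tests can import Cython sources directly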
import pyximport; pyximport.install()
| 19
| 37
| 0.815789
| 4
| 38
| 7.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 38
| 1
| 38
| 38
| 0.885714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f6af2448408626207a3eb413bca0d59a038c43b2
| 36,622
|
py
|
Python
|
trader_funcs.py
|
mcmissile007/trader
|
06070f0d786d08ffe4b8bbb5a08dcb2808cf16c4
|
[
"MIT"
] | null | null | null |
trader_funcs.py
|
mcmissile007/trader
|
06070f0d786d08ffe4b8bbb5a08dcb2808cf16c4
|
[
"MIT"
] | null | null | null |
trader_funcs.py
|
mcmissile007/trader
|
06070f0d786d08ffe4b8bbb5a08dcb2808cf16c4
|
[
"MIT"
] | null | null | null |
import json
import time
import poloniex_funcs as _poloniex
from datetime import datetime
import database_funcs as _db
import purchase_operations_funcs as _purchase
import matrix_funcs as _matrix
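# Collect the consecutive "buy" trades made since the most recent "sell"; given
# a newest-first trade history, these make up the currently open position.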
def get_last_purchase_trade_operations (semaphore,logger,currency_pair):
    trade_history = _poloniex.get_trade_history(semaphore,logger,currency_pair)  # assumed to be sorted newest-first
if trade_history == False:
return False
last_purchase_trade_operations = []
for trade in trade_history:
if trade['type'] == "sell":
return last_purchase_trade_operations
else:
last_purchase_trade_operations.append(trade)
return last_purchase_trade_operations
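# Flatten the open position's trades into (rate, epoch, fee, total, amount)
# tuples for the downstream mean-price and benefit calculations.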
def get_last_purchase_operations (semaphore,logger,currency_pair):
last_purchase_operations = []
last_purchase_trade_operations = get_last_purchase_trade_operations (semaphore,logger,currency_pair)
    if last_purchase_trade_operations == False or last_purchase_trade_operations == []:
return last_purchase_operations
else:
for trade in last_purchase_trade_operations:
date_ts = trade['date']
rate = float(trade['rate'])
fee = float(trade['fee'])
total = float(trade['total'])
amount = float(trade['amount'])
epoch = datetime.strptime(date_ts,'%Y-%m-%d %H:%M:%S.%f').timestamp()
last_purchase_operations.append((rate,epoch,fee,total,amount))
return last_purchase_operations
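# Size and place a buy: abort unless the price would improve the current mean
# purchase price, scale the amount with the model's r parameter and the
# available base balance, then buy at market ("now") or via an open order.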
def try_to_buy_in_base (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,output_rsi,model,mean_purchase_prices,global_quote_percent):
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
logger.debug("try to buy last ticket:{}".format(ticket))
purchase_type = "default"
if ticket:
last = ticket['last']
lowestAsk = ticket['lowestAsk']
purchase_loss = (lowestAsk/last) -1.0
purchase_price = lowestAsk
initial_amount_to_buy_in_base = float(model['initial_amount_to_buy_in_base'])
min_amount_to_buy_in_base = float(model['min_amount_to_buy_in_base'])
max_amount_to_buy_in_base = float(model['max_amount_to_buy_in_base'])
logger.debug("purchase_price lowestAsk:{}".format(purchase_price))
logger.debug("purchase_loss:{}".format(purchase_loss))
logger.debug("initial amount to buy in base:{}".format(initial_amount_to_buy_in_base))
logger.debug("min_amount_to_buy_in_base:{}".format(min_amount_to_buy_in_base))
logger.debug("max_amount_to_buy_in_base:{}".format(max_amount_to_buy_in_base))
purchase_operations = get_last_purchase_operations (semaphore,logger,currency_pair)
logger.debug("purchase_operations:{}".format(purchase_operations))
if len(purchase_operations) == 0:
amount_to_buy_in_base = initial_amount_to_buy_in_base
amount_to_buy_in_quote = amount_to_buy_in_base / purchase_price
logger.debug("intial amount_to_buy_in_base:{}".format(amount_to_buy_in_base))
logger.debug("amount_to_buy_in_quote:{}".format(amount_to_buy_in_quote))
current_mean_price = purchase_price
else:
current_mean_price = _purchase.getCurrentRealMeanPrice(purchase_operations)
logger.debug("current_mean_price:{}".format(current_mean_price))
delta_price_over_mean = (purchase_price/current_mean_price) - 1.0
logger.debug("delta_price_over_mean:{}".format(delta_price_over_mean))
if delta_price_over_mean > -0.002:
logger.debug("purchase_price:{}".format(purchase_price))
logger.debug("current_mean_price:{}".format(current_mean_price))
logger.debug("aborted buy not improve mean")
logger.debug("delta_price_over_mean:{}".format(delta_price_over_mean))
return
if model['r_mode'] == 1:
#sometimes in purchase_operations the same purchase has multiple entries because it is divided into pieces
#instead mean_purchase_prices has unique values per purchase
if len(mean_purchase_prices) == 0:
r_mod = model['r'] * len(purchase_operations)
else:
r_mod = model['r'] * len(mean_purchase_prices)
elif model['r_mode'] == 2:
r_mod = global_quote_percent/20.0
else:
r_mod = model['r']
r_mod = max(r_mod,model['r'])
logger.debug("r_mod:{}".format(r_mod))
amount_to_buy_in_quote = _purchase.get_amount_to_buy_in_quote(logger,purchase_operations,purchase_price,initial_amount_to_buy_in_base,min_amount_to_buy_in_base,max_amount_to_buy_in_base,r_mod)
amount_to_buy_in_base = amount_to_buy_in_quote * purchase_price
logger.debug("we are gointo buy in quote:{}".format(amount_to_buy_in_quote))
logger.debug("we are gointo buy in base:{}".format(amount_to_buy_in_base))
current_amount_invested = _purchase.getTotalCurrentBaseInversion(purchase_operations)
logger.debug("current_amount_invested:{}".format(current_amount_invested))
available_balances = _poloniex.get_available_balances(semaphore,logger)
if available_balances == False:
logger.debug("Error getting available_balances:{}".format(available_balances))
return False
currencies = currency_pair.split("_")
base_currency = currencies[0]
quote_currency = currencies[1]
base_balance = float(available_balances[base_currency])
quote_balance = float(available_balances[quote_currency])
logger.debug("base_balance:{}".format(base_balance))
logger.debug("quote_balance:{}".format(quote_balance))
if amount_to_buy_in_base < 5.0:
logger.debug("aborted buy by amount_to_buy_in_base")
return
if base_balance <= 10*min_amount_to_buy_in_base:
logger.debug("aborted buy by low balance")
logger.debug("base_balance:{}".format(base_balance))
return
        if base_balance <= amount_to_buy_in_base:
            amount_to_buy_in_base = base_balance - (base_balance*0.01)  # it's necessary to pay fees
            amount_to_buy_in_quote = amount_to_buy_in_base/purchase_price
            logger.debug("low balance revisited, we are going to buy in quote:{}".format(amount_to_buy_in_quote))
            logger.debug("low balance revisited, we are going to buy in base:{}".format(amount_to_buy_in_base))
if purchase_loss < 0.015:
purchase_type = "now"
response = _poloniex.buy_now(semaphore,logger,remote_data_base_config,currency_pair,time_frame,purchase_price,amount_to_buy_in_quote)
else:
purchase_price = last
purchase_type = "open_order"
logger.debug("new purchase_price:{} try to open order".format(purchase_price))
response = _poloniex.buy(semaphore,logger,currency_pair,purchase_price,amount_to_buy_in_quote)
logger.debug("response to buy:{}".format(response))
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
time.sleep(10)
purchase_operations = get_last_purchase_operations (semaphore,logger,currency_pair)
logger.debug("updated purchase_operations:{}".format(purchase_operations))
current_mean_price = _purchase.getCurrentRealMeanPrice(purchase_operations)
logger.debug("new current_mean_price:{}".format(current_mean_price))
delta_price_over_mean = (purchase_price/current_mean_price) - 1.0
logger.debug("new delta_price_over_mean:{}".format(delta_price_over_mean))
mean_purchase_prices.append(current_mean_price)
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'buy',purchase_price,amount_to_buy_in_quote,amount_to_buy_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New purchase {0} {1}s = {2} USDCs . purchase_price {3} . new current_mean_price {4}. delta_price_over_mean {5}. purchase_type {6}. base_balance {7}. quote_balance {8}".format(amount_to_buy_in_quote,quote_currency,amount_to_buy_in_base,purchase_price,current_mean_price,delta_price_over_mean,purchase_type,base_balance,quote_balance))
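# Sell the given amount immediately at the highest bid, protected by a 0.5%
# price limit, and log the resulting order.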
def try_to_sell_NOW (semaphore,logger,remote_data_base_config,currency_pair,increment,time_frame,output_rsi,amount_to_sell,mean_purchase_prices):
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
logger.debug("try to sell last ticket NOW:{}".format(ticket))
if ticket:
last = ticket['last']
highestBid = ticket['highestBid']
sell_loss = (last/highestBid) -1.0
sell_price = highestBid
if mean_purchase_prices == []:
current_mean_purchase_price = 0
else:
current_mean_purchase_price = mean_purchase_prices[-1]
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_loss:{}".format(sell_loss))
logger.debug("current_mean_purchase_price:{}".format(current_mean_purchase_price))
amount_in_base = amount_to_sell * sell_price
logger.debug("amount to sell in quote currency:{}".format(amount_to_sell))
logger.debug("last rate:{}".format(ticket['last']))
logger.debug("amount to sell in base:{}".format(amount_in_base))
sell_price_limit = sell_price - (sell_price*0.005)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
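# Liquidate a leftover ("UNSOLD") position: in always_win mode only sell once
# the benefit over the mean purchase price exceeds sos_rate; otherwise gate the
# sell on the candle RSI.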
def try_to_sell_UNSOLD (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,candle_rsi,output_rsi,amount_to_sell,always_win,min_current_rate_benefit,max_amount_to_buy_in_base,base_balance,sos_rate,mean_purchase_prices):
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
logger.debug("try to sell last ticket:{}".format(ticket))
if ticket:
last = ticket['last']
highestBid = ticket['highestBid']
sell_loss = (last/highestBid) -1.0
sell_price = highestBid
if mean_purchase_prices == []:
current_mean_purchase_price = 0
logger.error("zero current_mean_purchase_price:{}".format(current_mean_purchase_price))
return
else:
current_mean_purchase_price = mean_purchase_prices[-1]
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_loss:{}".format(sell_loss))
logger.debug("current_mean_purchase_price:{}".format(current_mean_purchase_price))
if current_mean_purchase_price == 0:
logger.error("zero current_mean_purchase_price:{}".format(current_mean_purchase_price))
return
amount_in_base = amount_to_sell * sell_price
logger.debug("amount to sell in quote currency:{}".format(amount_to_sell))
logger.debug("last rate:{}".format(ticket['last']))
logger.debug("amount to sell in base:{}".format(amount_in_base))
current_rate_benefit = (sell_price/current_mean_purchase_price) - 1.0
logger.debug("current_rate_benefit:{}".format(current_rate_benefit))
abs_rate_loss = abs(current_rate_benefit/(sell_loss+0.0001))
logger.debug("abs rate_loss:{}".format(abs_rate_loss))
if always_win:
if current_rate_benefit > float(sos_rate):
sell_price_limit = sell_price - (sell_price*0.005)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell UNSOLD currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_rate_benefit {5} . ".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_rate_benefit))
else:
logger.debug("current rate benefit not enough to UNSOLD sell:{}".format(current_rate_benefit))
else:
if candle_rsi > (output_rsi / 1000.0):
sell_price_limit = sell_price - (sell_price*0.005)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell UNSOLD currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_rate_benefit {5} . ".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_rate_benefit))
else:
                logger.debug("always_win is false and candle_rsi is too low for an UNSOLD sell:{}".format(candle_rsi))
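# Emergency ("SOS") exit: once the benefit exceeds sos_rate, split the sell into
# four open orders at slightly stepped prices and then sleep for 24 hours;
# otherwise fall back to an RSI-gated secure sell.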
def try_to_sell_SOS (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,candle_rsi,output_rsi,amount_to_sell,always_win,min_current_rate_benefit,max_amount_to_buy_in_base,base_balance,sos_rate,mean_purchase_prices):
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
logger.debug("try to sell last ticket:{}".format(ticket))
if ticket:
last = ticket['last']
highestBid = ticket['highestBid']
sell_loss = (last/highestBid) -1.0
sell_price = highestBid
if mean_purchase_prices == []:
current_mean_purchase_price = 0
else:
current_mean_purchase_price = mean_purchase_prices[-1]
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_loss:{}".format(sell_loss))
logger.debug("current_mean_purchase_price:{}".format(current_mean_purchase_price))
amount_in_base = amount_to_sell * sell_price
logger.debug("amount to sell in quote currency:{}".format(amount_to_sell))
logger.debug("last rate:{}".format(ticket['last']))
logger.debug("amount to sell in base:{}".format(amount_in_base))
purchase_operations = get_last_purchase_operations (semaphore,logger,currency_pair)
logger.debug("purchase_operations:{}".format(purchase_operations))
total_amount_invested_in_base = _purchase.getTotalCurrentBaseInversion(purchase_operations)
logger.debug("total_amount_invested_in_base:{}".format(total_amount_invested_in_base))
rate_amount = total_amount_invested_in_base / max_amount_to_buy_in_base
logger.debug("max_amount_to_buy_in_base:{}".format(max_amount_to_buy_in_base))
logger.debug("rate_amount:{}".format(rate_amount))
current_percent_benefit = _purchase.getCurrentPercentBenefit(purchase_operations,sell_price)
logger.debug("current_percent_benefit:{}".format(current_percent_benefit))
current_rate_benefit = current_percent_benefit/100.0
logger.debug("current_rate_benefit:{}".format(current_rate_benefit))
total_current_base_benefit = total_amount_invested_in_base * current_rate_benefit
logger.debug("total_current_base_benefit:{}".format(total_current_base_benefit))
abs_rate_loss = abs(current_rate_benefit/(sell_loss+0.0001))
logger.debug("abs rate_loss:{}".format(abs_rate_loss))
if always_win:
if current_rate_benefit > float(sos_rate):
'''
sell_price_limit = sell_price - (sell_price*0.005)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell SOS currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_percent_benefit {5}% . total_current_base_benefit {6} USDCs ".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_percent_benefit,total_current_base_benefit))
'''
fraction = 4
logger.debug("total_amount_to_sell:{}".format(amount_to_sell))
amount_to_sell = amount_to_sell / float(fraction)
amount_in_base = amount_in_base / float(fraction)
logger.debug("new_amount_to_sell:{}".format(amount_to_sell))
logger.debug("new_amount_in_base:{}".format(amount_in_base))
sell_price = sell_price - (sell_price*0.001)
for i in range(fraction):
step = i/1000
sell_price = sell_price + (sell_price*step)
logger.debug("sell_price:{}".format(sell_price))
if i == fraction - 1:
amount_to_sell = amount_to_sell - (amount_to_sell*0.005) # to ensure I don't spend what I don't have
logger.debug("amount_to_sell:{}".format(amount_to_sell))
response = _poloniex.sell(semaphore,logger,currency_pair,sell_price,amount_to_sell)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell open order postonly SOS currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_percent_benefit {5}% . total_current_base_benefit {6} USDCs ".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_percent_benefit,total_current_base_benefit))
_matrix.send("I'm going to sleep Pr.Falken.")
time.sleep(86400)
else:
logger.debug("current rate benefit not enough to SOS sell:{}".format(current_rate_benefit))
else:
if candle_rsi > (output_rsi / 1000.0):
sell_price_limit = sell_price - (sell_price*0.005)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell SOS currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_percent_benefit {5}% . total_current_base_benefit {6} USDCs".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_percent_benefit,total_current_base_benefit))
else:
                logger.debug("always_win is false and candle_rsi is too low for an SOS sell:{}".format(candle_rsi))
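# Regular take-profit sell: require at least min_current_rate_benefit, then sell
# the whole amount via sell_now_secure at the highest bid (the sell is
# deliberately not split; see the comments inside the function).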
def try_to_sell (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,output_rsi,amount_to_sell,always_win,min_current_rate_benefit,max_amount_to_buy_in_base,base_balance,mean_purchase_prices):
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
logger.debug("try to sell last ticket:{}".format(ticket))
if ticket:
last = ticket['last']
highestBid = ticket['highestBid']
sell_loss = (last/highestBid) -1.0
sell_price = highestBid
if mean_purchase_prices == []:
current_mean_purchase_price = 0
else:
current_mean_purchase_price = mean_purchase_prices[-1]
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_loss:{}".format(sell_loss))
logger.debug("current_mean_purchase_price:{}".format(current_mean_purchase_price))
amount_in_base = amount_to_sell * sell_price
logger.debug("amount to sell in quote currency:{}".format(amount_to_sell))
logger.debug("last rate:{}".format(ticket['last']))
logger.debug("amount to sell in base:{}".format(amount_in_base))
purchase_operations = get_last_purchase_operations (semaphore,logger,currency_pair)
logger.debug("purchase_operations:{}".format(purchase_operations))
total_amount_invested_in_base = _purchase.getTotalCurrentBaseInversion(purchase_operations)
logger.debug("total_amount_invested_in_base:{}".format(total_amount_invested_in_base))
rate_amount = total_amount_invested_in_base / max_amount_to_buy_in_base
logger.debug("max_amount_to_buy_in_base:{}".format(max_amount_to_buy_in_base))
logger.debug("rate_amount:{}".format(rate_amount))
current_percent_benefit = _purchase.getCurrentPercentBenefit(purchase_operations,sell_price)
logger.debug("current_percent_benefit:{}".format(current_percent_benefit))
current_rate_benefit = current_percent_benefit/100.0
logger.debug("current_rate_benefit:{}".format(current_rate_benefit))
total_current_base_benefit = total_amount_invested_in_base * current_rate_benefit
logger.debug("total_current_base_benefit:{}".format(total_current_base_benefit))
abs_rate_loss = abs(current_rate_benefit/(sell_loss+0.0001))
logger.debug("abs rate_loss:{}".format(abs_rate_loss))
'''
if always_win:
if (current_rate_benefit < min_current_rate_benefit):
logger.debug("aborted sell i want to win always:{} and current benefit:{}".format(min_current_rate_benefit,current_rate_benefit))
return
'''
        if True:  # in non-SOS mode, always_win is effectively always true
            if (current_rate_benefit < min_current_rate_benefit):
                logger.debug("aborted sell, I want to always win:{} and current benefit:{}".format(min_current_rate_benefit,current_rate_benefit))
                return
        # to avoid splitting the sell, always use sell_now_secure with highestBid;
        # splitting the sell can cause errors when computing the current mean price
'''
if sell_loss < 0.006 or abs_rate_loss>4.0 or rate_amount > 0.5 :
rate_secure = current_rate_benefit * 0.1
sell_price_limit = sell_price - (sell_price*rate_secure)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
else:
sell_price = last
logger.debug("new sell_price:{} try to open order".format(sell_price))
response = sell(semaphore,logger,currency_pair,sell_price,amount_to_sell)
'''
rate_secure = current_rate_benefit * 0.1
sell_price_limit = sell_price - (sell_price*rate_secure)
logger.debug("sell_price:{}".format(sell_price))
logger.debug("sell_price_limit:{}".format(sell_price_limit))
response = _poloniex.sell_now_secure(semaphore,logger,remote_data_base_config,currency_pair,time_frame,sell_price,amount_to_sell,sell_price_limit)
if response != False:
if 'orderNumber' in response and int(response['orderNumber']) > 0 :
_db.logInsertOrder(logger,remote_data_base_config,currency_pair,'sell',sell_price,amount_to_sell,amount_in_base,increment,"neighbors","1",ticket['last'],ticket['highestBid'],ticket['lowestAsk'],current_mean_purchase_price,output_rsi,int(response['orderNumber']),json.dumps(response))
_matrix.send("New sell currency_pair {0} . amount_in_quote {1} . amount_in_base {2} . sell_price {3} . mean_purchase_price {4} . current_percent_benefit {5}% . total_current_base_benefit {6} USDCs".format(currency_pair,amount_to_sell,amount_in_base,sell_price,current_mean_purchase_price,current_percent_benefit,total_current_base_benefit))
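# Report whether the order book is clear: cancel buy orders older than 15
# minutes, but never re-place orders or touch open sells.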
def simple_manage_open_orders(semaphore,logger,currency_pair,remote_data_base_config,model,time_frame,base_currency,quote_currency,output_rsi,close,increment,mean_purchase_prices,global_quote_percent):
open_orders = _poloniex.get_open_orders(semaphore,logger,currency_pair)
logger.debug("open_orders response:{}".format(open_orders))
if open_orders == False:
logger.error("Error getting open orders")
return False
if open_orders == []:
logger.debug("There is no open open orders OK!.")
return True
else:
logger.debug("There is open orders.")
for open_order in open_orders:
logger.debug("open order:{}".format(open_order))
if not 'orderNumber' in open_order:
logger.error("Error in open_order orderNumber")
return False
if int(open_order['orderNumber']) < 1:
logger.error("Error in open_order orderNumber less than 1")
return False
if 'date' in open_order:
order_datetime = datetime.strptime(open_order['date'] ,'%Y-%m-%d %H:%M:%S.%f')
seconds_diff = (datetime.now() - order_datetime).total_seconds()
logger.debug("seconds_diff:{}".format(seconds_diff))
else:
logger.error("Error in open_order date")
return False
if 'type' in open_order:
if open_order['type'] == 'buy':
if seconds_diff > 15 * 60:
retval = _poloniex.cancel_order(semaphore,logger,open_order['orderNumber'])
logger.debug("cancel order response:{}".format(retval))
if retval != False and "success" in retval and int(retval['success']) == 1:
_db.logInsertCancelOrder(logger,remote_data_base_config,currency_pair,"buy","neighbors","1",int(open_order['orderNumber']),json.dumps(retval))
return True
logger.error("buy order still open")
return False
if open_order['type'] == 'sell':
logger.error("sell order still open")
return False
return False
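# Full open-order management: cancel stale orders like the simple variant, and
# when the ticker's "last" price has moved, cancel and re-place the buy or sell
# at the new price.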
def manage_open_orders(semaphore,logger,currency_pair,remote_data_base_config,model,time_frame,base_currency,quote_currency,output_rsi,close,increment,mean_purchase_prices,global_quote_percent):
open_orders = _poloniex.get_open_orders(semaphore,logger,currency_pair)
logger.debug("open_orders response:{}".format(open_orders))
if open_orders == False:
logger.error("Error getting open orders")
return False
if open_orders == []:
logger.debug("There is no open open orders OK!.")
return True
else:
logger.debug("There is open orders i'll try to cancel them.")
for open_order in open_orders:
logger.debug("open order:{}".format(open_order))
if not 'orderNumber' in open_order:
logger.error("Error in open_order orderNumber")
return False
if int(open_order['orderNumber']) < 1:
logger.error("Error in open_order orderNumber less than 1")
return False
if 'date' in open_order:
order_datetime = datetime.strptime(open_order['date'] ,'%Y-%m-%d %H:%M:%S.%f')
seconds_diff = (datetime.now() - order_datetime).total_seconds()
logger.debug("seconds_diff:{}".format(seconds_diff))
else:
logger.error("Error in open_order date")
return False
if 'type' in open_order:
if open_order['type'] == 'buy':
if seconds_diff > 15 * 60:
retval = _poloniex.cancel_order(semaphore,logger,open_order['orderNumber'])
logger.debug("cancel order response:{}".format(retval))
if retval != False and "success" in retval and int(retval['success']) == 1:
_db.logInsertCancelOrder(logger,remote_data_base_config,currency_pair,"buy","neighbors","1",int(open_order['orderNumber']),json.dumps(retval))
else:
                        # if the ticket has changed, cancel the current order and place a new one
open_order_purchase_price = float(open_order['rate'])
logger.debug("open_order_purchase_price:{}".format(open_order_purchase_price))
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
if ticket == False:
logger.debug("Error getting last ticket to calculate new open_order_purchase_price:{}".format(ticket))
return False
last = float(ticket['last'])
logger.debug("last:{}".format(last))
if last != open_order_purchase_price:
logger.debug("the last value has changed now:{} before:{} ".format(last,open_order_purchase_price))
retval = _poloniex.cancel_order(semaphore,logger,open_order['orderNumber'])
logger.debug("response to cancel_order:{}".format(retval))
delta_change = (last/open_order_purchase_price) - 1.0
if retval != False and "success" in retval and int(retval['success']) == 1:
logger.debug("cancel order response ok:{}".format(retval))
_db.logInsertCancelOrder(logger,remote_data_base_config,currency_pair,"buy","neighbors","1",int(open_order['orderNumber']),json.dumps(retval))
time.sleep(2)
purchase_price = last
if delta_change < 0.005:
logger.debug("new purchase_price:{} try to open order".format(purchase_price))
try_to_buy_in_base (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,output_rsi,model,mean_purchase_prices,global_quote_percent)
else:
logger.debug("delta change:{} too high. The price is not as good as when shouldIInvest was yes.".format(delta_change))
if open_order['type'] == 'sell':
if seconds_diff > 40 * 60:
retval = _poloniex.cancel_order(semaphore,logger,open_order['orderNumber'])
logger.debug("cancel order response:{}".format(retval))
if retval != False and "success" in retval and int(retval['success']) == 1:
_db.logInsertCancelOrder(logger,remote_data_base_config,currency_pair,"sell","neighbors","1",int(open_order['orderNumber']),json.dumps(retval))
else:
#if ticket has changed cancel current order and add a new one
open_order_sell_price = float(open_order['rate'])
logger.debug("open_order_sell_price:{}".format(open_order_sell_price))
ticket = _db.getLastTicketFromDB(logger,remote_data_base_config,currency_pair,time_frame)
if ticket == False:
logger.debug("Error getting last ticket to calculate new open_order_sell_price:{}".format(ticket))
return False
last = float(ticket['last'])
logger.debug("last:{}".format(last))
if last != open_order_sell_price:
logger.debug("the last value has changed now:{} before:{} ".format(last,open_order_sell_price))
retval = _poloniex.cancel_order(semaphore,logger,open_order['orderNumber'])
logger.debug("response to cancel_order:{}".format(retval))
if retval != False and "success" in retval and int(retval['success']) == 1:
_db.logInsertCancelOrder(logger,remote_data_base_config,currency_pair,"sell","neighbors","1",int(open_order['orderNumber']),json.dumps(retval))
time.sleep(2)
sell_price = last
logger.debug("new sell_price:{} try to open order".format(sell_price))
available_balances = _poloniex.get_available_balances(semaphore,logger)
if available_balances == False:
logger.debug("Error getting available_balances:{}".format(available_balances))
return False
base_balance = float(available_balances[base_currency])
quote_balance = float(available_balances[quote_currency])
try_to_sell (semaphore,logger,remote_data_base_config,currency_pair,close,increment,time_frame,output_rsi,quote_balance,model['always_win'] ,model['min_current_rate_benefit'] ,model['max_amount_to_buy_in_base'],base_balance,mean_purchase_prices )
return False
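As a quick sanity check on the 0.5% re-buy guard above, a self-contained worked example (made-up prices, not from the source):

# Illustrative check of the delta_change guard (hypothetical values):
open_order_purchase_price = 100.0
last = 100.4
delta_change = (last / open_order_purchase_price) - 1.0   # 0.004, i.e. 0.4%
# 0.4% < 0.5%, so the bot re-places the buy at the new price;
# at last = 100.6 the delta would be 0.6% and the re-buy would be skipped.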
63ee071732091fda678d6e19bc619576aaf735cf | 207 | py | Python | packages/regression_model/path_here.py | sifubro/deploying-ml-api-udemy | c745a5d5839205aa66806524fdf92b070a27c599 | ["BSD-3-Clause"]
import pathlib

# Print the module name, then walk up the directory tree of this file.
print(__name__)                                        # module name (e.g. '__main__')
print(pathlib.Path(__file__))                          # path as seen by the interpreter
print(pathlib.Path(__file__).resolve())                # absolute, symlink-resolved path
print(pathlib.Path(__file__).resolve().parent)         # containing directory
print(pathlib.Path(__file__).resolve().parent.parent)  # one level up
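For illustration, a minimal self-contained sketch of the same parent-walking idiom with an assumed checkout location (the prefix /home/user is hypothetical; pathlib does not require the file to exist for these operations):

import pathlib

# Hypothetical file location; substitute any absolute path.
p = pathlib.Path("/home/user/deploying-ml-api-udemy/packages/regression_model/path_here.py")
print(p.parent)         # .../packages/regression_model
print(p.parent.parent)  # .../packages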
1205a4bc72010bf8c2e3ed82666760a2afd0b426 | 29 | py | Python | da_functions/__init__.py | armandotoledo/ibv_DA_functions | 58b37eeb84e215e36d993f38dff568f9529276af | ["MIT"]
from .functions import basic  # re-export, so callers can write: from da_functions import basic
12214b5578d8ab05772f63f8c2fbcb849e375ea0 | 345 | py | Python | fin_benefits/__init__.py | ajtanskanen/benefits | c67127c917b41ac707d89dcc2263587c4801163b | ["MIT"]
# Package root: re-export the public benefit classes and helpers.
from .benefits import Benefits
from .parameters import perheparametrit, tee_selite, print_examples
from .basic_income import BasicIncomeBenefits
from .benefits_unemp_EK import BenefitsEK
from .benefits_unemp_porrastus import BenefitsPorrastus
from .benefits_unemp_EK2020 import BenefitsEK2020
from .benefits_unemp_korotus import BenefitsKorotus
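Because this __init__.py re-exports the names, consumers can import directly from the package root rather than its submodules; a minimal usage sketch (assuming the package is installed):

# Hypothetical usage: both forms resolve to the same class object
# thanks to the re-exports above.
from fin_benefits import Benefits
from fin_benefits.benefits import Benefits as BenefitsDirect
assert Benefits is BenefitsDirect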